entry_point (string, len 1–65) | original_triton_code (string, len 4.5k–619k) | python_code (string, len 208–60.9k) | triton_code (string, len 1.15k–275k) | repo_name (string, len 7–115) | module_name (string, len 1–65) | synthetic (bool, 1 class) | uuid (int64, 0–18.5k) | licenses (sequence, len 1–6) | stars (int64, 0–19.8k) | sha (string, len 40) | repo_link (string, len 72–180)
---|---|---|---|---|---|---|---|---|---|---|---
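A minimal loading sketch, assuming the dataset is hosted on the Hugging Face Hub; `"org/dataset-name"` is a placeholder id, not the real Hub location:

```python
from datasets import load_dataset

# Placeholder repository id; substitute the actual Hub id before running.
ds = load_dataset("org/dataset-name", split="train")
row = ds[0]
print(row["entry_point"], row["module_name"], row["licenses"], row["stars"])
```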
Ceil | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/jw/cjwcwzevhosrcxbbaesipqcgq2ukvqv7x25yxindnqsrtbgwjxr5.py
# Topologically Sorted Source Nodes: [ceil], Original ATen: [aten.ceil]
# Source node to ATen node mapping:
# ceil => ceil
# Graph fragment:
# %ceil : [num_users=1] = call_function[target=torch.ops.aten.ceil.default](args = (%arg0_1,), kwargs = {})
triton_poi_fused_ceil_0 = async_compile.triton('triton_poi_fused_ceil_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_ceil_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_ceil_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = libdevice.ceil(tmp0)
tl.store(out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [ceil], Original ATen: [aten.ceil]
stream0 = get_raw_stream(0)
triton_poi_fused_ceil_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.onnx
import torch.nn as nn
class Ceil(nn.Module):
def forward(self, x):
return torch.ceil(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.onnx
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_ceil_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = libdevice.ceil(tmp0)
tl.store(out_ptr0 + x0, tmp1, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_ceil_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class CeilNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| mil-tokyo/webdnn | Ceil | false | 16,060 | ["MIT"] | 1,967 | 38a60fd3e1a4e72bc01108189a3aa51e0752aecd | https://github.com/mil-tokyo/webdnn/tree/38a60fd3e1a4e72bc01108189a3aa51e0752aecd |
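A smoke test one might run against this row, assuming the `python_code` and `triton_code` cells above were executed in one CUDA-enabled session so that `Ceil` and `CeilNew` are both in scope:

```python
import torch

x = torch.rand([4, 4, 4, 4], device='cuda')
eager = Ceil()(x)        # plain torch.ceil
compiled = CeilNew()(x)  # routes through the generated Triton kernel
# ceil maps fp32 values to exactly representable integers, so the two
# results should agree; allclose is used as a conservative check.
assert torch.allclose(eager, compiled)
```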
Concat3 | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/z5/cz5cv4otoqqg7gisuc6bx7rlau6zkmkldsfxasbc2l2hvtqjgjou.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%arg0_1, %arg1_1, %arg2_1], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 12
x0 = xindex % 16
x2 = (xindex // 192)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp9 & xmask, other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tl.load(in_ptr2 + (x0 + (16*((-8) + x1)) + (64*x2)), tmp11 & xmask, other=0.0)
tmp15 = tl.where(tmp9, tmp10, tmp14)
tmp16 = tl.where(tmp4, tmp5, tmp15)
tl.store(out_ptr0 + (x3), tmp16, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(arg0_1, arg1_1, arg2_1, buf0, 768, grid=grid(768), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.onnx
import torch.nn as nn
class Concat3(nn.Module):
def __init__(self):
super().__init__()
def forward(self, c0, c1, c2):
return torch.cat([c0, c1, c2], dim=1)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.onnx
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 12
x0 = xindex % 16
x2 = xindex // 192
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp9 & xmask,
other=0.0)
tmp11 = tmp0 >= tmp7
tl.full([1], 12, tl.int64)
tmp14 = tl.load(in_ptr2 + (x0 + 16 * (-8 + x1) + 64 * x2), tmp11 &
xmask, other=0.0)
tmp15 = tl.where(tmp9, tmp10, tmp14)
tmp16 = tl.where(tmp4, tmp5, tmp15)
tl.store(out_ptr0 + x3, tmp16, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 12, 4, 4), (192, 16, 4, 1), torch.float32
)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(768)](arg0_1, arg1_1, arg2_1, buf0, 768,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf0,
class Concat3New(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
| mil-tokyo/webdnn | Concat3 | false | 16,061 | ["MIT"] | 1,967 | 38a60fd3e1a4e72bc01108189a3aa51e0752aecd | https://github.com/mil-tokyo/webdnn/tree/38a60fd3e1a4e72bc01108189a3aa51e0752aecd |
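The fused kernel in this row implements `torch.cat` on dim 1 by branching on the channel index `x1` (`x1 < 4` reads `c0`, `4 <= x1 < 8` reads `c1`, else `c2`). A sketch of the equivalence, assuming the row's classes are in scope and a CUDA device is available:

```python
import torch

c0, c1, c2 = (torch.rand([4, 4, 4, 4], device='cuda') for _ in range(3))
out = Concat3New()(c0, c1, c2)
assert out.shape == (4, 12, 4, 4)
# Concatenation is a pure copy, so the result should match bitwise.
assert torch.equal(out, torch.cat([c0, c1, c2], dim=1))
```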
MultiSoftmaxCrossEntropyLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/td/ctdj5kazgiki6gdaadhqtp2x7tq2ee5ey5hqqdcoqmp54jyhf74f.py
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax), kwargs = {})
triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/pn/cpn2tnmetwnwggru4zb7citgzpoppzsml6f3jjyrvvdwql4vsw6p.py
# Topologically Sorted Source Nodes: [neg, log_softmax, mul, loss, sum_2, loss_1], Original ATen: [aten.neg, aten._log_softmax, aten.mul, aten.sum, aten.div]
# Source node to ATen node mapping:
# log_softmax => exp, log, sub_1, sum_1
# loss => sum_2
# loss_1 => div
# mul => mul
# neg => neg
# sum_2 => sum_3
# Graph fragment:
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg0_1,), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, %sub_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%arg0_1,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_2, %sum_3), kwargs = {})
triton_per_fused__log_softmax_div_mul_neg_sum_1 = async_compile.triton('triton_per_fused__log_softmax_div_mul_neg_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_div_mul_neg_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 6, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_div_mul_neg_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = (rindex // 64)
tmp0 = tl.load(in_ptr0 + (r3), None)
tmp2 = tl.load(in_ptr1 + (r3), None)
tmp3 = tl.load(in_ptr1 + (r0 + (64*r2)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr1 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp1 = -tmp0
tmp4 = tl_math.exp(tmp3)
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp4 + tmp6
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp12 = tl_math.exp(tmp11)
tmp13 = tmp10 + tmp12
tmp14 = tl_math.log(tmp13)
tmp15 = tmp2 - tmp14
tmp16 = tmp1 * tmp15
tmp17 = tl.broadcast_to(tmp16, [RBLOCK])
tmp19 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0))
tmp20 = tl.broadcast_to(tmp0, [RBLOCK])
tmp22 = triton_helpers.promote_to_tensor(tl.sum(tmp20, 0))
tmp23 = tmp19 / tmp22
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp23, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__log_softmax_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [neg, log_softmax, mul, loss, sum_2, loss_1], Original ATen: [aten.neg, aten._log_softmax, aten.mul, aten.sum, aten.div]
triton_per_fused__log_softmax_div_mul_neg_sum_1.run(buf3, arg0_1, buf0, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del buf0
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch as th
import torch.nn as nn
import torch.utils.data.distributed
class MultiSoftmaxCrossEntropyLoss(nn.Module):
def __init__(self, class_weight=None, label_smoothing_value=0):
super(MultiSoftmaxCrossEntropyLoss, self).__init__()
self.class_weight = class_weight
if self.class_weight is not None:
self.class_weight = self.class_weight
self.logsoftmax = nn.LogSoftmax(dim=1)
self.label_smoothing_value = label_smoothing_value
def forward(self, input, target):
return self.cross_entropy(input, target, self.class_weight)
def cross_entropy(self, pred, soft_targets, class_weight=None):
if class_weight is not None:
class_weight_matrix = class_weight.expand_as(soft_targets)
used_class_weights = th.where(soft_targets > 0,
class_weight_matrix, soft_targets)
samples_weight = th.max(used_class_weights, dim=1, keepdim=True)[0]
loss = th.mean(th.sum(-samples_weight * soft_targets * self.
logsoftmax(pred), 1), 0)
else:
if self.label_smoothing_value > 0:
batch_size, total_classes_count = soft_targets.size()
for sample_index in range(batch_size):
pos_indices = np.where(soft_targets[sample_index, :] > 0)
pos_classes_count = len(pos_indices[0])
if pos_classes_count > 0:
neg_p = self.label_smoothing_value / float(
total_classes_count - pos_classes_count)
pos_p = self.label_smoothing_value / pos_classes_count
soft_targets[sample_index, :] += neg_p
soft_targets[sample_index, pos_indices[0]
] = soft_targets[sample_index, pos_indices[0]
] - pos_p - neg_p
loss = th.sum(-soft_targets * self.logsoftmax(pred))
loss = loss / soft_targets.sum()
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import numpy as np
import torch as th
import torch.nn as nn
import torch.utils.data.distributed
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_per_fused__log_softmax_div_mul_neg_sum_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = rindex // 64
tmp0 = tl.load(in_ptr0 + r3, None)
tmp2 = tl.load(in_ptr1 + r3, None)
tmp3 = tl.load(in_ptr1 + (r0 + 64 * r2), None, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr1 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp8 = tl.load(in_ptr1 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr1 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp1 = -tmp0
tmp4 = tl_math.exp(tmp3)
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp4 + tmp6
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp12 = tl_math.exp(tmp11)
tmp13 = tmp10 + tmp12
tmp14 = tl_math.log(tmp13)
tmp15 = tmp2 - tmp14
tmp16 = tmp1 * tmp15
tmp17 = tl.broadcast_to(tmp16, [RBLOCK])
tmp19 = triton_helpers.promote_to_tensor(tl.sum(tmp17, 0))
tmp20 = tl.broadcast_to(tmp0, [RBLOCK])
tmp22 = triton_helpers.promote_to_tensor(tl.sum(tmp20, 0))
tmp23 = tmp19 / tmp22
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp23, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg1_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf3 = buf1
del buf1
triton_per_fused__log_softmax_div_mul_neg_sum_1[grid(1)](buf3,
arg0_1, buf0, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del buf0
return buf3,
class MultiSoftmaxCrossEntropyLossNew(nn.Module):
def __init__(self, class_weight=None, label_smoothing_value=0):
super(MultiSoftmaxCrossEntropyLossNew, self).__init__()
self.class_weight = class_weight
if self.class_weight is not None:
self.class_weight = self.class_weight
self.logsoftmax = nn.LogSoftmax(dim=1)
self.label_smoothing_value = label_smoothing_value
def cross_entropy(self, pred, soft_targets, class_weight=None):
if class_weight is not None:
class_weight_matrix = class_weight.expand_as(soft_targets)
used_class_weights = th.where(soft_targets > 0,
class_weight_matrix, soft_targets)
samples_weight = th.max(used_class_weights, dim=1, keepdim=True)[0]
loss = th.mean(th.sum(-samples_weight * soft_targets * self.
logsoftmax(pred), 1), 0)
else:
if self.label_smoothing_value > 0:
batch_size, total_classes_count = soft_targets.size()
for sample_index in range(batch_size):
pos_indices = np.where(soft_targets[sample_index, :] > 0)
pos_classes_count = len(pos_indices[0])
if pos_classes_count > 0:
neg_p = self.label_smoothing_value / float(
total_classes_count - pos_classes_count)
pos_p = self.label_smoothing_value / pos_classes_count
soft_targets[sample_index, :] += neg_p
soft_targets[sample_index, pos_indices[0]
] = soft_targets[sample_index, pos_indices[0]
] - pos_p - neg_p
loss = th.sum(-soft_targets * self.logsoftmax(pred))
loss = loss / soft_targets.sum()
return loss
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| microsoft/vision-longformer | MultiSoftmaxCrossEntropyLoss | false | 16,062 | ["MIT"] | 169 | c9ce386de3e633bb3c805368d118356fbd696487 | https://github.com/microsoft/vision-longformer/tree/c9ce386de3e633bb3c805368d118356fbd696487 |
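With `class_weight=None` and zero label smoothing, the eager module in this row reduces to a soft-target cross entropy normalized by the target mass. A CPU-only sketch of that identity, assuming `MultiSoftmaxCrossEntropyLoss` from the `python_code` cell above is in scope:

```python
import torch
import torch.nn.functional as F

pred = torch.rand(4, 4, 4, 4)
targets = torch.rand(4, 4, 4, 4)
loss = MultiSoftmaxCrossEntropyLoss()(pred, targets)
# sum(-targets * log_softmax(pred, dim=1)) / targets.sum()
ref = torch.sum(-targets * F.log_softmax(pred, dim=1)) / targets.sum()
assert torch.allclose(loss, ref)
```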
Asin | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/p3/cp3kqzlzgl7cgephsog4xpbgq5zsjs4fgtjilqtdaojinzuzk2pp.py
# Topologically Sorted Source Nodes: [asin], Original ATen: [aten.asin]
# Source node to ATen node mapping:
# asin => asin
# Graph fragment:
# %asin : [num_users=1] = call_function[target=torch.ops.aten.asin.default](args = (%arg0_1,), kwargs = {})
triton_poi_fused_asin_0 = async_compile.triton('triton_poi_fused_asin_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_asin_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_asin_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = libdevice.asin(tmp0)
tl.store(out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [asin], Original ATen: [aten.asin]
stream0 = get_raw_stream(0)
triton_poi_fused_asin_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.onnx
import torch.nn as nn
class Asin(nn.Module):
def forward(self, x):
return torch.asin(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.onnx
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_asin_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = libdevice.asin(tmp0)
tl.store(out_ptr0 + x0, tmp1, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_asin_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class AsinNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| mil-tokyo/webdnn | Asin | false | 16,063 | ["MIT"] | 1,967 | 38a60fd3e1a4e72bc01108189a3aa51e0752aecd | https://github.com/mil-tokyo/webdnn/tree/38a60fd3e1a4e72bc01108189a3aa51e0752aecd |
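The launch configuration in this row follows the usual Inductor arithmetic. A sketch of just that arithmetic, with the numbers taken from the kernel above (`triton.cdiv` is Triton's real ceiling-division helper):

```python
import triton

xnumel, XBLOCK = 256, 256  # values hard-coded in the kernel above
num_programs = triton.cdiv(xnumel, XBLOCK)  # grid(256) -> 1 program
assert num_programs == 1
# Each program covers XBLOCK contiguous indices; xmask = xindex < xnumel
# guards the tail, which is empty here because 256 divides evenly.
```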
Atanh | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/lz/clzyvhf7gc5moofbpjng6qjcohvrhibsyn5za4unu3regfwa3uv7.py
# Topologically Sorted Source Nodes: [atanh], Original ATen: [aten.atanh]
# Source node to ATen node mapping:
# atanh => atanh
# Graph fragment:
# %atanh : [num_users=1] = call_function[target=torch.ops.aten.atanh.default](args = (%arg0_1,), kwargs = {})
triton_poi_fused_atanh_0 = async_compile.triton('triton_poi_fused_atanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_atanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_atanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = libdevice.atanh(tmp0)
tl.store(out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [atanh], Original ATen: [aten.atanh]
stream0 = get_raw_stream(0)
triton_poi_fused_atanh_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.onnx
import torch.nn as nn
class Atanh(nn.Module):
def forward(self, x):
return torch.atanh(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.onnx
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_atanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = libdevice.atanh(tmp0)
tl.store(out_ptr0 + x0, tmp1, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_atanh_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class AtanhNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| mil-tokyo/webdnn | Atanh | false | 16,064 | ["MIT"] | 1,967 | 38a60fd3e1a4e72bc01108189a3aa51e0752aecd | https://github.com/mil-tokyo/webdnn/tree/38a60fd3e1a4e72bc01108189a3aa51e0752aecd |
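A domain note worth checking in eager mode: this row's `get_inputs()` samples from [0, 1), where atanh is finite; the function diverges at exactly 1. A quick sketch:

```python
import torch

x = torch.rand([4, 4, 4, 4])  # values in [0, 1), as in get_inputs()
assert torch.isfinite(torch.atanh(x)).all()
assert torch.atanh(torch.tensor(1.0)).item() == float('inf')
```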
AveragePool | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/f3/cf3nc7smypvt6tj3vwpqtvvlxeghtu44dgzh5zltfx73viqd3cvr.py
# Topologically Sorted Source Nodes: [avg_pool2d], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
# avg_pool2d => avg_pool2d
# Graph fragment:
# %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%arg0_1, [3, 3], [1, 1], [0, 0], True, False), kwargs = {})
triton_poi_fused_avg_pool2d_0 = async_compile.triton('triton_poi_fused_avg_pool2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = (xindex // 2) % 2
x2 = (xindex // 4)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*x2)), xmask)
tmp1 = tl.load(in_ptr0 + (1 + x0 + (4*x1) + (16*x2)), xmask)
tmp3 = tl.load(in_ptr0 + (2 + x0 + (4*x1) + (16*x2)), xmask)
tmp5 = tl.load(in_ptr0 + (4 + x0 + (4*x1) + (16*x2)), xmask)
tmp7 = tl.load(in_ptr0 + (5 + x0 + (4*x1) + (16*x2)), xmask)
tmp9 = tl.load(in_ptr0 + (6 + x0 + (4*x1) + (16*x2)), xmask)
tmp11 = tl.load(in_ptr0 + (8 + x0 + (4*x1) + (16*x2)), xmask)
tmp13 = tl.load(in_ptr0 + (9 + x0 + (4*x1) + (16*x2)), xmask)
tmp15 = tl.load(in_ptr0 + (10 + x0 + (4*x1) + (16*x2)), xmask)
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp8 = tmp7 + tmp6
tmp10 = tmp9 + tmp8
tmp12 = tmp11 + tmp10
tmp14 = tmp13 + tmp12
tmp16 = tmp15 + tmp14
tmp17 = 0.1111111111111111
tmp18 = tmp16 * tmp17
tl.store(out_ptr0 + (x3), tmp18, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [avg_pool2d], Original ATen: [aten.avg_pool2d]
stream0 = get_raw_stream(0)
triton_poi_fused_avg_pool2d_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.onnx
import torch.nn as nn
class AveragePool(nn.Module):
def __init__(self):
super().__init__()
self.pool = nn.AvgPool2d(kernel_size=3, stride=1, padding=0,
ceil_mode=True, count_include_pad=False)
def forward(self, x):
return self.pool(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.onnx
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2 % 2
x2 = xindex // 4
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * x2), xmask)
tmp1 = tl.load(in_ptr0 + (1 + x0 + 4 * x1 + 16 * x2), xmask)
tmp3 = tl.load(in_ptr0 + (2 + x0 + 4 * x1 + 16 * x2), xmask)
tmp5 = tl.load(in_ptr0 + (4 + x0 + 4 * x1 + 16 * x2), xmask)
tmp7 = tl.load(in_ptr0 + (5 + x0 + 4 * x1 + 16 * x2), xmask)
tmp9 = tl.load(in_ptr0 + (6 + x0 + 4 * x1 + 16 * x2), xmask)
tmp11 = tl.load(in_ptr0 + (8 + x0 + 4 * x1 + 16 * x2), xmask)
tmp13 = tl.load(in_ptr0 + (9 + x0 + 4 * x1 + 16 * x2), xmask)
tmp15 = tl.load(in_ptr0 + (10 + x0 + 4 * x1 + 16 * x2), xmask)
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp8 = tmp7 + tmp6
tmp10 = tmp9 + tmp8
tmp12 = tmp11 + tmp10
tmp14 = tmp13 + tmp12
tmp16 = tmp15 + tmp14
tmp17 = 0.1111111111111111
tmp18 = tmp16 * tmp17
tl.store(out_ptr0 + x3, tmp18, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_avg_pool2d_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
class AveragePoolNew(nn.Module):
def __init__(self):
super().__init__()
self.pool = nn.AvgPool2d(kernel_size=3, stride=1, padding=0,
ceil_mode=True, count_include_pad=False)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| mil-tokyo/webdnn | AveragePool | false | 16,065 | ["MIT"] | 1,967 | 38a60fd3e1a4e72bc01108189a3aa51e0752aecd | https://github.com/mil-tokyo/webdnn/tree/38a60fd3e1a4e72bc01108189a3aa51e0752aecd |
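A shape-and-scaling sketch for this row, using only eager PyTorch: a 3x3 window with stride 1 and no padding on a 4x4 map yields a 2x2 output (ceil_mode changes nothing here), and every window is full, which is why the kernel can bake in the constant 1/9:

```python
import torch
import torch.nn as nn

pool = nn.AvgPool2d(kernel_size=3, stride=1, padding=0, ceil_mode=True,
                    count_include_pad=False)
x = torch.rand([4, 4, 4, 4])
out = pool(x)
assert out.shape == (4, 4, 2, 2)
# Hand-rolled average of the top-left 3x3 window, matching the kernel's
# sum-then-multiply-by-1/9 structure.
ref = x[:, :, 0:3, 0:3].mean(dim=(2, 3))
assert torch.allclose(out[:, :, 0, 0], ref)
```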
Attention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ao/caoovxtqrx42gvkmjirowqmmbh6kppvfh5ebrzzv4kzkgwm2umii.py
# Topologically Sorted Source Nodes: [h_t], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# h_t => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1)), xmask)
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/vq/cvqssqiht73l27ziprpd2xxliijjblbo4tjesbcrnjwuoimqhlqq.py
# Topologically Sorted Source Nodes: [eq, attn_scores, softmax], Original ATen: [aten.eq, aten.masked_fill, aten._softmax]
# Source node to ATen node mapping:
# attn_scores => full_default, where
# eq => eq
# softmax => amax, exp, sub, sum_1
# Graph fragment:
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%expand, 0), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -10000000000.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %bmm), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where, [2], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {})
triton_poi_fused__softmax_eq_masked_fill_1 = async_compile.triton('triton_poi_fused__softmax_eq_masked_fill_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_eq_masked_fill_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_eq_masked_fill_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (4*x2), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (1 + (4*x2)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (2 + (4*x2)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr1 + (3 + (4*x2)), xmask, eviction_policy='evict_last')
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tmp4 = -10000000000.0
tmp5 = tl.where(tmp2, tmp4, tmp3)
tmp7 = tmp6 == tmp1
tmp9 = tl.where(tmp7, tmp4, tmp8)
tmp10 = triton_helpers.maximum(tmp5, tmp9)
tmp12 = tmp11 == tmp1
tmp14 = tl.where(tmp12, tmp4, tmp13)
tmp15 = triton_helpers.maximum(tmp10, tmp14)
tmp17 = tmp16 == tmp1
tmp19 = tl.where(tmp17, tmp4, tmp18)
tmp20 = triton_helpers.maximum(tmp15, tmp19)
tmp21 = tmp5 - tmp20
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp9 - tmp20
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = tmp14 - tmp20
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tmp19 - tmp20
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp28 + tmp30
tl.store(out_ptr0 + (x2), tmp20, xmask)
tl.store(out_ptr1 + (x2), tmp31, xmask)
''', device_str='cuda')
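# Reader note (added commentary, hedged): this first softmax pass emits the two
# per-row statistics a numerically stable softmax needs. In eager PyTorch the
# same values would be
#     masked = scores.masked_fill(mask == 0, -1e10)
#     row_max = masked.amax(dim=2, keepdim=True)                    # -> out_ptr0
#     row_sum = (masked - row_max).exp().sum(dim=2, keepdim=True)   # -> out_ptr1
# with the four explicit loads unrolling the src_len == 4 reduction.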
# kernel path: runs/run_shard_0/inductor_cache/5e/c5e5kpcd7wh4cvkrmbjt34vlegxvaggfbkw2n2jvxuum7nvwuiig.py
# Topologically Sorted Source Nodes: [eq, attn_scores, softmax], Original ATen: [aten.eq, aten.masked_fill, aten._softmax]
# Source node to ATen node mapping:
# attn_scores => full_default, where
# eq => eq
# softmax => amax, div, exp, sub
# Graph fragment:
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%expand, 0), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -10000000000.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %bmm), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where, [2], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_eq_masked_fill_2 = async_compile.triton('triton_poi_fused__softmax_eq_masked_fill_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_eq_masked_fill_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_eq_masked_fill_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = (xindex // 16)
x3 = xindex
x4 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_out_ptr0 + (x3), xmask)
tmp6 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last')
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tmp4 = -10000000000.0
tmp5 = tl.where(tmp2, tmp4, tmp3)
tmp7 = tmp5 - tmp6
tmp8 = tl_math.exp(tmp7)
tmp10 = tmp8 / tmp9
tl.store(in_out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
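# Reader note (added commentary, hedged): the second pass re-applies the mask,
# subtracts the cached row max, exponentiates, and divides by the cached row
# sum in place, so buf6 ends up holding
# softmax(scores.masked_fill(mask == 0, -1e10), dim=2).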
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_t], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_t__1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_s], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(primals_2, buf2, 64, grid=grid(64), stream=stream0)
del primals_2
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [score], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), out=buf3)
del buf1
buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [eq, attn_scores, softmax], Original ATen: [aten.eq, aten.masked_fill, aten._softmax]
triton_poi_fused__softmax_eq_masked_fill_1.run(primals_4, buf3, buf4, buf5, 16, grid=grid(16), stream=stream0)
buf6 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [eq, attn_scores, softmax], Original ATen: [aten.eq, aten.masked_fill, aten._softmax]
triton_poi_fused__softmax_eq_masked_fill_2.run(buf6, primals_4, buf4, buf5, 64, grid=grid(64), stream=stream0)
del buf4
del buf5
return (buf6, primals_4, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf6, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class Attention(nn.Module):
def __init__(self, hidden_size):
super(Attention, self).__init__()
self.hidden_size = hidden_size
self.linear_in = nn.Linear(hidden_size, hidden_size, bias=False)
def score(self, hidden_state, encoder_states):
"""
Args:
hidden_state (tgt_len(=1), batch, hidden_size)
encoder_states (src_len, batch, hidden_size)
Return:
score (batch, tgt_len(=1), src_len)
"""
h_t = hidden_state.transpose(0, 1).contiguous()
h_s = encoder_states.transpose(0, 1).contiguous()
_src_batch, _src_len, _src_dim = h_s.size()
tgt_batch, tgt_len, tgt_dim = h_t.size()
h_t_ = h_t.view(tgt_batch * tgt_len, tgt_dim)
h_t_ = self.linear_in(h_t_)
h_t = h_t_.view(tgt_batch, tgt_len, tgt_dim)
h_s_ = h_s.transpose(1, 2)
score = torch.bmm(h_t, h_s_)
return score
def forward(self, hidden, encoder_outputs, src_mask, tgt_mask=None):
"""
Args:
hidden (tgt_len(=1), batch, hidden_size)
encoder_outputs (src_len, batch, hidden_size)
src_mask (batch, src_len)
tgt_mask (batch, tgt_len)
Return:
attn: (batch, tgt_len(=1), src_len)
"""
tgt_len, b_size, _hiddim = hidden.size()
src_len = encoder_outputs.size(0)
attn_scores = self.score(hidden, encoder_outputs)
src_mask = src_mask.unsqueeze(1).expand(b_size, tgt_len, src_len)
attn_scores = attn_scores.masked_fill(src_mask == 0, -10000000000.0)
if tgt_mask is not None:
tgt_mask = tgt_mask.unsqueeze(2).expand(b_size, tgt_len, src_len)
            attn_scores = attn_scores.masked_fill(tgt_mask == 0, -10000000000.0)
return F.softmax(attn_scores, dim=2)
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4}]
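# Hedged usage sketch (added, not part of the original record): exercises the
# eager module on CPU with the shapes declared above.
if __name__ == "__main__":
    attn = Attention(**get_init_inputs()[1])
    hidden, enc_out, src_mask = get_inputs()
    weights = attn(hidden, enc_out, src_mask)  # (batch, tgt_len, src_len)
    row_sums = weights.sum(dim=2)
    # softmax rows must sum to one
    assert torch.allclose(row_sums, torch.ones_like(row_sums))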
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask)
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused__softmax_eq_masked_fill_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
    tmp18 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last')
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tmp4 = -10000000000.0
tmp5 = tl.where(tmp2, tmp4, tmp3)
tmp7 = tmp6 == tmp1
tmp9 = tl.where(tmp7, tmp4, tmp8)
tmp10 = triton_helpers.maximum(tmp5, tmp9)
tmp12 = tmp11 == tmp1
tmp14 = tl.where(tmp12, tmp4, tmp13)
tmp15 = triton_helpers.maximum(tmp10, tmp14)
tmp17 = tmp16 == tmp1
tmp19 = tl.where(tmp17, tmp4, tmp18)
tmp20 = triton_helpers.maximum(tmp15, tmp19)
tmp21 = tmp5 - tmp20
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp9 - tmp20
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = tmp14 - tmp20
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tmp19 - tmp20
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp28 + tmp30
tl.store(out_ptr0 + x2, tmp20, xmask)
tl.store(out_ptr1 + x2, tmp31, xmask)
@triton.jit
def triton_poi_fused__softmax_eq_masked_fill_2(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex // 16
x3 = xindex
x4 = xindex // 4
    tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_out_ptr0 + x3, xmask)
tmp6 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tmp4 = -10000000000.0
tmp5 = tl.where(tmp2, tmp4, tmp3)
tmp7 = tmp5 - tmp6
tmp8 = tl_math.exp(tmp7)
tmp10 = tmp8 / tmp9
tl.store(in_out_ptr0 + x3, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clone_0[grid(64)](primals_2, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_2
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), out=buf3)
del buf1
buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused__softmax_eq_masked_fill_1[grid(16)](primals_4,
buf3, buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf6 = buf3
del buf3
triton_poi_fused__softmax_eq_masked_fill_2[grid(64)](buf6,
primals_4, buf4, buf5, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf4
del buf5
    return buf6, primals_4, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf6, buf2
class AttentionNew(nn.Module):
def __init__(self, hidden_size):
super(AttentionNew, self).__init__()
self.hidden_size = hidden_size
self.linear_in = nn.Linear(hidden_size, hidden_size, bias=False)
def score(self, hidden_state, encoder_states):
"""
Args:
hidden_state (tgt_len(=1), batch, hidden_size)
encoder_states (src_len, batch, hidden_size)
Return:
score (batch, tgt_len(=1), src_len)
"""
h_t = hidden_state.transpose(0, 1).contiguous()
h_s = encoder_states.transpose(0, 1).contiguous()
_src_batch, _src_len, _src_dim = h_s.size()
tgt_batch, tgt_len, tgt_dim = h_t.size()
h_t_ = h_t.view(tgt_batch * tgt_len, tgt_dim)
h_t_ = self.linear_in(h_t_)
h_t = h_t_.view(tgt_batch, tgt_len, tgt_dim)
h_s_ = h_s.transpose(1, 2)
score = torch.bmm(h_t, h_s_)
return score
def forward(self, input_0, input_1, input_2):
primals_3 = self.linear_in.weight
primals_1 = input_0
primals_2 = input_1
primals_4 = input_2
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
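# Hedged usage sketch (added; CUDA required since call() allocates with
# empty_strided_cuda). AttentionNew takes only the three tensors because
# call() threads self.linear_in.weight through as primals_3:
#
#     model = AttentionNew(hidden_size=4).cuda()
#     h = torch.rand(4, 4, 4, device='cuda')   # (tgt_len, batch, hidden)
#     e = torch.rand(4, 4, 4, device='cuda')   # (src_len, batch, hidden)
#     m = torch.rand(4, 4, device='cuda')      # (batch, src_len) mask
#     attn = model(h, e, m)                    # (batch, tgt_len, src_len)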
| michiyasunaga/DrRepair | Attention | false | 16,066 | ["MIT"] | 139 | fb447594149ac4f80fef8ba091373184120019c7 | https://github.com/michiyasunaga/DrRepair/tree/fb447594149ac4f80fef8ba091373184120019c7 |
Acos | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/id/cidpai6h3jazpcjxlgbgi3gxmigqbwhgdvtnhhwc4g5m72pksczu.py
# Topologically Sorted Source Nodes: [acos], Original ATen: [aten.acos]
# Source node to ATen node mapping:
# acos => acos
# Graph fragment:
# %acos : [num_users=1] = call_function[target=torch.ops.aten.acos.default](args = (%arg0_1,), kwargs = {})
triton_poi_fused_acos_0 = async_compile.triton('triton_poi_fused_acos_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_acos_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_acos_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = libdevice.acos(tmp0)
tl.store(out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [acos], Original ATen: [aten.acos]
stream0 = get_raw_stream(0)
triton_poi_fused_acos_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.onnx
import torch.nn as nn
class Acos(nn.Module):
def forward(self, x):
return torch.acos(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
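# Hedged sanity check (added, CPU eager path): torch.rand yields values in
# [0, 1), inside acos's domain [-1, 1], so cos(acos(x)) recovers x.
if __name__ == "__main__":
    x = get_inputs()[0]
    y = Acos()(x)
    assert torch.allclose(torch.cos(y), x, atol=1e-6)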
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.onnx
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_acos_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = libdevice.acos(tmp0)
tl.store(out_ptr0 + x0, tmp1, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_acos_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class AcosNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| mil-tokyo/webdnn | Acos | false | 16,067 | ["MIT"] | 1,967 | 38a60fd3e1a4e72bc01108189a3aa51e0752aecd | https://github.com/mil-tokyo/webdnn/tree/38a60fd3e1a4e72bc01108189a3aa51e0752aecd |
Concat4 | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/va/cvae5g4nrdf6t3avgwt27jisql4uieqvhjyd573ke3pilhv74vet.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%arg0_1, %arg1_1, %arg2_1, %arg3_1], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 16
x0 = xindex % 16
x2 = (xindex // 256)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp9 & xmask, other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + (x0 + (16*((-8) + x1)) + (64*x2)), tmp14 & xmask, other=0.0)
tmp16 = tmp0 >= tmp12
tmp17 = tl.full([1], 16, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tl.load(in_ptr3 + (x0 + (16*((-12) + x1)) + (64*x2)), tmp16 & xmask, other=0.0)
tmp20 = tl.where(tmp14, tmp15, tmp19)
tmp21 = tl.where(tmp9, tmp10, tmp20)
tmp22 = tl.where(tmp4, tmp5, tmp21)
tl.store(out_ptr0 + (x3), tmp22, xmask)
''', device_str='cuda')
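# Reader note (added commentary, hedged): x1 indexes the concatenated channel
# dimension (0..15); the chained tl.where selects which of the four inputs to
# read, shifting the channel by -4/-8/-12 for inputs 1-3. This is the standard
# Inductor lowering of torch.cat along dim=1 into a single gather kernel.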
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(arg0_1, arg1_1, arg2_1, arg3_1, buf0, 1024, grid=grid(1024), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg3_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.onnx
import torch.nn as nn
class Concat4(nn.Module):
def __init__(self):
super().__init__()
def forward(self, c0, c1, c2, c3):
return torch.cat([c0, c1, c2, c3], dim=1)
def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
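# Hedged shape check (added, CPU eager path): four (4, 4, 4, 4) inputs
# concatenated on dim=1 give a (4, 16, 4, 4) output, matching the buf0
# allocation in the compiled call() below.
if __name__ == "__main__":
    out = Concat4()(*get_inputs())
    assert out.shape == (4, 16, 4, 4)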
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.onnx
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 16
x0 = xindex % 16
x2 = xindex // 256
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp9 & xmask,
other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 12, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + (x0 + 16 * (-8 + x1) + 64 * x2), tmp14 &
xmask, other=0.0)
tmp16 = tmp0 >= tmp12
tl.full([1], 16, tl.int64)
tmp19 = tl.load(in_ptr3 + (x0 + 16 * (-12 + x1) + 64 * x2), tmp16 &
xmask, other=0.0)
tmp20 = tl.where(tmp14, tmp15, tmp19)
tmp21 = tl.where(tmp9, tmp10, tmp20)
tmp22 = tl.where(tmp4, tmp5, tmp21)
tl.store(out_ptr0 + x3, tmp22, xmask)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 16, 4, 4), (256, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(1024)](arg0_1, arg1_1, arg2_1, arg3_1,
buf0, 1024, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
return buf0,
class Concat4New(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0, input_1, input_2, input_3):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0]
| mil-tokyo/webdnn | Concat4 | false | 16,068 | ["MIT"] | 1,967 | 38a60fd3e1a4e72bc01108189a3aa51e0752aecd | https://github.com/mil-tokyo/webdnn/tree/38a60fd3e1a4e72bc01108189a3aa51e0752aecd |
Cast | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/xw/cxwmrcimrksralgxvli55qy2jg2yq7sumzdt4qrony2pg6vfw6id.py
# Topologically Sorted Source Nodes: [type_1], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# type_1 => convert_element_type
# Graph fragment:
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%arg0_1, torch.int32), kwargs = {})
triton_poi_fused__to_copy_0 = async_compile.triton('triton_poi_fused__to_copy_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tmp0.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int32)
# Topologically Sorted Source Nodes: [type_1], Original ATen: [aten._to_copy]
stream0 = get_raw_stream(0)
triton_poi_fused__to_copy_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.onnx
import torch.nn as nn
class Cast(nn.Module):
def forward(self, x):
return x.type(torch.int32)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
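# Hedged behaviour check (added, CPU eager path): .type(torch.int32) truncates
# toward zero, so rand inputs in [0, 1) all map to 0 -- the same
# convert_element_type the Triton kernel performs with tmp0.to(tl.int32).
if __name__ == "__main__":
    out = Cast()(get_inputs()[0])
    assert out.dtype == torch.int32 and int(out.abs().sum()) == 0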
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.onnx
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0.to(tl.int32)
tl.store(out_ptr0 + x0, tmp1, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int32)
get_raw_stream(0)
        triton_poi_fused__to_copy_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class CastNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| mil-tokyo/webdnn | Cast | false | 16,069 | ["MIT"] | 1,967 | 38a60fd3e1a4e72bc01108189a3aa51e0752aecd | https://github.com/mil-tokyo/webdnn/tree/38a60fd3e1a4e72bc01108189a3aa51e0752aecd |
Asinh | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/e4/ce43sbzobrl74xsrke76vzaskmodalszacnmvlzyev6xwbgzpdru.py
# Topologically Sorted Source Nodes: [asinh], Original ATen: [aten.asinh]
# Source node to ATen node mapping:
# asinh => asinh
# Graph fragment:
# %asinh : [num_users=1] = call_function[target=torch.ops.aten.asinh.default](args = (%arg0_1,), kwargs = {})
triton_poi_fused_asinh_0 = async_compile.triton('triton_poi_fused_asinh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_asinh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_asinh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = libdevice.asinh(tmp0)
tl.store(out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [asinh], Original ATen: [aten.asinh]
stream0 = get_raw_stream(0)
triton_poi_fused_asinh_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.onnx
import torch.nn as nn
class Asinh(nn.Module):
def forward(self, x):
return torch.asinh(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
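# Hedged identity check (added, CPU eager path): asinh(x) = log(x + sqrt(x^2 + 1)),
# which is what libdevice.asinh evaluates on the GPU side.
if __name__ == "__main__":
    x = get_inputs()[0]
    assert torch.allclose(Asinh()(x), torch.log(x + torch.sqrt(x * x + 1)), atol=1e-6)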
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.onnx
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_asinh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = libdevice.asinh(tmp0)
tl.store(out_ptr0 + x0, tmp1, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_asinh_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class AsinhNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| mil-tokyo/webdnn | Asinh | false | 16,070 | ["MIT"] | 1,967 | 38a60fd3e1a4e72bc01108189a3aa51e0752aecd | https://github.com/mil-tokyo/webdnn/tree/38a60fd3e1a4e72bc01108189a3aa51e0752aecd |
ResBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/td/ctdv3m5a33kovvtng5iilth4k6mtnyfcota6hhwoiqm34iumu7wi.py
# Topologically Sorted Source Nodes: [pad], Original ATen: [aten.constant_pad_nd]
# Source node to ATen node mapping:
# pad => constant_pad_nd
# Graph fragment:
# %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_1, [1, 1, 1, 1], 0.0), kwargs = {})
triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 6) % 6
x0 = xindex % 6
x2 = (xindex // 36)
x4 = xindex
tmp0 = (-1) + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-1) + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x2)), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x4), tmp11, xmask)
''', device_str='cuda')
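# Reader note (added commentary, hedged): the bounds tests above implement a
# zero pad of one pixel on each side of the 4x4 spatial map, i.e. the
# equivalent of F.pad(x, (1, 1, 1, 1)), so the following 3x3 convolution can
# run with padding=(0, 0).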
# kernel path: runs/run_shard_0/inductor_cache/t6/ct6syu6rq3n7yx3zuog2yujcrfreefdccraqz7zj2m3c5xhvp5vl.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
# Source node to ATen node mapping:
# x => convolution
# x_1 => add, rsqrt, var_mean
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_per_fused__native_batch_norm_legit_convolution_1 = async_compile.triton('triton_per_fused__native_batch_norm_legit_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_convolution_1', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_1(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (r2 + (16*x3)), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 16.0
tmp20 = tmp18 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(in_out_ptr0 + (r2 + (16*x3)), tmp2, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + (x3), tmp23, xmask)
tl.store(out_ptr0 + (x3), tmp12, xmask)
''', device_str='cuda')
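# Reader note (added commentary, hedged): with 16 rows (N*C = 4*4) of 16
# elements (H*W = 4*4) each, this reduction adds the conv bias, then computes a
# per-(sample, channel) mean and rsqrt(var + 1e-5) -- the statistics of an
# affine-free InstanceNorm2d, which Inductor lowers via
# _native_batch_norm_legit on a (1, N*C, H, W) view.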
# kernel path: runs/run_shard_0/inductor_cache/da/cdaomigtpu7qm7mfdbrwghyh7hd5b5ma5oayrbdkijjlwdjmenyo.py
# Topologically Sorted Source Nodes: [pad_1], Original ATen: [aten.constant_pad_nd]
# Source node to ATen node mapping:
# pad_1 => constant_pad_nd_1
# Graph fragment:
# %constant_pad_nd_1 : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%view_3, [1, 1, 1, 1], 0.0), kwargs = {})
triton_poi_fused_constant_pad_nd_2 = async_compile.triton('triton_poi_fused_constant_pad_nd_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 6) % 6
x0 = xindex % 6
x2 = (xindex // 36)
x4 = xindex
tmp0 = (-1) + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-1) + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x2)), tmp10 & xmask, other=0.0)
tmp12 = tl.load(in_ptr1 + (x2), tmp10 & xmask, eviction_policy='evict_last', other=0.0)
tmp13 = tmp11 - tmp12
tmp14 = tl.load(in_ptr2 + (x2), tmp10 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tmp13 * tmp14
tmp16 = 0.0
tmp17 = tmp15 > tmp16
tmp18 = 0.2
tmp19 = tmp15 * tmp18
tmp20 = tl.where(tmp17, tmp15, tmp19)
tmp21 = tl.full(tmp20.shape, 0.0, tmp20.dtype)
tmp22 = tl.where(tmp10, tmp20, tmp21)
tl.store(out_ptr0 + (x4), tmp22, xmask)
''', device_str='cuda')
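# Reader note (added commentary, hedged): this kernel fuses three eager ops
# into the padded gather -- it normalizes the first conv's output with the
# cached mean and rsqrt, applies leaky_relu with negative_slope=0.2, and
# zero-pads the result by one pixel for the second 3x3 convolution.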
# kernel path: runs/run_shard_0/inductor_cache/zj/czjmq3zcguqrld6wqqldqbpx3xlawhci2nnuldynzx6eu2u7mdri.py
# Topologically Sorted Source Nodes: [x_3, out, out_1], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.add]
# Source node to ATen node mapping:
# out => add_1, rsqrt_1, var_mean_1
# out_1 => add_2
# x_3 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd_1, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_5, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_6, %primals_1), kwargs = {})
triton_per_fused__native_batch_norm_legit_add_convolution_3 = async_compile.triton('triton_per_fused__native_batch_norm_legit_add_convolution_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_add_convolution_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_convolution_3(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (r2 + (16*x3)), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr1 + (r2 + (16*x3)), xmask, other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = tmp2 - tmp12
tmp20 = 16.0
tmp21 = tmp18 / tmp20
tmp22 = 1e-05
tmp23 = tmp21 + tmp22
tmp24 = libdevice.rsqrt(tmp23)
tmp25 = tmp19 * tmp24
tmp27 = tmp25 + tmp26
tl.store(in_out_ptr0 + (r2 + (16*x3)), tmp2, xmask)
tl.store(out_ptr2 + (r2 + (16*x3)), tmp27, xmask)
tl.store(out_ptr3 + (x3), tmp24, xmask)
tl.store(out_ptr0 + (x3), tmp12, xmask)
''', device_str='cuda')
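# Hedged eager reference (added; not part of the generated dump): each of the
# 16 programs in the kernel above normalizes one (n, c) plane of 16 elements,
# i.e. instance norm with correction=0 followed by the residual add.
def _reference_in_add(conv_out, bias, residual, eps=1e-05):
    y = conv_out + bias.view(1, -1, 1, 1)
    mu = y.mean(dim=(2, 3), keepdim=True)
    var = y.var(dim=(2, 3), unbiased=False, keepdim=True)
    return (y - mu) * torch.rsqrt(var + eps) + residual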
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [pad], Original ATen: [aten.constant_pad_nd]
stream0 = get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0.run(primals_1, buf0, 576, grid=grid(576), stream=stream0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1; del buf1 # reuse
buf3 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
buf4 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf6 = reinterpret_tensor(buf4, (1, 16, 1, 1), (16, 1, 1, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_convolution_1.run(buf2, buf6, primals_3, buf3, 16, 16, grid=grid(16), stream=stream0)
del primals_3
buf7 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [pad_1], Original ATen: [aten.constant_pad_nd]
triton_poi_fused_constant_pad_nd_2.run(buf2, buf3, buf6, buf7, 576, grid=grid(576), stream=stream0)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 4, 4, 4), (64, 16, 4, 1))
buf9 = buf8; del buf8 # reuse
buf10 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf13 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [x_3, out, out_1], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.add]
triton_per_fused__native_batch_norm_legit_add_convolution_3.run(buf9, primals_5, primals_1, buf10, buf14, buf13, 16, 16, grid=grid(16), stream=stream0)
del primals_1
del primals_5
return (buf14, primals_2, primals_4, buf0, buf2, buf3, buf6, buf7, buf9, reinterpret_tensor(buf13, (16, ), (1, ), 0), reinterpret_tensor(buf10, (1, 16, 1, 1), (16, 1, 1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
class AdaptiveInstanceNorm2d(nn.Module):
def __init__(self, num_features, eps=1e-05, momentum=0.1):
super(AdaptiveInstanceNorm2d, self).__init__()
self.num_features = num_features
self.eps = eps
self.momentum = momentum
self.weight = None
self.bias = None
self.register_buffer('running_mean', torch.zeros(num_features))
self.register_buffer('running_var', torch.ones(num_features))
def forward(self, x):
assert self.weight is not None and self.bias is not None, 'Please assign weight and bias before calling AdaIN!'
b, c = x.size(0), x.size(1)
running_mean = self.running_mean.repeat(b)
running_var = self.running_var.repeat(b)
x_reshaped = x.contiguous().view(1, b * c, *x.size()[2:])
        out = F.batch_norm(x_reshaped, running_mean, running_var,
            self.weight, self.bias, True, self.momentum, self.eps)
return out.view(b, c, *x.size()[2:])
def __repr__(self):
return self.__class__.__name__ + '(' + str(self.num_features) + ')'
class ResBlock(nn.Module):
def __init__(self, dim, norm='in', activation='relu', pad_type='zero'):
super(ResBlock, self).__init__()
padding = 1
if pad_type == 'reflect':
self.pad = nn.ReflectionPad2d(padding)
elif pad_type == 'replicate':
self.pad = nn.ReplicationPad2d(padding)
elif pad_type == 'zero':
self.pad = nn.ZeroPad2d(padding)
else:
assert 0, 'Unsupported padding type: {}'.format(pad_type)
self.conv1 = nn.Conv2d(dim, dim, 3, 1, bias=True)
if norm == 'in':
self.norm1 = nn.InstanceNorm2d(dim)
elif norm == 'adain':
self.norm1 = AdaptiveInstanceNorm2d(dim)
self.relu1 = nn.LeakyReLU(0.2, inplace=True)
self.conv2 = nn.Conv2d(dim, dim, 3, 1, bias=True)
if norm == 'in':
self.norm2 = nn.InstanceNorm2d(dim)
elif norm == 'adain':
self.norm2 = AdaptiveInstanceNorm2d(dim)
def forward(self, x):
residual = x
x = self.conv1(self.pad(x))
x = self.norm1(x)
x = self.relu1(x)
x = self.conv2(self.pad(x))
out = self.norm2(x)
out += residual
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 6 % 6
x0 = xindex % 6
x2 = xindex // 36
x4 = xindex
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask,
other=0.0)
tl.store(out_ptr0 + x4, tmp11, xmask)
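# Hedged note (added): tmp10 gates the load to the interior 4x4 window of the
# 6x6 padded plane, and out-of-window lanes take other=0.0 -- exactly the
# ZeroPad2d(1) of the source module. A minimal eager sketch:
def _reference_pad(x):
    return F.pad(x, (1, 1, 1, 1))  # zero-pad H and W by 1 on each side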
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_1(in_out_ptr0,
in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)  # dead codegen artifact: the all-true rmask is never used
r2 = rindex
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (r2 + 16 * x3), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tl.where(xmask, tmp3, 0)  # dead codegen artifact: masked value computed but discarded
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 16.0
tmp20 = tmp18 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp2, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x3, tmp23, xmask)
tl.store(out_ptr0 + x3, tmp12, xmask)
@triton.jit
def triton_poi_fused_constant_pad_nd_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 6 % 6
x0 = xindex % 6
x2 = xindex // 36
x4 = xindex
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask,
other=0.0)
    tmp12 = tl.load(in_ptr1 + x2, tmp10 & xmask,
        eviction_policy='evict_last', other=0.0)
    tmp13 = tmp11 - tmp12
    tmp14 = tl.load(in_ptr2 + x2, tmp10 & xmask,
        eviction_policy='evict_last', other=0.0)
tmp15 = tmp13 * tmp14
tmp16 = 0.0
tmp17 = tmp15 > tmp16
tmp18 = 0.2
tmp19 = tmp15 * tmp18
tmp20 = tl.where(tmp17, tmp15, tmp19)
tmp21 = tl.full(tmp20.shape, 0.0, tmp20.dtype)
tmp22 = tl.where(tmp10, tmp20, tmp21)
tl.store(out_ptr0 + x4, tmp22, xmask)
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_convolution_3(in_out_ptr0,
    in_ptr0, in_ptr1, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel,
    XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)  # dead codegen artifact: the all-true rmask is never used
r2 = rindex
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (r2 + 16 * x3), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr1 + (r2 + 16 * x3), xmask, other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
    tl.where(xmask, tmp3, 0)  # dead codegen artifact: masked value computed but discarded
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = tmp2 - tmp12
tmp20 = 16.0
tmp21 = tmp18 / tmp20
tmp22 = 1e-05
tmp23 = tmp21 + tmp22
tmp24 = libdevice.rsqrt(tmp23)
tmp25 = tmp19 * tmp24
tmp27 = tmp25 + tmp26
tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp2, xmask)
tl.store(out_ptr2 + (r2 + 16 * x3), tmp27, xmask)
tl.store(out_ptr3 + x3, tmp24, xmask)
tl.store(out_ptr0 + x3, tmp12, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0[grid(576)](primals_1, buf0, 576,
XBLOCK=256, num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
buf3 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
        buf4 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf6 = reinterpret_tensor(buf4, (1, 16, 1, 1), (16, 1, 1, 1), 0)
del buf4
triton_per_fused__native_batch_norm_legit_convolution_1[grid(16)](buf2,
buf6, primals_3, buf3, 16, 16, XBLOCK=1, num_warps=2, num_stages=1)
del primals_3
buf7 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
triton_poi_fused_constant_pad_nd_2[grid(576)](buf2, buf3, buf6,
buf7, 576, XBLOCK=128, num_warps=4, num_stages=1)
buf8 = extern_kernels.convolution(buf7, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 4, 4, 4), (64, 16, 4, 1))
buf9 = buf8
del buf8
        buf10 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
        buf14 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
        buf13 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
        triton_per_fused__native_batch_norm_legit_add_convolution_3[grid(16)](
            buf9, primals_5, primals_1, buf10, buf14, buf13, 16, 16,
            XBLOCK=1, num_warps=2, num_stages=1)
del primals_1
del primals_5
return (buf14, primals_2, primals_4, buf0, buf2, buf3, buf6, buf7, buf9,
reinterpret_tensor(buf13, (16,), (1,), 0), reinterpret_tensor(buf10,
(1, 16, 1, 1), (16, 1, 1, 1), 0))
class AdaptiveInstanceNorm2d(nn.Module):
def __init__(self, num_features, eps=1e-05, momentum=0.1):
super(AdaptiveInstanceNorm2d, self).__init__()
self.num_features = num_features
self.eps = eps
self.momentum = momentum
self.weight = None
self.bias = None
self.register_buffer('running_mean', torch.zeros(num_features))
self.register_buffer('running_var', torch.ones(num_features))
def forward(self, x):
assert self.weight is not None and self.bias is not None, 'Please assign weight and bias before calling AdaIN!'
b, c = x.size(0), x.size(1)
running_mean = self.running_mean.repeat(b)
running_var = self.running_var.repeat(b)
x_reshaped = x.contiguous().view(1, b * c, *x.size()[2:])
        out = F.batch_norm(x_reshaped, running_mean, running_var,
            self.weight, self.bias, True, self.momentum, self.eps)
return out.view(b, c, *x.size()[2:])
def __repr__(self):
return self.__class__.__name__ + '(' + str(self.num_features) + ')'
class ResBlockNew(nn.Module):
def __init__(self, dim, norm='in', activation='relu', pad_type='zero'):
super(ResBlockNew, self).__init__()
padding = 1
if pad_type == 'reflect':
self.pad = nn.ReflectionPad2d(padding)
elif pad_type == 'replicate':
self.pad = nn.ReplicationPad2d(padding)
elif pad_type == 'zero':
self.pad = nn.ZeroPad2d(padding)
else:
assert 0, 'Unsupported padding type: {}'.format(pad_type)
self.conv1 = nn.Conv2d(dim, dim, 3, 1, bias=True)
if norm == 'in':
self.norm1 = nn.InstanceNorm2d(dim)
elif norm == 'adain':
self.norm1 = AdaptiveInstanceNorm2d(dim)
self.relu1 = nn.LeakyReLU(0.2, inplace=True)
self.conv2 = nn.Conv2d(dim, dim, 3, 1, bias=True)
if norm == 'in':
self.norm2 = nn.InstanceNorm2d(dim)
elif norm == 'adain':
self.norm2 = AdaptiveInstanceNorm2d(dim)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| microsoft/S2R-DepthNet | ResBlock | false | 16,071 | [
"MIT"
] | 144 | aebc931c7e8c7baad4dec2a0fd8643244741c52e | https://github.com/microsoft/S2R-DepthNet/tree/aebc931c7e8c7baad4dec2a0fd8643244741c52e |
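# Hedged usage sketch (added; assumes a CUDA device and the ResBlock /
# ResBlockNew definitions from the row above; the tolerance is an assumption):
# the compiled wrapper should agree with the eager module once weights match.
def _check_resblock():
    torch.manual_seed(0)
    eager = ResBlock(dim=4).cuda()
    fused = ResBlockNew(dim=4).cuda()
    fused.load_state_dict(eager.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(eager(x), fused(x), atol=1e-5)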
Sign | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/zy/czyscnnxj3g3lyyjhg3q4airae4625yoe5ygsjf2qxbxvi3gpiqn.py
# Topologically Sorted Source Nodes: [sign], Original ATen: [aten.sign]
# Source node to ATen node mapping:
# sign => sign
# Graph fragment:
# %sign : [num_users=1] = call_function[target=torch.ops.aten.sign.default](args = (%arg0_1,), kwargs = {})
triton_poi_fused_sign_0 = async_compile.triton('triton_poi_fused_sign_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sign_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sign_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp1 < tmp0
tmp3 = tmp2.to(tl.int8)
tmp4 = tmp0 < tmp1
tmp5 = tmp4.to(tl.int8)
tmp6 = tmp3 - tmp5
tmp7 = tmp6.to(tmp0.dtype)
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sign], Original ATen: [aten.sign]
stream0 = get_raw_stream(0)
triton_poi_fused_sign_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.onnx
import torch.nn as nn
class Sign(nn.Module):
def forward(self, x):
return torch.sign(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.onnx
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_sign_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp1 < tmp0
tmp3 = tmp2.to(tl.int8)
tmp4 = tmp0 < tmp1
tmp5 = tmp4.to(tl.int8)
tmp6 = tmp3 - tmp5
tmp7 = tmp6.to(tmp0.dtype)
tl.store(out_ptr0 + x0, tmp7, xmask)
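# Hedged note (added): the kernel computes a branchless sign,
# sign(x) = int8(0 < x) - int8(x < 0), so negatives map to -1.0, zero to 0.0,
# and positives to 1.0 after the cast back to the input dtype.
def _reference_sign(x):
    return (x > 0).to(x.dtype) - (x < 0).to(x.dtype)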
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_sign_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class SignNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| mil-tokyo/webdnn | Sign | false | 16,072 | [
"MIT"
] | 1,967 | 38a60fd3e1a4e72bc01108189a3aa51e0752aecd | https://github.com/mil-tokyo/webdnn/tree/38a60fd3e1a4e72bc01108189a3aa51e0752aecd |
Neg | # AOT ID: ['2_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/hk/chkwhavqfzukyktba7dvmj6ss52pfnxbzhqfb7gpkrye7sko7lrp.py
# Topologically Sorted Source Nodes: [losses], Original ATen: [aten.neg]
# Source node to ATen node mapping:
# losses => neg
# Graph fragment:
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%arg0_1,), kwargs = {})
triton_poi_fused_neg_0 = async_compile.triton('triton_poi_fused_neg_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_neg_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_neg_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = -tmp0
tl.store(out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [losses], Original ATen: [aten.neg]
stream0 = get_raw_stream(0)
triton_poi_fused_neg_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.onnx
import torch.nn as nn
class Neg(nn.Module):
def forward(self, x):
return torch.neg(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.onnx
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_neg_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = -tmp0
tl.store(out_ptr0 + x0, tmp1, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_neg_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class NegNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
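# Hedged usage sketch (added): `call` clears the argument list it is given,
# so pass a fresh list on every invocation.
def _run_neg(x_cuda):
    return call([x_cuda])[0]  # equivalent to torch.neg(x_cuda)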
| mil-tokyo/webdnn | Neg | false | 16,073 | [
"MIT"
] | 1,967 | 38a60fd3e1a4e72bc01108189a3aa51e0752aecd | https://github.com/mil-tokyo/webdnn/tree/38a60fd3e1a4e72bc01108189a3aa51e0752aecd |
Reciprocal | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/yb/cybxmqgstg473ic3ozmef5imn5esyxvm3ttfpkjco3dcshvnl2bq.py
# Topologically Sorted Source Nodes: [reciprocal], Original ATen: [aten.reciprocal]
# Source node to ATen node mapping:
# reciprocal => reciprocal
# Graph fragment:
# %reciprocal : [num_users=1] = call_function[target=torch.ops.aten.reciprocal.default](args = (%arg0_1,), kwargs = {})
triton_poi_fused_reciprocal_0 = async_compile.triton('triton_poi_fused_reciprocal_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reciprocal_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reciprocal_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp1 / tmp0
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [reciprocal], Original ATen: [aten.reciprocal]
stream0 = get_raw_stream(0)
triton_poi_fused_reciprocal_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.onnx
import torch.nn as nn
class Reciprocal(nn.Module):
def forward(self, x):
return torch.reciprocal(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.onnx
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_reciprocal_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.full([1], 1, tl.int32)
tmp2 = tmp1 / tmp0
tl.store(out_ptr0 + x0, tmp2, xmask)
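# Hedged note (added): tl.full([1], 1, tl.int32) / tmp0 promotes to float32
# division, so zero inputs yield inf, matching torch.reciprocal semantics.
def _reference_reciprocal(x):
    return 1.0 / x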
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
        triton_poi_fused_reciprocal_0[grid(256)](arg0_1, buf0, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class ReciprocalNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| mil-tokyo/webdnn | Reciprocal | false | 16,074 | [
"MIT"
] | 1,967 | 38a60fd3e1a4e72bc01108189a3aa51e0752aecd | https://github.com/mil-tokyo/webdnn/tree/38a60fd3e1a4e72bc01108189a3aa51e0752aecd |
ReduceSum3 | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/fu/cfugpijri3clfpuapc4wxa6wwpihjzjz2hqxwbepvdypwzl4hcwa.py
# Topologically Sorted Source Nodes: [sum_1], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_1 => sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%arg0_1, [1, 3]), kwargs = {})
triton_per_fused_sum_0 = async_compile.triton('triton_per_fused_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_sum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex % 4
r3 = (rindex // 4)
x0 = xindex % 4
x1 = (xindex // 4)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r2 + (4*x0) + (16*r3) + (64*x1)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tl.store(out_ptr0 + (x4), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sum_1], Original ATen: [aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_sum_0.run(arg0_1, buf0, 16, 16, grid=grid(16), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.onnx
import torch.nn as nn
class ReduceSum3(nn.Module):
def forward(self, x):
return torch.sum(x, (1, 3), keepdim=False)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.onnx
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_sum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
    tl.full([XBLOCK, RBLOCK], True, tl.int1)  # dead codegen artifact: the all-true rmask is never used
r2 = rindex % 4
r3 = rindex // 4
x0 = xindex % 4
x1 = xindex // 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r2 + 4 * x0 + 16 * r3 + 64 * x1), xmask,
other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tl.store(out_ptr0 + x4, tmp4, xmask)
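# Hedged note (added): with strides (64, 16, 4, 1) the flat address
# r2 + 4 * x0 + 16 * r3 + 64 * x1 decodes to input[n=x1, c=r3, h=x0, w=r2],
# so each program reduces one (n, h) pair over all 16 (c, w) combinations.
def _reference_reduce_sum3(x):
    return x.sum(dim=(1, 3))  # (N, C, H, W) -> (N, H)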
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_sum_0[grid(16)](arg0_1, buf0, 16, 16, XBLOCK=1,
num_warps=2, num_stages=1)
del arg0_1
return buf0,
class ReduceSum3New(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| mil-tokyo/webdnn | ReduceSum3 | false | 16,075 | [
"MIT"
] | 1,967 | 38a60fd3e1a4e72bc01108189a3aa51e0752aecd | https://github.com/mil-tokyo/webdnn/tree/38a60fd3e1a4e72bc01108189a3aa51e0752aecd |
Gather1D | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/sl/csltn76pmzp7oam6vowttuyiac4akgpnfflgqxfwkmln65ctjdyx.py
# Topologically Sorted Source Nodes: [getitem], Original ATen: [aten.index]
# Source node to ATen node mapping:
# getitem => index
# Graph fragment:
# %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%arg0_1, [%lift_fresh_copy]), kwargs = {})
triton_poi_fused_index_0 = async_compile.triton('triton_poi_fused_index_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 64)
x0 = xindex % 64
x2 = xindex
tmp0 = x1
tmp1 = tl.full([1], 1, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.full([1], 4, tl.int64)
tmp6 = tl.full([1], 5, tl.int64)
tmp7 = tl.where(tmp4, tmp5, tmp6)
tmp8 = tl.where(tmp2, tmp3, tmp7)
tmp9 = tl.load(in_ptr0 + (x0 + (64*tmp8)), xmask)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (6, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((3, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [getitem], Original ATen: [aten.index]
stream0 = get_raw_stream(0)
triton_poi_fused_index_0.run(arg0_1, buf0, 192, grid=grid(192), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((6, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.onnx
import torch.nn as nn
class Gather1D(nn.Module):
def forward(self, x):
return x[[2, 4, 5]]
def get_inputs():
return [torch.rand([6, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.onnx
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_index_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 64
x0 = xindex % 64
x2 = xindex
tmp0 = x1
tmp1 = tl.full([1], 1, tl.int64)
tmp2 = tmp0 < tmp1
tmp3 = tl.full([1], 2, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.full([1], 4, tl.int64)
tmp6 = tl.full([1], 5, tl.int64)
tmp7 = tl.where(tmp4, tmp5, tmp6)
tmp8 = tl.where(tmp2, tmp3, tmp7)
tmp9 = tl.load(in_ptr0 + (x0 + 64 * tmp8), xmask)
tl.store(out_ptr0 + x2, tmp9, xmask)
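# Hedged note (added): the nested tl.where chain is a compile-time lookup
# table mapping output row x1 in {0, 1, 2} to source row tmp8 in {2, 4, 5},
# i.e. the literal index list from x[[2, 4, 5]].
def _reference_gather1d(x):
    return x[torch.tensor([2, 4, 5], device=x.device)]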
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (6, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((3, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_index_0[grid(192)](arg0_1, buf0, 192, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class Gather1DNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| mil-tokyo/webdnn | Gather1D | false | 16,076 | [
"MIT"
] | 1,967 | 38a60fd3e1a4e72bc01108189a3aa51e0752aecd | https://github.com/mil-tokyo/webdnn/tree/38a60fd3e1a4e72bc01108189a3aa51e0752aecd |
ReduceMin | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/fe/cfe4viram64qoxaifupscvcwmob24bfwl5vfh5dvzxoani3drhwr.py
# Topologically Sorted Source Nodes: [min_1], Original ATen: [aten.min]
# Source node to ATen node mapping:
# min_1 => getitem
# Graph fragment:
# %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%min_1, 0), kwargs = {})
triton_poi_fused_min_0 = async_compile.triton('triton_poi_fused_min_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_min_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_min_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.minimum(tmp0, tmp1)
tmp4 = triton_helpers.minimum(tmp2, tmp3)
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [min_1], Original ATen: [aten.min]
stream0 = get_raw_stream(0)
triton_poi_fused_min_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.onnx
import torch.nn as nn
class ReduceMin(nn.Module):
def forward(self, x):
return torch.min(x, -1, keepdim=True)[0]
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.onnx
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_min_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.minimum(tmp0, tmp1)
tmp4 = triton_helpers.minimum(tmp2, tmp3)
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tl.store(out_ptr0 + x0, tmp6, xmask)
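# Hedged note (added): because the reduced axis has a static length of 4,
# Inductor emits a pointwise kernel that unrolls the min over four loads
# instead of using a persistent reduction.
def _reference_reduce_min(x):
    return x.min(dim=-1, keepdim=True).values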
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_min_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
class ReduceMinNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| mil-tokyo/webdnn | ReduceMin | false | 16,077 | [
"MIT"
] | 1,967 | 38a60fd3e1a4e72bc01108189a3aa51e0752aecd | https://github.com/mil-tokyo/webdnn/tree/38a60fd3e1a4e72bc01108189a3aa51e0752aecd |
ReduceMean | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/3z/c3zp6tzyjvuxkyhpqg37skoz3sh23okiwohqkcwueawnz24zmgzs.py
# Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# mean => mean
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [-1], True), kwargs = {})
triton_poi_fused_mean_0 = async_compile.triton('triton_poi_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_poi_fused_mean_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.onnx
import torch.nn as nn
class ReduceMean(nn.Module):
def forward(self, x):
return torch.mean(x, -1, keepdim=True)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.onnx
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
class ReduceMeanNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
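# Usage sketch (editorial addition; assumes a CUDA device is available):
#   x = torch.rand(4, 4, 4, 4, device='cuda')
#   y = ReduceMeanNew()(x)  # shape (4, 4, 4, 1)
#   assert torch.allclose(y, torch.mean(x, -1, keepdim=True))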
| mil-tokyo/webdnn | ReduceMean | false | 16,078 | [
"MIT"
] | 1,967 | 38a60fd3e1a4e72bc01108189a3aa51e0752aecd | https://github.com/mil-tokyo/webdnn/tree/38a60fd3e1a4e72bc01108189a3aa51e0752aecd |
ReduceSum | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ek/cekesse56z222vklldbcjcdpyhlgnxfldkdczm4xsok5kwwai7kt.py
# Topologically Sorted Source Nodes: [sum_1], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_1 => sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%arg0_1, [-1], True), kwargs = {})
triton_poi_fused_sum_0 = async_compile.triton('triton_poi_fused_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [sum_1], Original ATen: [aten.sum]
stream0 = get_raw_stream(0)
triton_poi_fused_sum_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.onnx
import torch.nn as nn
class ReduceSum(nn.Module):
def forward(self, x):
return torch.sum(x, -1, keepdim=True)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.onnx
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tl.store(out_ptr0 + x0, tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_sum_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
class ReduceSumNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
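# Usage sketch (editorial addition; assumes a CUDA device is available):
#   x = torch.rand(4, 4, 4, 4, device='cuda')
#   y = ReduceSumNew()(x)  # shape (4, 4, 4, 1)
#   assert torch.allclose(y, torch.sum(x, -1, keepdim=True))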
| mil-tokyo/webdnn | ReduceSum | false | 16,079 | [
"MIT"
] | 1,967 | 38a60fd3e1a4e72bc01108189a3aa51e0752aecd | https://github.com/mil-tokyo/webdnn/tree/38a60fd3e1a4e72bc01108189a3aa51e0752aecd |
Cosh | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/n2/cn2ttpejq4wyarkvnvxkhd4bmoj6o2jn27rcols3yup2kdbf6uff.py
# Topologically Sorted Source Nodes: [cosh], Original ATen: [aten.cosh]
# Source node to ATen node mapping:
# cosh => cosh
# Graph fragment:
# %cosh : [num_users=1] = call_function[target=torch.ops.aten.cosh.default](args = (%arg0_1,), kwargs = {})
triton_poi_fused_cosh_0 = async_compile.triton('triton_poi_fused_cosh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cosh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cosh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = libdevice.cosh(tmp0)
tl.store(out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cosh], Original ATen: [aten.cosh]
stream0 = get_raw_stream(0)
triton_poi_fused_cosh_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.onnx
import torch.nn as nn
class Cosh(nn.Module):
def forward(self, x):
return torch.cosh(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.onnx
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cosh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = libdevice.cosh(tmp0)
tl.store(out_ptr0 + x0, tmp1, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cosh_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class CoshNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
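# Usage sketch (editorial addition; assumes a CUDA device is available):
#   x = torch.rand(4, 4, 4, 4, device='cuda')
#   y = CoshNew()(x)  # elementwise, shape preserved
#   assert torch.allclose(y, torch.cosh(x))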
| mil-tokyo/webdnn | Cosh | false | 16,080 | [
"MIT"
] | 1,967 | 38a60fd3e1a4e72bc01108189a3aa51e0752aecd | https://github.com/mil-tokyo/webdnn/tree/38a60fd3e1a4e72bc01108189a3aa51e0752aecd |
Permute | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/z2/cz2lhzme5zyhs5hlawn2dswkebrh6k7bxajpzfsml6t5s7oyc4f5.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.add]
# Source node to ATen node mapping:
# x => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 1.0), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 1.0
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
return (reinterpret_tensor(buf0, (4, 4, 4), (1, 16, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.onnx
import torch.nn as nn
class Permute(nn.Module):
def forward(self, x):
x = x + 1.0
return x.permute(2, 0, 1)
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.onnx
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
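        # The permute itself is free: the (x + 1.0) buffer is returned with
        # swapped strides (1, 16, 4) rather than being copied.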
return reinterpret_tensor(buf0, (4, 4, 4), (1, 16, 4), 0),
class PermuteNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
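# Usage sketch (editorial addition; assumes a CUDA device is available):
#   x = torch.rand(4, 4, 4, device='cuda')
#   y = PermuteNew()(x)  # shape (4, 4, 4), non-contiguous (strides (1, 16, 4))
#   assert torch.equal(y, (x + 1.0).permute(2, 0, 1))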
| mil-tokyo/webdnn | Permute | false | 16,081 | [
"MIT"
] | 1,967 | 38a60fd3e1a4e72bc01108189a3aa51e0752aecd | https://github.com/mil-tokyo/webdnn/tree/38a60fd3e1a4e72bc01108189a3aa51e0752aecd |
Softsign | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/p5/cp5p3gl5ql57e4neclfv4vl2iwxdlgkslachrrr4wbs4bafshsmq.py
# Topologically Sorted Source Nodes: [softsign], Original ATen: [aten.abs, aten.add, aten.div]
# Source node to ATen node mapping:
# softsign => abs_1, add, div
# Graph fragment:
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg0_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%abs_1, 1), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %add), kwargs = {})
triton_poi_fused_abs_add_div_0 = async_compile.triton('triton_poi_fused_abs_add_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_abs_add_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_abs_add_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl_math.abs(tmp0)
tmp2 = 1.0
tmp3 = tmp1 + tmp2
tmp4 = tmp0 / tmp3
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softsign], Original ATen: [aten.abs, aten.add, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_abs_add_div_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.onnx
import torch.nn as nn
class Softsign(nn.Module):
def forward(self, x):
return torch.nn.Softsign()(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.onnx
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_abs_add_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl_math.abs(tmp0)
tmp2 = 1.0
tmp3 = tmp1 + tmp2
tmp4 = tmp0 / tmp3
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
        triton_poi_fused_abs_add_div_0[grid(256)](arg0_1, buf0, 256,
            XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class SoftsignNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
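# Usage sketch (editorial addition; assumes a CUDA device is available):
#   x = torch.rand(4, 4, 4, 4, device='cuda')
#   y = SoftsignNew()(x)  # elementwise, shape preserved
#   assert torch.allclose(y, x / (1.0 + x.abs()))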
| mil-tokyo/webdnn | Softsign | false | 16,082 | [
"MIT"
] | 1,967 | 38a60fd3e1a4e72bc01108189a3aa51e0752aecd | https://github.com/mil-tokyo/webdnn/tree/38a60fd3e1a4e72bc01108189a3aa51e0752aecd |
ReduceMax | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/54/c54fiiozms64dqfszq2hf52cdztx43kas6yivnlda7p3bxzbtzle.py
# Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max]
# Source node to ATen node mapping:
# max_1 => getitem
# Graph fragment:
# %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%max_1, 0), kwargs = {})
triton_poi_fused_max_0 = async_compile.triton('triton_poi_fused_max_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [max_1], Original ATen: [aten.max]
stream0 = get_raw_stream(0)
triton_poi_fused_max_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.onnx
import torch.nn as nn
class ReduceMax(nn.Module):
def forward(self, x):
return torch.max(x, -1, keepdim=True)[0]
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.onnx
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_max_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tl.store(out_ptr0 + x0, tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_max_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
class ReduceMaxNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
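# Usage sketch (editorial addition; assumes a CUDA device is available):
#   x = torch.rand(4, 4, 4, 4, device='cuda')
#   y = ReduceMaxNew()(x)  # shape (4, 4, 4, 1)
#   assert torch.equal(y, torch.max(x, -1, keepdim=True)[0])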
| mil-tokyo/webdnn | ReduceMax | false | 16,083 | [
"MIT"
] | 1,967 | 38a60fd3e1a4e72bc01108189a3aa51e0752aecd | https://github.com/mil-tokyo/webdnn/tree/38a60fd3e1a4e72bc01108189a3aa51e0752aecd |
ReLUExp | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/iy/ciyr4fx5ubxtilwwtvo24ydxq2ptznndh3cipavf3zk3ycnma4dr.py
# Topologically Sorted Source Nodes: [relu, exp], Original ATen: [aten.relu, aten.exp]
# Source node to ATen node mapping:
# exp => exp
# relu => relu
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%arg0_1,), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%relu,), kwargs = {})
triton_poi_fused_exp_relu_0 = async_compile.triton('triton_poi_fused_exp_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_exp_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_exp_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = tl_math.exp(tmp2)
tl.store(out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [relu, exp], Original ATen: [aten.relu, aten.exp]
stream0 = get_raw_stream(0)
triton_poi_fused_exp_relu_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.onnx
import torch.nn as nn
import torch.nn.functional as F
class ReLUExp(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return torch.exp(F.relu(x))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.onnx
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_exp_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = tl_math.exp(tmp2)
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
        triton_poi_fused_exp_relu_0[grid(256)](arg0_1, buf0, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class ReLUExpNew(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
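# Usage sketch (editorial addition; assumes a CUDA device is available):
#   x = torch.rand(4, 4, 4, 4, device='cuda')
#   y = ReLUExpNew()(x)  # elementwise, shape preserved
#   assert torch.allclose(y, torch.exp(torch.relu(x)))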
| mil-tokyo/webdnn | ReLUExp | false | 16,084 | [
"MIT"
] | 1,967 | 38a60fd3e1a4e72bc01108189a3aa51e0752aecd | https://github.com/mil-tokyo/webdnn/tree/38a60fd3e1a4e72bc01108189a3aa51e0752aecd |
Cube | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/3d/c3dangftdmmv7x7wunyed6apz7pjkkshsuijcbifrlbl6doqwg4b.py
# Topologically Sorted Source Nodes: [pow_1], Original ATen: [aten.pow]
# Source node to ATen node mapping:
# pow_1 => pow_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 3), kwargs = {})
triton_poi_fused_pow_0 = async_compile.triton('triton_poi_fused_pow_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_pow_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tmp0 * tmp0
tmp2 = tmp1 * tmp0
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pow_1], Original ATen: [aten.pow]
stream0 = get_raw_stream(0)
triton_poi_fused_pow_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class CubeFunctionBackward(torch.autograd.Function):
@staticmethod
def forward(ctx, X, M):
ctx.save_for_backward(X, M)
return M * 3 * X ** 2
@staticmethod
def backward(ctx, V):
X, M = ctx.saved_tensors
return V * 6 * X * M, V * 3 * X ** 2
class CubeFunction(torch.autograd.Function):
"""
Dummy activation function x -> x ** 3
"""
@staticmethod
def forward(ctx, X):
ctx.save_for_backward(X)
return X ** 3
@staticmethod
def backward(ctx, M):
X, = ctx.saved_tensors
return CubeFunctionBackward.apply(X, M)
class Cube(nn.Module):
def __init__(self):
super().__init__()
def forward(self, X):
return CubeFunction.apply(X)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0 * tmp0
tmp2 = tmp1 * tmp0
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_pow_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class CubeFunctionBackward(torch.autograd.Function):
@staticmethod
def forward(ctx, X, M):
ctx.save_for_backward(X, M)
return M * 3 * X ** 2
@staticmethod
def backward(ctx, V):
X, M = ctx.saved_tensors
return V * 6 * X * M, V * 3 * X ** 2
class CubeFunction(torch.autograd.Function):
"""
Dummy activation function x -> x ** 3
"""
@staticmethod
def forward(ctx, X):
ctx.save_for_backward(X)
return X ** 3
@staticmethod
def backward(ctx, M):
X, = ctx.saved_tensors
return CubeFunctionBackward.apply(X, M)
class CubeNew(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
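# Usage sketch (editorial addition; assumes a CUDA device is available):
#   x = torch.rand(4, 4, 4, 4, device='cuda')
#   y = CubeNew()(x)  # elementwise x ** 3, shape preserved
#   assert torch.allclose(y, x ** 3)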
| minhnhat93/didyprog | Cube | false | 16,086 | [
"MIT"
] | 57 | 78886ed939d269b9b2bcb192bf849aa34082880c | https://github.com/minhnhat93/didyprog/tree/78886ed939d269b9b2bcb192bf849aa34082880c |
ReduceSum2 | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/fu/cfugpijri3clfpuapc4wxa6wwpihjzjz2hqxwbepvdypwzl4hcwa.py
# Topologically Sorted Source Nodes: [sum_1], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_1 => sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%arg0_1, [1, 3], True), kwargs = {})
triton_per_fused_sum_0 = async_compile.triton('triton_per_fused_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_sum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex % 4
r3 = (rindex // 4)
x0 = xindex % 4
x1 = (xindex // 4)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r2 + (4*x0) + (16*r3) + (64*x1)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tl.store(out_ptr0 + (x4), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 1), (4, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [sum_1], Original ATen: [aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_sum_0.run(arg0_1, buf0, 16, 16, grid=grid(16), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.onnx
import torch.nn as nn
class ReduceSum2(nn.Module):
def forward(self, x):
return torch.sum(x, (1, 3), keepdim=True)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.onnx
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_sum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
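    # xindex enumerates the 16 kept positions (dims 0 and 2); rindex the 16
    # reduced elements (dims 1 and 3). Both are decomposed below to rebuild
    # input offsets against strides (64, 16, 4, 1).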
r2 = rindex % 4
r3 = rindex // 4
x0 = xindex % 4
x1 = xindex // 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (r2 + 4 * x0 + 16 * r3 + 64 * x1), xmask,
other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tl.store(out_ptr0 + x4, tmp4, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 4, 1), (4, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_sum_0[grid(16)](arg0_1, buf0, 16, 16, XBLOCK=8,
num_warps=2, num_stages=1)
del arg0_1
return buf0,
class ReduceSum2New(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
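# Usage sketch (editorial addition; assumes a CUDA device is available; the
# kernel accumulates in a different order than eager mode, so compare with a
# tolerance):
#   x = torch.rand(4, 4, 4, 4, device='cuda')
#   y = ReduceSum2New()(x)  # shape (4, 1, 4, 1)
#   assert torch.allclose(y, torch.sum(x, (1, 3), keepdim=True))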
| mil-tokyo/webdnn | ReduceSum2 | false | 16,087 | [
"MIT"
] | 1,967 | 38a60fd3e1a4e72bc01108189a3aa51e0752aecd | https://github.com/mil-tokyo/webdnn/tree/38a60fd3e1a4e72bc01108189a3aa51e0752aecd |
MaxPool | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/zy/czyoewkxjsfypm4kcck3gqsi4qzf6zf5nh3u7zyx3czerrtp5gvf.py
# Topologically Sorted Source Nodes: [max_pool2d], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# max_pool2d => getitem
# Graph fragment:
# %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 2) % 2
x0 = xindex % 2
x3 = (xindex // 2)
x4 = xindex
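    # Each program computes one output element: a 3x3 max over the window
    # anchored at (2*x1, 2*x0) in the 4x4 plane selected by x3; taps that
    # fall outside the input (possible because ceil_mode=True) are masked
    # and contribute float("-inf").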
tmp0 = 2*x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = 2*x0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + ((2*x0) + (8*x3)), tmp10 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp12 = 1 + (2*x0)
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (1 + (2*x0) + (8*x3)), tmp16 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 2 + (2*x0)
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (2 + (2*x0) + (8*x3)), tmp23 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 1 + (2*x1)
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + (4 + (2*x0) + (8*x3)), tmp30 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + (5 + (2*x0) + (8*x3)), tmp33 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (6 + (2*x0) + (8*x3)), tmp36 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = 2 + (2*x1)
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (8 + (2*x0) + (8*x3)), tmp43 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp45 = triton_helpers.maximum(tmp44, tmp38)
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (9 + (2*x0) + (8*x3)), tmp46 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (10 + (2*x0) + (8*x3)), tmp49 & xmask, eviction_policy='evict_last', other=float("-inf"))
tmp51 = triton_helpers.maximum(tmp50, tmp48)
tl.store(out_ptr0 + (x4), tmp51, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [max_pool2d], Original ATen: [aten.max_pool2d_with_indices]
stream0 = get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.onnx
import torch.nn as nn
class MaxPool(nn.Module):
def __init__(self):
super().__init__()
self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=0,
ceil_mode=True)
def forward(self, x):
return self.pool(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.onnx
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 2 % 2
x0 = xindex % 2
x3 = xindex // 2
x4 = xindex
tmp0 = 2 * x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tmp2 & tmp4
tmp6 = 2 * x0
tmp7 = tmp6 >= tmp1
tmp8 = tmp6 < tmp3
tmp9 = tmp7 & tmp8
tmp10 = tmp5 & tmp9
tmp11 = tl.load(in_ptr0 + (2 * x0 + 8 * x3), tmp10 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp12 = 1 + 2 * x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp13 & tmp14
tmp16 = tmp5 & tmp15
tmp17 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x3), tmp16 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 2 + 2 * x0
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp20 & tmp21
tmp23 = tmp5 & tmp22
tmp24 = tl.load(in_ptr0 + (2 + 2 * x0 + 8 * x3), tmp23 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 1 + 2 * x1
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp27 & tmp28
tmp30 = tmp29 & tmp9
tmp31 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x3), tmp30 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = tmp29 & tmp15
tmp34 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x3), tmp33 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp35 = triton_helpers.maximum(tmp34, tmp32)
tmp36 = tmp29 & tmp22
tmp37 = tl.load(in_ptr0 + (6 + 2 * x0 + 8 * x3), tmp36 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp38 = triton_helpers.maximum(tmp37, tmp35)
tmp39 = 2 + 2 * x1
tmp40 = tmp39 >= tmp1
tmp41 = tmp39 < tmp3
tmp42 = tmp40 & tmp41
tmp43 = tmp42 & tmp9
tmp44 = tl.load(in_ptr0 + (8 + 2 * x0 + 8 * x3), tmp43 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp45 = triton_helpers.maximum(tmp44, tmp38)
tmp46 = tmp42 & tmp15
tmp47 = tl.load(in_ptr0 + (9 + 2 * x0 + 8 * x3), tmp46 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp48 = triton_helpers.maximum(tmp47, tmp45)
tmp49 = tmp42 & tmp22
tmp50 = tl.load(in_ptr0 + (10 + 2 * x0 + 8 * x3), tmp49 & xmask,
eviction_policy='evict_last', other=float('-inf'))
tmp51 = triton_helpers.maximum(tmp50, tmp48)
tl.store(out_ptr0 + x4, tmp51, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 2, 2), (16, 4, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0[grid(64)](arg0_1, buf0,
64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
return buf0,
class MaxPoolNew(nn.Module):
def __init__(self):
super().__init__()
self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=0,
ceil_mode=True)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| mil-tokyo/webdnn | MaxPool | false | 16,088 | [
"MIT"
] | 1,967 | 38a60fd3e1a4e72bc01108189a3aa51e0752aecd | https://github.com/mil-tokyo/webdnn/tree/38a60fd3e1a4e72bc01108189a3aa51e0752aecd |
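A hedged shape check for the MaxPool record above: with ceil_mode=True the output extent is ceil((4 + 2*0 - 3) / 2) + 1 = 2 per spatial dim, where floor mode would give 1; this matches the (4, 4, 2, 2) buffer allocated in call().

import torch
import torch.nn as nn

pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=0, ceil_mode=True)
y = pool(torch.rand(4, 4, 4, 4))
assert y.shape == (4, 4, 2, 2)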
Tan | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/pl/cplgw6lo7axu4gxqjglf3tz5zvdlexgadsjyhboti7whuhtooomm.py
# Topologically Sorted Source Nodes: [tan], Original ATen: [aten.tan]
# Source node to ATen node mapping:
# tan => tan
# Graph fragment:
# %tan : [num_users=1] = call_function[target=torch.ops.aten.tan.default](args = (%arg0_1,), kwargs = {})
triton_poi_fused_tan_0 = async_compile.triton('triton_poi_fused_tan_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_tan_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_tan_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = libdevice.tan(tmp0)
tl.store(out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [tan], Original ATen: [aten.tan]
stream0 = get_raw_stream(0)
triton_poi_fused_tan_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.onnx
import torch.nn as nn
class Tan(nn.Module):
def forward(self, x):
return torch.tan(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.onnx
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_tan_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = libdevice.tan(tmp0)
tl.store(out_ptr0 + x0, tmp1, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_tan_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class TanNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| mil-tokyo/webdnn | Tan | false | 16,089 | [
"MIT"
] | 1,967 | 38a60fd3e1a4e72bc01108189a3aa51e0752aecd | https://github.com/mil-tokyo/webdnn/tree/38a60fd3e1a4e72bc01108189a3aa51e0752aecd |
ReduceProd | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/if/cifookltmrnygtemn7436fympa5nsbye4eynvmfudscu5wu2hbre.py
# Topologically Sorted Source Nodes: [prod], Original ATen: [aten.prod]
# Source node to ATen node mapping:
# prod => prod
# Graph fragment:
# %prod : [num_users=1] = call_function[target=torch.ops.aten.prod.dim_int](args = (%arg0_1, -1, True), kwargs = {})
triton_poi_fused_prod_0 = async_compile.triton('triton_poi_fused_prod_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_prod_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_prod_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [prod], Original ATen: [aten.prod]
stream0 = get_raw_stream(0)
triton_poi_fused_prod_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.onnx
import torch.nn as nn
class ReduceProd(nn.Module):
def forward(self, x):
return torch.prod(x, -1, keepdim=True)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.onnx
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_prod_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tl.store(out_ptr0 + x0, tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_prod_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
class ReduceProdNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| mil-tokyo/webdnn | ReduceProd | false | 16,090 | [
"MIT"
] | 1,967 | 38a60fd3e1a4e72bc01108189a3aa51e0752aecd | https://github.com/mil-tokyo/webdnn/tree/38a60fd3e1a4e72bc01108189a3aa51e0752aecd |
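A small equivalence sketch for the ReduceProd record above (illustrative only): because the reduced axis has fixed size 4, the generated kernel unrolls the product into three multiplies, matching the eager reduction.

import torch

x = torch.rand(4, 4, 4, 4)
manual = (x[..., 0] * x[..., 1] * x[..., 2] * x[..., 3]).unsqueeze(-1)
assert torch.allclose(torch.prod(x, -1, keepdim=True), manual)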
LinearPotential | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/7y/c7y3pudkrsmghaqbs6ez3b5i7sprk7rieig2l3evimupubwgasie.py
# Topologically Sorted Source Nodes: [potentials, setitem], Original ATen: [aten.add, aten.copy]
# Source node to ATen node mapping:
# potentials => add_1
# setitem => copy
# Graph fragment:
# %add_1 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%unsqueeze_3, %unsqueeze_5), kwargs = {})
# %copy : [num_users=1] = call_function[target=torch.ops.aten.copy.default](args = (%select_1, %expand), kwargs = {})
# %select_scatter_default : [num_users=1] = call_function[target=torch.ops.aten.select_scatter.default](args = (%add_1, %copy, 1, 0), kwargs = {})
triton_poi_fused_add_copy_0 = async_compile.triton('triton_poi_fused_add_copy_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_copy_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_copy_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 16) % 4
x1 = (xindex // 4) % 4
x3 = (xindex // 64)
x4 = (xindex // 4)
x6 = xindex % 16
x7 = xindex
tmp3 = tl.load(in_ptr0 + (x1 + (16*x3)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (x6), xmask, eviction_policy='evict_last')
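    # x2 is the sequence position: position 0 takes the copied branch
    # (unary potential plus bias, broadcast over the trailing state axis),
    # while every other position also adds the transition matrix.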
tmp0 = x2
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tmp3 + tmp4
tmp7 = tmp6 + tmp4
tmp9 = tmp7 + tmp8
tmp10 = tl.where(tmp2, tmp5, tmp9)
tl.store(out_ptr0 + (x7), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0), primals_3, out=buf0)
del primals_3
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [potentials, setitem], Original ATen: [aten.add, aten.copy]
stream0 = get_raw_stream(0)
triton_poi_fused_add_copy_0.run(buf0, primals_4, primals_1, buf1, 256, grid=grid(256), stream=stream0)
del buf0
del primals_1
del primals_4
return (buf1, reinterpret_tensor(primals_2, (4, 16), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
from torch.nn import Parameter
class LinearPotential(torch.nn.Module):
def __init__(self, n_features, n_states, init_idx=None, eos_idx=None):
super(LinearPotential, self).__init__()
self.transition = Parameter(torch.zeros((n_states, n_states)))
self.init_idx = init_idx
self.eos_idx = eos_idx
self.weight = Parameter(torch.zeros((n_features, n_states)))
self.bias = Parameter(torch.zeros(n_states))
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.weight)
nn.init.xavier_uniform_(self.transition)
self.bias.data.fill_(0.0)
def forward(self, X):
n_states = self.transition.shape[0]
batch_size = X.shape[0]
unary_potentials = torch.matmul(X, self.weight[None, :, :]
) + self.bias[None, None, :]
potentials = unary_potentials[:, :, :, None] + self.transition[None,
None, :, :]
if self.init_idx is not None:
potentials[:, 0, :, :] = 0
else:
potentials[:, 0, :, :] = unary_potentials[:, 0, :, None].expand(
-1, -1, n_states)
if self.eos_idx is not None:
potentials[:, -1, :, :] = self.transition[None, :, :].expand(
batch_size, -1, -1)
return potentials
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'n_features': 4, 'n_states': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
from torch.nn import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_copy_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16 % 4
x1 = xindex // 4 % 4
x3 = xindex // 64
x4 = xindex // 4
x6 = xindex % 16
x7 = xindex
tmp3 = tl.load(in_ptr0 + (x1 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + x6, xmask, eviction_policy='evict_last')
tmp0 = x2
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = tmp0 == tmp1
tmp5 = tmp3 + tmp4
tmp7 = tmp6 + tmp4
tmp9 = tmp7 + tmp8
tmp10 = tl.where(tmp2, tmp5, tmp9)
tl.store(out_ptr0 + x7, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (16, 4), (4, 1), 0),
primals_3, out=buf0)
del primals_3
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_copy_0[grid(256)](buf0, primals_4, primals_1,
buf1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_1
del primals_4
return buf1, reinterpret_tensor(primals_2, (4, 16), (1, 4), 0)
class LinearPotentialNew(torch.nn.Module):
def __init__(self, n_features, n_states, init_idx=None, eos_idx=None):
super(LinearPotentialNew, self).__init__()
self.transition = Parameter(torch.zeros((n_states, n_states)))
self.init_idx = init_idx
self.eos_idx = eos_idx
self.weight = Parameter(torch.zeros((n_features, n_states)))
self.bias = Parameter(torch.zeros(n_states))
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.weight)
nn.init.xavier_uniform_(self.transition)
self.bias.data.fill_(0.0)
def forward(self, input_0):
primals_1 = self.transition
primals_3 = self.weight
primals_4 = self.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| minhnhat93/didyprog | LinearPotential | false | 16,091 | [
"MIT"
] | 57 | 78886ed939d269b9b2bcb192bf849aa34082880c | https://github.com/minhnhat93/didyprog/tree/78886ed939d269b9b2bcb192bf849aa34082880c |
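A shape sketch for the LinearPotential record above, reusing the LinearPotential class defined in this record with its init arguments (n_features=4, n_states=4): the unary potentials broadcast against the (n_states, n_states) transition matrix, giving a (batch, seq_len, n_states, n_states) tensor.

import torch

model = LinearPotential(n_features=4, n_states=4)
out = model(torch.rand(4, 4, 4))
# unary (4, 4, 4)[..., None] + transition (4, 4) -> (4, 4, 4, 4)
assert out.shape == (4, 4, 4, 4)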
MSELoss2d | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/qz/cqza6p5fjiie2hfiu5dfjqqugrnzziwuwxzlhzy2aa7khopxjbym.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/m4/cm4xtb3bnuq3knchbeowql4btzwa7fgl5fl2ojphcrhtylwyafoa.py
# Topologically Sorted Source Nodes: [softmax, loss], Original ATen: [aten._softmax, aten.mse_loss]
# Source node to ATen node mapping:
# loss => mean, pow_1, sub_1
# softmax => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, %arg1_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
triton_per_fused__softmax_mse_loss_1 = async_compile.triton('triton_per_fused__softmax_mse_loss_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__softmax_mse_loss_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 6, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__softmax_mse_loss_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = (rindex // 64)
tmp0 = tl.load(in_ptr0 + (r3), None)
tmp1 = tl.load(in_ptr0 + (r0 + (64*r2)), None, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (r3), None)
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp10 = tmp8 - tmp9
tmp11 = tmp10 * tmp10
tmp12 = tl.broadcast_to(tmp11, [RBLOCK])
tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0))
tmp15 = 256.0
tmp16 = tmp14 / tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp16, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [softmax, loss], Original ATen: [aten._softmax, aten.mse_loss]
triton_per_fused__softmax_mse_loss_1.run(buf2, buf0, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg1_1
del buf0
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class MSELoss2d(nn.Module):
def __init__(self, size_average=None, reduce=None, reduction='mean',
ignore_index=255):
super(MSELoss2d, self).__init__()
self.MSE = nn.MSELoss(size_average=size_average, reduce=reduce,
reduction=reduction)
def forward(self, output, target):
loss = self.MSE(torch.softmax(output, dim=1), target)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_per_fused__softmax_mse_loss_1(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
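    # The bare tl.full(...) expressions below are discarded; they appear to
    # be leftover xindex/xmask/rmask values from the generated kernel (the
    # whole 256-element reduction runs in a single program).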
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r3 = rindex
r0 = rindex % 16
r2 = rindex // 64
tmp0 = tl.load(in_ptr0 + r3, None)
    tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr1 + r3, None)
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp10 = tmp8 - tmp9
tmp11 = tmp10 * tmp10
tmp12 = tl.broadcast_to(tmp11, [RBLOCK])
tmp14 = triton_helpers.promote_to_tensor(tl.sum(tmp12, 0))
tmp15 = 256.0
tmp16 = tmp14 / tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp16, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
triton_per_fused__softmax_mse_loss_1[grid(1)](buf2, buf0, arg1_1, 1,
256, num_warps=2, num_stages=1)
del arg1_1
del buf0
return buf2,
class MSELoss2dNew(nn.Module):
def __init__(self, size_average=None, reduce=None, reduction='mean',
ignore_index=255):
super(MSELoss2dNew, self).__init__()
self.MSE = nn.MSELoss(size_average=size_average, reduce=reduce,
reduction=reduction)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| mimiliaogo/DACS | MSELoss2d | false | 16,092 | [
"MIT"
] | 59 | 9f13e32566c293de560df4848b23631d9e11cf32 | https://github.com/mimiliaogo/DACS/tree/9f13e32566c293de560df4848b23631d9e11cf32 |
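A hedged cross-check for the MSELoss2d record above, reusing the MSELoss2d class defined in this record: the fused softmax + MSE kernels should agree with the eager computation.

import torch
import torch.nn.functional as F

output, target = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
ref = F.mse_loss(torch.softmax(output, dim=1), target)
assert torch.allclose(MSELoss2d()(output, target), ref)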
AugCNN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ur/curzudn4ai4j7lgrmbqwy57jpcw3gylwk4nkg6jt7lqh577w5ku7.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 49152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 3
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_2, (3, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_3, (3, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 3, 64, 64), (12288, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_3, 49152, grid=grid(49152), stream=stream0)
del primals_3
return (buf1, primals_1, primals_2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((3, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
def apply_init_(modules):
"""
Initialize NN modules
"""
for m in modules:
if isinstance(m, nn.Conv2d):
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
class Conv2d_tf(nn.Conv2d):
"""
Conv2d with the padding behavior from TF
"""
def __init__(self, *args, **kwargs):
super(Conv2d_tf, self).__init__(*args, **kwargs)
self.padding = kwargs.get('padding', 'SAME')
def _compute_padding(self, input, dim):
input_size = input.size(dim + 2)
filter_size = self.weight.size(dim + 2)
effective_filter_size = (filter_size - 1) * self.dilation[dim] + 1
out_size = (input_size + self.stride[dim] - 1) // self.stride[dim]
total_padding = max(0, (out_size - 1) * self.stride[dim] +
effective_filter_size - input_size)
additional_padding = int(total_padding % 2 != 0)
return additional_padding, total_padding
def forward(self, input):
if self.padding == 'VALID':
return F.conv2d(input, self.weight, self.bias, self.stride,
padding=0, dilation=self.dilation, groups=self.groups)
rows_odd, padding_rows = self._compute_padding(input, dim=0)
cols_odd, padding_cols = self._compute_padding(input, dim=1)
if rows_odd or cols_odd:
input = F.pad(input, [0, cols_odd, 0, rows_odd])
return F.conv2d(input, self.weight, self.bias, self.stride, padding
=(padding_rows // 2, padding_cols // 2), dilation=self.dilation,
groups=self.groups)
class AugCNN(nn.Module):
"""
Convolutional Neural Network used as Augmentation
"""
def __init__(self):
super(AugCNN, self).__init__()
self.aug = Conv2d_tf(3, 3, kernel_size=3)
apply_init_(self.modules())
self.train()
def forward(self, obs):
return self.aug(obs)
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
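    # The discarded tl.full(...) below is a leftover all-true xmask from the
    # generated kernel; no masking is needed since every index is in bounds.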
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 3
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_2, (3, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_3, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 3, 64, 64), (12288, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(49152)](buf1, primals_3, 49152,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
return buf1, primals_1, primals_2
def apply_init_(modules):
"""
Initialize NN modules
"""
for m in modules:
if isinstance(m, nn.Conv2d):
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
class Conv2d_tf(nn.Conv2d):
"""
Conv2d with the padding behavior from TF
"""
def __init__(self, *args, **kwargs):
super(Conv2d_tf, self).__init__(*args, **kwargs)
self.padding = kwargs.get('padding', 'SAME')
def _compute_padding(self, input, dim):
input_size = input.size(dim + 2)
filter_size = self.weight.size(dim + 2)
effective_filter_size = (filter_size - 1) * self.dilation[dim] + 1
out_size = (input_size + self.stride[dim] - 1) // self.stride[dim]
total_padding = max(0, (out_size - 1) * self.stride[dim] +
effective_filter_size - input_size)
additional_padding = int(total_padding % 2 != 0)
return additional_padding, total_padding
def forward(self, input):
if self.padding == 'VALID':
return F.conv2d(input, self.weight, self.bias, self.stride,
padding=0, dilation=self.dilation, groups=self.groups)
rows_odd, padding_rows = self._compute_padding(input, dim=0)
cols_odd, padding_cols = self._compute_padding(input, dim=1)
if rows_odd or cols_odd:
input = F.pad(input, [0, cols_odd, 0, rows_odd])
return F.conv2d(input, self.weight, self.bias, self.stride, padding
=(padding_rows // 2, padding_cols // 2), dilation=self.dilation,
groups=self.groups)
class AugCNNNew(nn.Module):
"""
Convolutional Neural Network used as Augmentation
"""
def __init__(self):
super(AugCNNNew, self).__init__()
self.aug = Conv2d_tf(3, 3, kernel_size=3)
apply_init_(self.modules())
self.train()
def forward(self, input_0):
primals_2 = self.aug.weight
primals_3 = self.aug.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| minqi/auto-drac | AugCNN | false | 16,093 | [
"MIT"
] | 84 | 59a25bbabd51946d7a645db9c5d59071b73b006d | https://github.com/minqi/auto-drac/tree/59a25bbabd51946d7a645db9c5d59071b73b006d |
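A worked instance of the 'SAME' padding arithmetic in the Conv2d_tf record above, reusing the AugCNN class defined in this record: for input 64, kernel 3, stride 1, total_padding = max(0, (64 - 1) * 1 + 3 - 64) = 2 with no odd remainder, so F.conv2d pads (1, 1) and the spatial size is preserved.

import torch

aug = AugCNN()
# (4, 3, 64, 64) in -> (4, 3, 64, 64) out, matching the generated call()
assert aug(torch.rand(4, 3, 64, 64)).shape == (4, 3, 64, 64)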
ClassicalFC1 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/v2/cv27lodxqolscap3swqbzgsc23zki5wz2ve2wpwjjuwp2v6jnukj.py
# Topologically Sorted Source Nodes: [x_3, output, squeeze], Original ATen: [aten.mul, aten._log_softmax, aten.squeeze, aten._log_softmax_backward_data]
# Source node to ATen node mapping:
# output => amax, exp, log, sub, sub_1, sum_1
# squeeze => squeeze
# x_3 => mul
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%addmm_1, %addmm_1), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %amax), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
# %squeeze : [num_users=1] = call_function[target=torch.ops.aten.squeeze.default](args = (%sub_1,), kwargs = {})
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
triton_per_fused__log_softmax__log_softmax_backward_data_mul_squeeze_0 = async_compile.triton('triton_per_fused__log_softmax__log_softmax_backward_data_mul_squeeze_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax__log_softmax_backward_data_mul_squeeze_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax__log_softmax_backward_data_mul_squeeze_0(in_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (10*x0)), rmask & xmask, other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(rmask & xmask, tmp2, float("-inf"))
tmp5 = triton_helpers.max2(tmp4, 1)[:, None]
tmp6 = tmp1 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp10 = tl.where(rmask & xmask, tmp8, 0)
tmp11 = tl.sum(tmp10, 1)[:, None]
tmp12 = tl_math.log(tmp11)
tmp13 = tmp6 - tmp12
tmp14 = tl_math.exp(tmp13)
tl.store(out_ptr2 + (r1 + (10*x0)), tmp13, rmask & xmask)
tl.store(out_ptr3 + (r1 + (10*x0)), tmp14, rmask & xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 784), (784, 1))
assert_size_stride(primals_2, (1024, 784), (784, 1))
assert_size_stride(primals_3, (1024, ), (1, ))
assert_size_stride(primals_4, (10, 1024), (1024, 1))
assert_size_stride(primals_5, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_3, primals_1, reinterpret_tensor(primals_2, (784, 1024), (1, 784), 0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, buf0, reinterpret_tensor(primals_4, (1024, 10), (1, 1024), 0), alpha=1, beta=1, out=buf1)
del primals_5
buf4 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
buf5 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3, output, squeeze], Original ATen: [aten.mul, aten._log_softmax, aten.squeeze, aten._log_softmax_backward_data]
stream0 = get_raw_stream(0)
triton_per_fused__log_softmax__log_softmax_backward_data_mul_squeeze_0.run(buf1, buf4, buf5, 4, 10, grid=grid(4), stream=stream0)
return (buf4, primals_1, buf0, buf1, buf5, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 784), (784, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1024, 784), (784, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((10, 1024), (1024, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.nn.utils.prune
import torch.backends.cudnn
import torch.cuda
import torch.nn
import torch.utils.data
class ClassicalFC1(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(784, 1024)
self.fc2 = nn.Linear(1024, 10)
def forward(self, x):
bsz = x.shape[0]
x = x.view(bsz, 784)
x = self.fc1(x)
x = self.fc2(x)
x = x * x
output = F.log_softmax(x, dim=1)
return output.squeeze()
def get_inputs():
return [torch.rand([4, 784])]
def get_init_inputs():
return [[], {}]
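# A minimal eager reference for the fused Triton kernel above (sketch only;
# the helper name is illustrative): square the logits, then apply the
# numerically stable log-softmax, matching the kernel's max / exp / sum / log
# sequence (tmp5, tmp7, tmp11, tmp12).
def _log_softmax_of_square_reference(x):
    y = x * x
    m = y.max(dim=1, keepdim=True).values
    z = y - m
    return z - z.exp().sum(dim=1, keepdim=True).log()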
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.utils.prune
import torch.backends.cudnn
import torch.cuda
import torch.nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__log_softmax__log_softmax_backward_data_mul_squeeze_0(
in_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0)
tmp1 = tmp0 * tmp0
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp4 = tl.where(rmask & xmask, tmp2, float('-inf'))
tmp5 = triton_helpers.max2(tmp4, 1)[:, None]
tmp6 = tmp1 - tmp5
tmp7 = tl_math.exp(tmp6)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp10 = tl.where(rmask & xmask, tmp8, 0)
tmp11 = tl.sum(tmp10, 1)[:, None]
tmp12 = tl_math.log(tmp11)
tmp13 = tmp6 - tmp12
tmp14 = tl_math.exp(tmp13)
tl.store(out_ptr2 + (r1 + 10 * x0), tmp13, rmask & xmask)
tl.store(out_ptr3 + (r1 + 10 * x0), tmp14, rmask & xmask)
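# Note: the kernel stores both the log-softmax output (out_ptr2 -> buf4, the
# module's forward result) and its exponential (out_ptr3 -> buf5, i.e. the
# softmax probabilities), which the log_softmax backward consumes -- hence
# _log_softmax_backward_data in the fused kernel's name.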
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 784), (784, 1))
assert_size_stride(primals_2, (1024, 784), (784, 1))
assert_size_stride(primals_3, (1024,), (1,))
assert_size_stride(primals_4, (10, 1024), (1024, 1))
assert_size_stride(primals_5, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32)
extern_kernels.addmm(primals_3, primals_1, reinterpret_tensor(
primals_2, (784, 1024), (1, 784), 0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_5, buf0, reinterpret_tensor(primals_4,
(1024, 10), (1, 1024), 0), alpha=1, beta=1, out=buf1)
del primals_5
buf4 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
buf5 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
get_raw_stream(0)
triton_per_fused__log_softmax__log_softmax_backward_data_mul_squeeze_0[
grid(4)](buf1, buf4, buf5, 4, 10, XBLOCK=1, num_warps=2,
num_stages=1)
return buf4, primals_1, buf0, buf1, buf5, primals_4
class ClassicalFC1New(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(784, 1024)
self.fc2 = nn.Linear(1024, 10)
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| mit-han-lab/pytorch-quantum | ClassicalFC1 | false | 16,094 | [
"MIT"
] | 98 | 05cf000d689307f6b1fe02d12744ad455685935b | https://github.com/mit-han-lab/pytorch-quantum/tree/05cf000d689307f6b1fe02d12744ad455685935b |
DiceLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ma/cma6x5bz2pwajdzlgc46dd6l2kf54jhvdqofvqvagobkfkoaglsn.py
# Topologically Sorted Source Nodes: [mul, intersection, mul_1, add, sum_2, sum_3, add_1, add_2, dsc, sub], Original ATen: [aten.mul, aten.sum, aten.add, aten.div, aten.rsub]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# dsc => div
# intersection => sum_1
# mul => mul
# mul_1 => mul_1
# sub => sub
# sum_2 => sum_2
# sum_3 => sum_3
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2.0), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, 1.0), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%view,), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%view_1,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_3), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, 1.0), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, %add_2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %div), kwargs = {})
triton_per_fused_add_div_mul_rsub_sum_0 = async_compile.triton('triton_per_fused_add_div_mul_rsub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mul_rsub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + ((64*(r0 // 16)) + (r0 % 16)), None)
tmp1 = tl.load(in_ptr1 + ((64*(r0 // 16)) + (r0 % 16)), None)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp8 = tl.sum(tmp6, 1)[:, None]
tmp9 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp11 = tl.sum(tmp9, 1)[:, None]
tmp12 = 2.0
tmp13 = tmp5 * tmp12
tmp14 = 1.0
tmp15 = tmp13 + tmp14
tmp16 = tmp8 + tmp11
tmp17 = tmp16 + tmp14
tmp18 = tmp15 / tmp17
tmp19 = tmp14 - tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp19, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mul, intersection, mul_1, add, sum_2, sum_3, add_1, add_2, dsc, sub], Original ATen: [aten.mul, aten.sum, aten.add, aten.div, aten.rsub]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_mul_rsub_sum_0.run(buf3, arg0_1, arg1_1, 1, 64, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.parallel
class DiceLoss(nn.Module):
def __init__(self):
super(DiceLoss, self).__init__()
self.smooth = 1.0
def forward(self, y_pred, y_true):
assert y_pred.size() == y_true.size()
y_pred = y_pred[:, 0].contiguous().view(-1)
y_true = y_true[:, 0].contiguous().view(-1)
intersection = (y_pred * y_true).sum()
dsc = (2.0 * intersection + self.smooth) / (y_pred.sum() + y_true.
sum() + self.smooth)
return 1.0 - dsc
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
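# A minimal eager sketch of the fused reduction above (the helper name is
# illustrative): flatten channel 0 of both tensors, then
# dsc = (2 * sum(p * t) + smooth) / (sum(p) + sum(t) + smooth); for identical
# binary masks with k ones, dsc = (2k + 1) / (2k + 1) = 1 and the loss is 0.
def _dice_reference(p, t, smooth=1.0):
    p = p[:, 0].reshape(-1)
    t = t[:, 0].reshape(-1)
    return 1.0 - (2.0 * (p * t).sum() + smooth) / (p.sum() + t.sum() + smooth)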
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.parallel
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (64 * (r0 // 16) + r0 % 16), None)
tmp1 = tl.load(in_ptr1 + (64 * (r0 // 16) + r0 % 16), None)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.sum(tmp3, 1)[:, None]
tmp6 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp8 = tl.sum(tmp6, 1)[:, None]
tmp9 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp11 = tl.sum(tmp9, 1)[:, None]
tmp12 = 2.0
tmp13 = tmp5 * tmp12
tmp14 = 1.0
tmp15 = tmp13 + tmp14
tmp16 = tmp8 + tmp11
tmp17 = tmp16 + tmp14
tmp18 = tmp15 / tmp17
tmp19 = tmp14 - tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp19, None)
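# Index note: 64 * (r0 // 16) + r0 % 16 walks channel 0 of each (4, 4, 4, 4)
# sample (stride 64 per batch element, 16 values per 4x4 plane), so the single
# 64-element reduction covers y_pred[:, 0] and y_true[:, 0] flattened.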
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_mul_rsub_sum_0[grid(1)](buf3, arg0_1,
arg1_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf3,
class DiceLossNew(nn.Module):
def __init__(self):
super(DiceLossNew, self).__init__()
self.smooth = 1.0
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| mkoivi-ms/unet-pytorch-azureml | DiceLoss | false | 16,095 | [
"MIT"
] | 517 | f0fa5b15cfad19f6b04bb309a965726c25c39e03 | https://github.com/mkoivi-ms/unet-pytorch-azureml/tree/f0fa5b15cfad19f6b04bb309a965726c25c39e03 |
chroma_subsampling | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/55/c55xeg7h533g7nm5g2b6wdnokzoxkacwj5gi4cwvuithrfob64eb.py
# Topologically Sorted Source Nodes: [cb], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
# cb => avg_pool2d
# Graph fragment:
# %avg_pool2d : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%unsqueeze, [2, 2], [2, 2], [0, 0], False, False), kwargs = {})
triton_poi_fused_avg_pool2d_0 = async_compile.triton('triton_poi_fused_avg_pool2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = (xindex // 2)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (1 + (8*x0) + (32*x1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (5 + (8*x0) + (32*x1)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (17 + (8*x0) + (32*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (21 + (8*x0) + (32*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/vr/cvrunofn7cfbpeyn5wdzlhq4aw752z3x2xgyyzxa2cne36j5mwnp.py
# Topologically Sorted Source Nodes: [cr], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
# cr => avg_pool2d_1
# Graph fragment:
# %avg_pool2d_1 : [num_users=1] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%unsqueeze_1, [2, 2], [2, 2], [0, 0], False, False), kwargs = {})
triton_poi_fused_avg_pool2d_1 = async_compile.triton('triton_poi_fused_avg_pool2d_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = (xindex // 2)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 + (8*x0) + (32*x1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (6 + (8*x0) + (32*x1)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (18 + (8*x0) + (32*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (22 + (8*x0) + (32*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 2, 2), (4, 1, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [cb], Original ATen: [aten.avg_pool2d]
stream0 = get_raw_stream(0)
triton_poi_fused_avg_pool2d_0.run(arg0_1, buf0, 16, grid=grid(16), stream=stream0)
buf1 = empty_strided_cuda((4, 1, 2, 2), (4, 1, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [cr], Original ATen: [aten.avg_pool2d]
triton_poi_fused_avg_pool2d_1.run(arg0_1, buf1, 16, grid=grid(16), stream=stream0)
return (reinterpret_tensor(arg0_1, (4, 4, 4), (64, 16, 4), 0), reinterpret_tensor(buf0, (4, 2, 2), (4, 2, 1), 0), reinterpret_tensor(buf1, (4, 2, 2), (4, 2, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class chroma_subsampling(nn.Module):
""" Chroma subsampling on CbCv channels
Input:
image(tensor): batch x height x width x 3
Output:
y(tensor): batch x height x width
cb(tensor): batch x height/2 x width/2
cr(tensor): batch x height/2 x width/2
"""
def __init__(self):
super(chroma_subsampling, self).__init__()
def forward(self, image):
image_2 = image.permute(0, 3, 1, 2).clone()
avg_pool = nn.AvgPool2d(kernel_size=2, stride=(2, 2),
count_include_pad=False)
cb = avg_pool(image_2[:, 1, :, :].unsqueeze(1))
cr = avg_pool(image_2[:, 2, :, :].unsqueeze(1))
cb = cb.permute(0, 2, 3, 1)
cr = cr.permute(0, 2, 3, 1)
return image[:, :, :, 0], cb.squeeze(3), cr.squeeze(3)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
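# A minimal eager sketch of the two pooling kernels (the helper name is
# illustrative): each chroma channel of the batch x H x W x C input is
# average-pooled with a 2x2 window and stride 2, halving both spatial dims,
# while the luma channel is returned at full resolution.
def _subsample_reference(image):
    import torch.nn.functional as F
    x = image.permute(0, 3, 1, 2)
    cb = F.avg_pool2d(x[:, 1:2], 2).squeeze(1)
    cr = F.avg_pool2d(x[:, 2:3], 2).squeeze(1)
    return image[:, :, :, 0], cb, cr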
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = tl.load(in_ptr0 + (1 + 8 * x0 + 32 * x1), xmask, eviction_policy
='evict_last')
tmp1 = tl.load(in_ptr0 + (5 + 8 * x0 + 32 * x1), xmask, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (17 + 8 * x0 + 32 * x1), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (21 + 8 * x0 + 32 * x1), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_avg_pool2d_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 + 8 * x0 + 32 * x1), xmask, eviction_policy
='evict_last')
tmp1 = tl.load(in_ptr0 + (6 + 8 * x0 + 32 * x1), xmask, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (18 + 8 * x0 + 32 * x1), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (22 + 8 * x0 + 32 * x1), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
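# Index note: for the (4, 4, 4, 4) batch x H x W x C input, adjacent columns
# are 4 apart and adjacent rows 16 apart, so 8 * x0 + 32 * x1 jumps two
# columns / two rows per output element; the four loads at offsets
# +0 / +4 / +16 / +20 from the base cover one 2x2 window, and the +1 / +2
# base offsets select the Cb and Cr channels respectively.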
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 2, 2), (4, 1, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_avg_pool2d_0[grid(16)](arg0_1, buf0, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 1, 2, 2), (4, 1, 2, 1), torch.float32)
triton_poi_fused_avg_pool2d_1[grid(16)](arg0_1, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
return reinterpret_tensor(arg0_1, (4, 4, 4), (64, 16, 4), 0
), reinterpret_tensor(buf0, (4, 2, 2), (4, 2, 1), 0
), reinterpret_tensor(buf1, (4, 2, 2), (4, 2, 1), 0)
class chroma_subsamplingNew(nn.Module):
""" Chroma subsampling on CbCv channels
Input:
image(tensor): batch x height x width x 3
Output:
y(tensor): batch x height x width
cb(tensor): batch x height/2 x width/2
cr(tensor): batch x height/2 x width/2
"""
def __init__(self):
super(chroma_subsamplingNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0], output[1], output[2]
| mlomnitz/DifferentiableJPEG | chroma_subsampling | false | 16,096 | [
"MIT"
] | 86 | a5767feba955a1bcb78600135a09c36a806f6249 | https://github.com/mlomnitz/DifferentiableJPEG/tree/a5767feba955a1bcb78600135a09c36a806f6249 |
LearnedPositionalEncoding | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/up/cuphmudfbm5xmuymbvibhn5q6e5y55prj6knqkmj43mx7ovcvcgz.py
# Topologically Sorted Source Nodes: [pos], Original ATen: [aten.arange]
# Source node to ATen node mapping:
# pos => iota
# Graph fragment:
# %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
triton_poi_fused_arange_0 = async_compile.triton('triton_poi_fused_arange_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_arange_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_arange_0(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/7g/c7ggkspjiryu6lrnyqmt3ltk6cvy3xv7hlniyify4whtbxpu3vcz.py
# Topologically Sorted Source Nodes: [embedding], Original ATen: [aten.embedding]
# Source node to ATen node mapping:
# embedding => embedding
# Graph fragment:
# %embedding : [num_users=2] = call_function[target=torch.ops.aten.embedding.default](args = (%primals_2, %expand), kwargs = {})
triton_poi_fused_embedding_1 = async_compile.triton('triton_poi_fused_embedding_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_embedding_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_embedding_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 16)
x0 = xindex % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x2), xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 100, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert(((0 <= tmp4) & (tmp4 < 100)) | ~(xmask), "index out of bounds: 0 <= tmp4 < 100")
tmp6 = tl.load(in_ptr1 + (x0 + (4*tmp4)), xmask)
tl.store(out_ptr0 + (x4), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/y3/cy35gsxm3xa2mi4ahfwlz6f5exunl2ig4pka6onlgazxeb4uuhe3.py
# Topologically Sorted Source Nodes: [x, u, sub, pow_1, s], Original ATen: [aten.add, aten.mean, aten.sub, aten.pow]
# Source node to ATen node mapping:
# pow_1 => pow_1
# s => mean_1
# sub => sub
# u => mean
# x => add
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %embedding), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add, [-1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %mean), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [-1], True), kwargs = {})
triton_poi_fused_add_mean_pow_sub_2 = async_compile.triton('triton_poi_fused_add_mean_pow_sub_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_pow_sub_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mean_pow_sub_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + (4*x2), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x2)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + (x2), tmp16, xmask)
tl.store(out_ptr1 + (x2), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/hy/chysp3ia3h42md5xyhzqsgskl34zzc6iy7yp2pqz3zla4xgfsa47.py
# Topologically Sorted Source Nodes: [x, u, sub, add_1, sqrt, x_1, mul, add_2], Original ATen: [aten.add, aten.mean, aten.sub, aten.sqrt, aten.div, aten.mul]
# Source node to ATen node mapping:
# add_1 => add_1
# add_2 => add_2
# mul => mul
# sqrt => sqrt
# sub => sub
# u => mean
# x => add
# x_1 => div
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %embedding), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add, [-1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %mean), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_1, 1e-12), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add_1,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sqrt), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %div), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_4), kwargs = {})
triton_poi_fused_add_div_mean_mul_sqrt_sub_3 = async_compile.triton('triton_poi_fused_add_div_mean_mul_sqrt_sub_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_sqrt_sub_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_sqrt_sub_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x3 = xindex
x4 = xindex % 64
x5 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x3), xmask)
tmp2 = tl.load(in_ptr2 + (x4), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr3 + (x5), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr4 + (x5), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 - tmp4
tmp7 = 1e-12
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tmp10 = tmp5 / tmp9
tmp11 = tmp0 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x3), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (100, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [pos], Original ATen: [aten.arange]
stream0 = get_raw_stream(0)
triton_poi_fused_arange_0.run(buf0, 4, grid=grid(4), stream=stream0)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [embedding], Original ATen: [aten.embedding]
triton_poi_fused_embedding_1.run(buf0, primals_2, buf1, 64, grid=grid(64), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [x, u, sub, pow_1, s], Original ATen: [aten.add, aten.mean, aten.sub, aten.pow]
triton_poi_fused_add_mean_pow_sub_2.run(primals_1, buf1, buf2, buf3, 64, grid=grid(64), stream=stream0)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x, u, sub, add_1, sqrt, x_1, mul, add_2], Original ATen: [aten.add, aten.mean, aten.sub, aten.sqrt, aten.div, aten.mul]
triton_poi_fused_add_div_mean_mul_sqrt_sub_3.run(primals_3, primals_1, buf1, buf2, buf3, primals_4, buf4, 256, grid=grid(256), stream=stream0)
del buf2
del buf3
del primals_4
return (buf4, primals_1, primals_3, reinterpret_tensor(buf0, (4, 1), (1, 1), 0), buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((100, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class LayerNorm(nn.Module):
"""A layernorm module in the TF style (epsilon inside the square root)."""
def __init__(self, d_model, variance_epsilon=1e-12):
super().__init__()
self.gamma = nn.Parameter(torch.ones(d_model))
self.beta = nn.Parameter(torch.zeros(d_model))
self.variance_epsilon = variance_epsilon
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.gamma * x + self.beta
class LearnedPositionalEncoding(nn.Module):
def __init__(self, d_model, dropout=0.1, max_len=100):
super(LearnedPositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
self.pos_embed = nn.Embedding(max_len, d_model)
self.layernorm = LayerNorm(d_model)
def forward(self, x):
seq_len = x.size(0)
pos = torch.arange(seq_len, dtype=torch.long, device=x.device)
pos = pos.unsqueeze(-1).expand(x.size()[:2])
x = x + self.pos_embed(pos)
return self.dropout(self.layernorm(x))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_model': 4}]
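# A minimal eager sketch of the fused LayerNorm above (the helper name is
# illustrative): the TF convention keeps epsilon inside the square root,
# matching sqrt(s + 1e-12) in the compiled kernel; positions 0..seq_len-1
# index the embedding table before this normalization is applied.
def _tf_layernorm_reference(x, gamma, beta, eps=1e-12):
    u = x.mean(-1, keepdim=True)
    s = (x - u).pow(2).mean(-1, keepdim=True)
    return gamma * ((x - u) / (s + eps).sqrt()) + beta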
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_arange_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_embedding_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16
x0 = xindex % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + x2, xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 100, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 100) | ~xmask,
'index out of bounds: 0 <= tmp4 < 100')
tmp6 = tl.load(in_ptr1 + (x0 + 4 * tmp4), xmask)
tl.store(out_ptr0 + x4, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_mean_pow_sub_2(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 16
tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x2, tmp16, xmask)
tl.store(out_ptr1 + x2, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_div_mean_mul_sqrt_sub_3(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x3 = xindex
x4 = xindex % 64
x5 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tl.load(in_ptr2 + x4, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr3 + x5, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr4 + x5, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 - tmp4
tmp7 = 1e-12
tmp8 = tmp6 + tmp7
tmp9 = libdevice.sqrt(tmp8)
tmp10 = tmp5 / tmp9
tmp11 = tmp0 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x3, tmp13, xmask)
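# Kernel split above: the position iota and its embedding lookup come first,
# then one pass computes the mean u and variance s over the size-4 last dim
# with fully unrolled loads, and a final fused epilogue re-adds the embedding
# to the input, normalizes by sqrt(s + 1e-12), and applies gamma / beta.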
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (100, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4,), (1,), torch.int64)
get_raw_stream(0)
triton_poi_fused_arange_0[grid(4)](buf0, 4, XBLOCK=4, num_warps=1,
num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_embedding_1[grid(64)](buf0, primals_2, buf1, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf3 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
triton_poi_fused_add_mean_pow_sub_2[grid(64)](primals_1, buf1, buf2,
buf3, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_sqrt_sub_3[grid(256)](primals_3,
primals_1, buf1, buf2, buf3, primals_4, buf4, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf2
del buf3
del primals_4
return buf4, primals_1, primals_3, reinterpret_tensor(buf0, (4, 1), (1,
1), 0), buf1
class LayerNorm(nn.Module):
"""A layernorm module in the TF style (epsilon inside the square root)."""
def __init__(self, d_model, variance_epsilon=1e-12):
super().__init__()
self.gamma = nn.Parameter(torch.ones(d_model))
self.beta = nn.Parameter(torch.zeros(d_model))
self.variance_epsilon = variance_epsilon
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.gamma * x + self.beta
class LearnedPositionalEncodingNew(nn.Module):
def __init__(self, d_model, dropout=0.1, max_len=100):
super(LearnedPositionalEncodingNew, self).__init__()
self.dropout = nn.Dropout(p=dropout)
self.pos_embed = nn.Embedding(max_len, d_model)
self.layernorm = LayerNorm(d_model)
def forward(self, input_0):
primals_2 = self.pos_embed.weight
primals_3 = self.layernorm.gamma
primals_4 = self.layernorm.beta
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| minhnn-tiny/vietocr | LearnedPositionalEncoding | false | 16,097 | [
"Apache-2.0"
] | 307 | 80ed99b5d29df3f04c54ae394c525117846b503a | https://github.com/minhnn-tiny/vietocr/tree/80ed99b5d29df3f04c54ae394c525117846b503a |
ClassificationHead | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/pd/cpdi7xzvi46tuzqsv24tk5nffrrhhudrzinef6vmc6ndavfymp7f.py
# Topologically Sorted Source Nodes: [norm, inputs], Original ATen: [aten.linalg_vector_norm, aten.div]
# Source node to ATen node mapping:
# inputs => div
# norm => pow_1, pow_2, sum_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [-1], True), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %pow_2), kwargs = {})
triton_poi_fused_div_linalg_vector_norm_0 = async_compile.triton('triton_poi_fused_div_linalg_vector_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_linalg_vector_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tmp0 / tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [norm, inputs], Original ATen: [aten.linalg_vector_norm, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_linalg_vector_norm_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_2
del primals_3
return (reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(buf0, (64, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
class ClassificationHead(torch.nn.Linear):
def __init__(self, normalize, weights, biases=None):
output_size, input_size = weights.shape
super().__init__(input_size, output_size)
self.normalize = normalize
if weights is not None:
self.weight = torch.nn.Parameter(weights.clone())
if biases is not None:
self.bias = torch.nn.Parameter(biases.clone())
else:
self.bias = torch.nn.Parameter(torch.zeros_like(self.bias))
def forward(self, inputs):
if self.normalize:
inputs = inputs / inputs.norm(dim=-1, keepdim=True)
return super().forward(inputs)
    def save(self, filename):
        # `utils` refers to the source repository's helper module
        # (torch_save/torch_load); it is not imported in this extracted snippet.
        utils.torch_save(self, filename)
    @classmethod
    def load(cls, filename):
        return utils.torch_load(filename)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'normalize': 4, 'weights': torch.rand([4, 4])}]
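# Illustrative usage sketch (not part of the original source): the head is built
# from a (num_classes, feature_dim) weight matrix and L2-normalizes features
# inside forward before the linear layer. The shapes below are made up:
def _classification_head_demo():
    weights = torch.randn(10, 512)
    head = ClassificationHead(normalize=True, weights=weights)
    logits = head(torch.randn(4, 512))
    assert logits.shape == (4, 10)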
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tmp0 / tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_linalg_vector_norm_0[grid(256)](primals_1,
buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf1)
del primals_2
del primals_3
return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(buf0, (64, 4), (4, 1), 0)
class ClassificationHeadNew(torch.nn.Linear):
def __init__(self, normalize, weights, biases=None):
output_size, input_size = weights.shape
super().__init__(input_size, output_size)
self.normalize = normalize
if weights is not None:
self.weight = torch.nn.Parameter(weights.clone())
if biases is not None:
self.bias = torch.nn.Parameter(biases.clone())
else:
self.bias = torch.nn.Parameter(torch.zeros_like(self.bias))
    def save(self, filename):
        # `utils` refers to the source repository's helper module
        # (torch_save/torch_load); it is not imported in this extracted snippet.
        utils.torch_save(self, filename)
    @classmethod
    def load(cls, filename):
        return utils.torch_load(filename)
def forward(self, input_0):
primals_2 = self.weight
primals_3 = self.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
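# Illustrative sketch (not part of the generated code): the Inductor `call`
# above is specialized to the traced shapes, so this check assumes a CUDA
# device and the exact (4, 4, 4, 4) input the module was compiled for:
def _compiled_vs_eager_check():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    head = ClassificationHeadNew(True, torch.rand(4, 4, device='cuda')).cuda()
    eager = torch.nn.functional.linear(x / x.norm(dim=-1, keepdim=True),
        head.weight, head.bias)
    assert torch.allclose(head(x), eager, atol=1e-05)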
| mlfoundations/wise-ft | ClassificationHead | false | 16,098 | [
"MIT"
] | 79 | 58b7a4b343b09dc06606aa929c2ef51accced8d1 | https://github.com/mlfoundations/wise-ft/tree/58b7a4b343b09dc06606aa929c2ef51accced8d1 |
M_VDNMixer | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ek/cekesse56z222vklldbcjcdpyhlgnxfldkdczm4xsok5kwwai7kt.py
# Topologically Sorted Source Nodes: [sum_1], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_1 => sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%arg0_1, [-1]), kwargs = {})
triton_poi_fused_sum_0 = async_compile.triton('triton_poi_fused_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sum_1], Original ATen: [aten.sum]
stream0 = get_raw_stream(0)
triton_poi_fused_sum_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
return (reinterpret_tensor(buf0, (64, 1, 1), (1, 1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
import numpy as np
import torch.nn as nn
def to_torch(input):
return torch.from_numpy(input) if type(input) == np.ndarray else input
class M_VDNMixer(nn.Module):
"""
Computes total Q values given agent q values and global states.
:param args: (namespace) contains information about hyperparameters and algorithm configuration (unused).
:param num_agents: (int) number of agents in env
:param cent_obs_dim: (int) dimension of the centralized state (unused).
:param device: (torch.Device) torch device on which to do computations.
:param multidiscrete_list: (list) list of each action dimension if action space is multidiscrete
"""
def __init__(self, args, num_agents, cent_obs_dim, device,
multidiscrete_list=None):
"""
init mixer class
"""
super(M_VDNMixer, self).__init__()
self.device = device
self.num_agents = num_agents
if multidiscrete_list:
self.num_mixer_q_inps = sum(multidiscrete_list)
else:
self.num_mixer_q_inps = self.num_agents
def forward(self, agent_q_inps):
"""
Computes Q_tot by summing individual agent q values.
:param agent_q_inps: (torch.Tensor) individual agent q values
:return Q_tot: (torch.Tensor) computed Q_tot values
"""
agent_q_inps = to_torch(agent_q_inps)
return agent_q_inps.sum(dim=-1).view(-1, 1, 1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'args': _mock_config(), 'num_agents': 4, 'cent_obs_dim': 4,
'device': 0}]
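# Illustrative sketch (not part of the original source): the VDN mixer is just a
# sum over the last (per-agent) dimension, reshaped to (-1, 1, 1). A quick check:
def _vdn_mixer_demo():
    q = torch.ones(2, 3, 4)
    q_tot = q.sum(dim=-1).view(-1, 1, 1)
    assert q_tot.shape == (6, 1, 1) and bool((q_tot == 4).all())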
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tl.store(out_ptr0 + x0, tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_sum_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
return reinterpret_tensor(buf0, (64, 1, 1), (1, 1, 1), 0),
def to_torch(input):
return torch.from_numpy(input) if type(input) == np.ndarray else input
class M_VDNMixerNew(nn.Module):
"""
Computes total Q values given agent q values and global states.
:param args: (namespace) contains information about hyperparameters and algorithm configuration (unused).
:param num_agents: (int) number of agents in env
:param cent_obs_dim: (int) dimension of the centralized state (unused).
:param device: (torch.Device) torch device on which to do computations.
:param multidiscrete_list: (list) list of each action dimension if action space is multidiscrete
"""
def __init__(self, args, num_agents, cent_obs_dim, device,
multidiscrete_list=None):
"""
init mixer class
"""
super(M_VDNMixerNew, self).__init__()
self.device = device
self.num_agents = num_agents
if multidiscrete_list:
self.num_mixer_q_inps = sum(multidiscrete_list)
else:
self.num_mixer_q_inps = self.num_agents
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| Maxtoq/off-policy | M_VDNMixer | false | 16,099 | [
"MIT"
] | 76 | e866f13a3d144a29552c38f104bcdfb411036138 | https://github.com/Maxtoq/off-policy/tree/e866f13a3d144a29552c38f104bcdfb411036138 |
ycbcr_to_rgb_jpeg | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/vx/cvxxlynxah34yu572a3wn3fl35yzhysx2xti2izljcb2iii5cxw4.py
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %primals_1), kwargs = {})
triton_poi_fused_add_0 = async_compile.triton('triton_poi_fused_add_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 3
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (3, ), (1, ))
assert_size_stride(primals_2, (4, 4, 4, 3), (48, 12, 3, 1))
assert_size_stride(primals_3, (3, 3), (1, 3))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 3), (48, 12, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [add], Original ATen: [aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_0.run(primals_2, primals_1, buf0, 192, grid=grid(192), stream=stream0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 3), (3, 1), torch.float32)
# Topologically Sorted Source Nodes: [result], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf0, (64, 3), (3, 1), 0), primals_3, out=buf1)
return (reinterpret_tensor(buf1, (4, 3, 4, 4), (48, 1, 12, 3), 0), reinterpret_tensor(buf0, (64, 3), (3, 1), 0), reinterpret_tensor(primals_3, (3, 3), (3, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 3), (48, 12, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((3, 3), (1, 3), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
class ycbcr_to_rgb_jpeg(nn.Module):
""" Converts YCbCr image to RGB JPEG
Input:
image(tensor): batch x height x width x 3
    Output:
result(tensor): batch x 3 x height x width
"""
def __init__(self):
super(ycbcr_to_rgb_jpeg, self).__init__()
matrix = np.array([[1.0, 0.0, 1.402], [1, -0.344136, -0.714136], [1,
1.772, 0]], dtype=np.float32).T
self.shift = nn.Parameter(torch.tensor([0, -128.0, -128.0]))
self.matrix = nn.Parameter(torch.from_numpy(matrix))
def forward(self, image):
result = torch.tensordot(image + self.shift, self.matrix, dims=1)
        result.view(image.shape)  # no-op kept from the original source; the view's result is discarded
return result.permute(0, 3, 1, 2)
def get_inputs():
return [torch.rand([4, 4, 4, 3])]
def get_init_inputs():
return [[], {}]
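# Illustrative sketch (not part of the original source): tensordot with dims=1
# contracts the channel axis against the matrix's first axis, i.e. it is a plain
# matrix product over the last dimension, which is why the generated code can
# lower the whole conversion to a single mm call:
def _tensordot_is_matmul_demo():
    image = torch.rand(4, 4, 4, 3)
    shift = torch.tensor([0.0, -128.0, -128.0])
    matrix = torch.rand(3, 3)
    a = torch.tensordot(image + shift, matrix, dims=1)
    b = (image + shift) @ matrix
    assert torch.allclose(a, b)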
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 3
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (3,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 3), (48, 12, 3, 1))
assert_size_stride(primals_3, (3, 3), (1, 3))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 3), (48, 12, 3, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_0[grid(192)](primals_2, primals_1, buf0, 192,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((64, 3), (3, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 3), (3, 1), 0),
primals_3, out=buf1)
return reinterpret_tensor(buf1, (4, 3, 4, 4), (48, 1, 12, 3), 0
), reinterpret_tensor(buf0, (64, 3), (3, 1), 0), reinterpret_tensor(
primals_3, (3, 3), (3, 1), 0)
class ycbcr_to_rgb_jpegNew(nn.Module):
""" Converts YCbCr image to RGB JPEG
Input:
image(tensor): batch x height x width x 3
    Output:
result(tensor): batch x 3 x height x width
"""
def __init__(self):
super(ycbcr_to_rgb_jpegNew, self).__init__()
matrix = np.array([[1.0, 0.0, 1.402], [1, -0.344136, -0.714136], [1,
1.772, 0]], dtype=np.float32).T
self.shift = nn.Parameter(torch.tensor([0, -128.0, -128.0]))
self.matrix = nn.Parameter(torch.from_numpy(matrix))
def forward(self, input_0):
primals_1 = self.shift
primals_3 = self.matrix
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| mlomnitz/DifferentiableJPEG | ycbcr_to_rgb_jpeg | false | 16,100 | [
"MIT"
] | 86 | a5767feba955a1bcb78600135a09c36a806f6249 | https://github.com/mlomnitz/DifferentiableJPEG/tree/a5767feba955a1bcb78600135a09c36a806f6249 |
LightHead | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/xb/cxbnvwmyej7sf2rljkgwmqzeiaeyvolxx3bj5zig4lmy6ppdcvq2.py
# Topologically Sorted Source Nodes: [pow_1, mean, x_std, add, x_norm, x], Original ATen: [aten.pow, aten.mean, aten.sqrt, aten.add, aten.div, aten.mul]
# Source node to ATen node mapping:
# add => add
# mean => mean
# pow_1 => pow_1
# x => mul
# x_norm => div
# x_std => sqrt
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [-1], True), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%mean,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-08), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %add), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %div), kwargs = {})
triton_poi_fused_add_div_mean_mul_pow_sqrt_0 = async_compile.triton('triton_poi_fused_add_div_mean_mul_pow_sqrt_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_pow_sqrt_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_sqrt_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp2 * tmp2
tmp5 = tmp4 * tmp4
tmp6 = tmp3 + tmp5
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp11 = tmp10 * tmp10
tmp12 = tmp9 + tmp11
tmp13 = 4.0
tmp14 = tmp12 / tmp13
tmp15 = libdevice.sqrt(tmp14)
tmp16 = 1e-08
tmp17 = tmp15 + tmp16
tmp18 = tmp1 / tmp17
tmp19 = tmp0 * tmp18
tl.store(out_ptr0 + (x2), tmp19, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pow_1, mean, x_std, add, x_norm, x], Original ATen: [aten.pow, aten.mean, aten.sqrt, aten.add, aten.div, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_pow_sqrt_0.run(primals_2, primals_1, buf0, 256, grid=grid(256), stream=stream0)
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
return (reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_1, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), primals_3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class RMSNorm(nn.Module):
"""An implementation of RMS Normalization.
# https://catalyst-team.github.io/catalyst/_modules/catalyst/contrib/nn/modules/rms_norm.html#RMSNorm
"""
def __init__(self, dimension: 'int', epsilon: 'float'=1e-08, is_bias:
'bool'=False):
"""
Args:
dimension (int): the dimension of the layer output to normalize
epsilon (float): an epsilon to prevent dividing by zero
in case the layer has zero variance. (default = 1e-8)
is_bias (bool): a boolean value whether to include bias term
while normalization
"""
super().__init__()
self.dimension = dimension
self.epsilon = epsilon
self.is_bias = is_bias
self.scale = nn.Parameter(torch.ones(self.dimension))
if self.is_bias:
self.bias = nn.Parameter(torch.zeros(self.dimension))
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
x_std = torch.sqrt(torch.mean(x ** 2, -1, keepdim=True))
x_norm = x / (x_std + self.epsilon)
if self.is_bias:
return self.scale * x_norm + self.bias
return self.scale * x_norm
class LightHead(nn.Module):
def __init__(self, in_features, num_classes):
super().__init__()
self.norm = RMSNorm(in_features)
self.fc = nn.Linear(in_features, num_classes)
def forward(self, x):
x = self.norm(x)
x = self.fc(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'num_classes': 4}]
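# Illustrative sketch (not part of the original source): RMSNorm rescales by the
# root-mean-square of the last dimension without subtracting the mean, so with
# the default unit scale the output's RMS is close to 1:
def _rmsnorm_demo():
    norm = RMSNorm(8)
    y = norm(torch.randn(2, 8))
    rms = torch.sqrt(torch.mean(y ** 2, -1))
    assert torch.allclose(rms, torch.ones_like(rms), atol=1e-05)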
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_div_mean_mul_pow_sqrt_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp3 = tmp2 * tmp2
tmp5 = tmp4 * tmp4
tmp6 = tmp3 + tmp5
tmp8 = tmp7 * tmp7
tmp9 = tmp6 + tmp8
tmp11 = tmp10 * tmp10
tmp12 = tmp9 + tmp11
tmp13 = 4.0
tmp14 = tmp12 / tmp13
tmp15 = libdevice.sqrt(tmp14)
tmp16 = 1e-08
tmp17 = tmp15 + tmp16
tmp18 = tmp1 / tmp17
tmp19 = tmp0 * tmp18
tl.store(out_ptr0 + x2, tmp19, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_mean_mul_pow_sqrt_0[grid(256)](primals_2,
primals_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_4, reinterpret_tensor(buf0, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf1)
del primals_4
return reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_1, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), primals_3
class RMSNorm(nn.Module):
"""An implementation of RMS Normalization.
# https://catalyst-team.github.io/catalyst/_modules/catalyst/contrib/nn/modules/rms_norm.html#RMSNorm
"""
def __init__(self, dimension: 'int', epsilon: 'float'=1e-08, is_bias:
'bool'=False):
"""
Args:
dimension (int): the dimension of the layer output to normalize
epsilon (float): an epsilon to prevent dividing by zero
in case the layer has zero variance. (default = 1e-8)
is_bias (bool): a boolean value whether to include bias term
while normalization
"""
super().__init__()
self.dimension = dimension
self.epsilon = epsilon
self.is_bias = is_bias
self.scale = nn.Parameter(torch.ones(self.dimension))
if self.is_bias:
self.bias = nn.Parameter(torch.zeros(self.dimension))
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
x_std = torch.sqrt(torch.mean(x ** 2, -1, keepdim=True))
x_norm = x / (x_std + self.epsilon)
if self.is_bias:
return self.scale * x_norm + self.bias
return self.scale * x_norm
class LightHeadNew(nn.Module):
def __init__(self, in_features, num_classes):
super().__init__()
self.norm = RMSNorm(in_features)
self.fc = nn.Linear(in_features, num_classes)
def forward(self, input_0):
primals_2 = self.norm.scale
primals_3 = self.fc.weight
primals_4 = self.fc.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| mohamedbakrey12/prpjectINDeepLearning | LightHead | false | 16,101 | [
"MIT"
] | 122 | b6106ee13ff9377e4a84bee4814bd54a34156930 | https://github.com/mohamedbakrey12/prpjectINDeepLearning/tree/b6106ee13ff9377e4a84bee4814bd54a34156930 |
TVLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ap/capgkvksys2oryteu5tgoxocuctkimjqhc5ur574b57sgrqpa62k.py
# Topologically Sorted Source Nodes: [x_diff, pow_1, y_diff, pow_2, add, diff, mean, sqrt, mean_1], Original ATen: [aten.sub, aten.pow, aten.add, aten.mean, aten.sqrt]
# Source node to ATen node mapping:
# add => add
# diff => add_1
# mean => mean
# mean_1 => mean_1
# pow_1 => pow_1
# pow_2 => pow_2
# sqrt => sqrt
# x_diff => sub
# y_diff => sub_1
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_2, %slice_4), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_6, %slice_8), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_1, %pow_2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, 1e-08), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add_1, [1]), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%mean,), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sqrt,), kwargs = {})
triton_per_fused_add_mean_pow_sqrt_sub_0 = async_compile.triton('triton_per_fused_add_mean_pow_sqrt_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mean_pow_sqrt_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mean_pow_sqrt_sub_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 4
r1 = (rindex // 4) % 4
r2 = (rindex // 16)
r3 = rindex
tmp0 = tl.load(in_ptr0 + ((4*((3) * ((3) <= (r1)) + (r1) * ((r1) < (3)))) + (64*r2) + ((3) * ((3) <= (1 + r0)) + (1 + r0) * ((1 + r0) < (3)))), None)
tmp1 = tl.load(in_ptr0 + ((4*((3) * ((3) <= (r1)) + (r1) * ((r1) < (3)))) + (64*r2) + ((3) * ((3) <= (r0)) + (r0) * ((r0) < (3)))), None)
tmp4 = tl.load(in_ptr0 + ((4*((3) * ((3) <= (1 + r1)) + (1 + r1) * ((1 + r1) < (3)))) + (64*r2) + ((3) * ((3) <= (r0)) + (r0) * ((r0) < (3)))), None)
tmp10 = tl.load(in_ptr0 + (16 + (4*((3) * ((3) <= (r1)) + (r1) * ((r1) < (3)))) + (64*r2) + ((3) * ((3) <= (1 + r0)) + (1 + r0) * ((1 + r0) < (3)))), None)
tmp11 = tl.load(in_ptr0 + (16 + (4*((3) * ((3) <= (r1)) + (r1) * ((r1) < (3)))) + (64*r2) + ((3) * ((3) <= (r0)) + (r0) * ((r0) < (3)))), None)
tmp14 = tl.load(in_ptr0 + (16 + (4*((3) * ((3) <= (1 + r1)) + (1 + r1) * ((1 + r1) < (3)))) + (64*r2) + ((3) * ((3) <= (r0)) + (r0) * ((r0) < (3)))), None)
tmp20 = tl.load(in_ptr0 + (32 + (4*((3) * ((3) <= (r1)) + (r1) * ((r1) < (3)))) + (64*r2) + ((3) * ((3) <= (1 + r0)) + (1 + r0) * ((1 + r0) < (3)))), None)
tmp21 = tl.load(in_ptr0 + (32 + (4*((3) * ((3) <= (r1)) + (r1) * ((r1) < (3)))) + (64*r2) + ((3) * ((3) <= (r0)) + (r0) * ((r0) < (3)))), None)
tmp24 = tl.load(in_ptr0 + (32 + (4*((3) * ((3) <= (1 + r1)) + (1 + r1) * ((1 + r1) < (3)))) + (64*r2) + ((3) * ((3) <= (r0)) + (r0) * ((r0) < (3)))), None)
tmp30 = tl.load(in_ptr0 + (48 + (4*((3) * ((3) <= (r1)) + (r1) * ((r1) < (3)))) + (64*r2) + ((3) * ((3) <= (1 + r0)) + (1 + r0) * ((1 + r0) < (3)))), None)
tmp31 = tl.load(in_ptr0 + (48 + (4*((3) * ((3) <= (r1)) + (r1) * ((r1) < (3)))) + (64*r2) + ((3) * ((3) <= (r0)) + (r0) * ((r0) < (3)))), None)
tmp34 = tl.load(in_ptr0 + (48 + (4*((3) * ((3) <= (1 + r1)) + (1 + r1) * ((1 + r1) < (3)))) + (64*r2) + ((3) * ((3) <= (r0)) + (r0) * ((r0) < (3)))), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp5 = tmp4 - tmp1
tmp6 = tmp5 * tmp5
tmp7 = tmp3 + tmp6
tmp8 = 1e-08
tmp9 = tmp7 + tmp8
tmp12 = tmp10 - tmp11
tmp13 = tmp12 * tmp12
tmp15 = tmp14 - tmp11
tmp16 = tmp15 * tmp15
tmp17 = tmp13 + tmp16
tmp18 = tmp17 + tmp8
tmp19 = tmp9 + tmp18
tmp22 = tmp20 - tmp21
tmp23 = tmp22 * tmp22
tmp25 = tmp24 - tmp21
tmp26 = tmp25 * tmp25
tmp27 = tmp23 + tmp26
tmp28 = tmp27 + tmp8
tmp29 = tmp19 + tmp28
tmp32 = tmp30 - tmp31
tmp33 = tmp32 * tmp32
tmp35 = tmp34 - tmp31
tmp36 = tmp35 * tmp35
tmp37 = tmp33 + tmp36
tmp38 = tmp37 + tmp8
tmp39 = tmp29 + tmp38
tmp40 = 4.0
tmp41 = tmp39 / tmp40
tmp42 = libdevice.sqrt(tmp41)
tmp43 = tl.broadcast_to(tmp42, [XBLOCK, RBLOCK])
tmp45 = tl.sum(tmp43, 1)[:, None]
tmp46 = 64.0
tmp47 = tmp45 / tmp46
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp47, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [x_diff, pow_1, y_diff, pow_2, add, diff, mean, sqrt, mean_1], Original ATen: [aten.sub, aten.pow, aten.add, aten.mean, aten.sqrt]
stream0 = get_raw_stream(0)
triton_per_fused_add_mean_pow_sqrt_sub_0.run(buf2, arg0_1, 1, 64, grid=grid(1), stream=stream0)
del arg0_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
from torch.nn import functional as F
class TVLoss(nn.Module):
def forward(self, input):
input = F.pad(input, (0, 1, 0, 1), 'replicate')
x_diff = input[..., :-1, 1:] - input[..., :-1, :-1]
y_diff = input[..., 1:, :-1] - input[..., :-1, :-1]
diff = x_diff ** 2 + y_diff ** 2 + 1e-08
return diff.mean(dim=1).sqrt().mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
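# Illustrative sketch (not part of the original source): for a constant image
# both finite differences vanish, so the loss collapses to sqrt(1e-08) = 1e-04,
# which makes the epsilon's role as a smoothing floor easy to see:
def _tvloss_constant_demo():
    loss = TVLoss()(torch.ones(1, 3, 8, 8))
    assert torch.allclose(loss, torch.tensor(0.0001))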
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_mean_pow_sqrt_sub_0(in_out_ptr0, in_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 4
r1 = rindex // 4 % 4
r2 = rindex // 16
tmp0 = tl.load(in_ptr0 + (4 * (3 * (3 <= r1) + r1 * (r1 < 3)) + 64 * r2 +
(3 * (3 <= 1 + r0) + (1 + r0) * (1 + r0 < 3))), None)
tmp1 = tl.load(in_ptr0 + (4 * (3 * (3 <= r1) + r1 * (r1 < 3)) + 64 * r2 +
(3 * (3 <= r0) + r0 * (r0 < 3))), None)
tmp4 = tl.load(in_ptr0 + (4 * (3 * (3 <= 1 + r1) + (1 + r1) * (1 + r1 <
3)) + 64 * r2 + (3 * (3 <= r0) + r0 * (r0 < 3))), None)
tmp10 = tl.load(in_ptr0 + (16 + 4 * (3 * (3 <= r1) + r1 * (r1 < 3)) +
64 * r2 + (3 * (3 <= 1 + r0) + (1 + r0) * (1 + r0 < 3))), None)
tmp11 = tl.load(in_ptr0 + (16 + 4 * (3 * (3 <= r1) + r1 * (r1 < 3)) +
64 * r2 + (3 * (3 <= r0) + r0 * (r0 < 3))), None)
tmp14 = tl.load(in_ptr0 + (16 + 4 * (3 * (3 <= 1 + r1) + (1 + r1) * (1 +
r1 < 3)) + 64 * r2 + (3 * (3 <= r0) + r0 * (r0 < 3))), None)
tmp20 = tl.load(in_ptr0 + (32 + 4 * (3 * (3 <= r1) + r1 * (r1 < 3)) +
64 * r2 + (3 * (3 <= 1 + r0) + (1 + r0) * (1 + r0 < 3))), None)
tmp21 = tl.load(in_ptr0 + (32 + 4 * (3 * (3 <= r1) + r1 * (r1 < 3)) +
64 * r2 + (3 * (3 <= r0) + r0 * (r0 < 3))), None)
tmp24 = tl.load(in_ptr0 + (32 + 4 * (3 * (3 <= 1 + r1) + (1 + r1) * (1 +
r1 < 3)) + 64 * r2 + (3 * (3 <= r0) + r0 * (r0 < 3))), None)
tmp30 = tl.load(in_ptr0 + (48 + 4 * (3 * (3 <= r1) + r1 * (r1 < 3)) +
64 * r2 + (3 * (3 <= 1 + r0) + (1 + r0) * (1 + r0 < 3))), None)
tmp31 = tl.load(in_ptr0 + (48 + 4 * (3 * (3 <= r1) + r1 * (r1 < 3)) +
64 * r2 + (3 * (3 <= r0) + r0 * (r0 < 3))), None)
tmp34 = tl.load(in_ptr0 + (48 + 4 * (3 * (3 <= 1 + r1) + (1 + r1) * (1 +
r1 < 3)) + 64 * r2 + (3 * (3 <= r0) + r0 * (r0 < 3))), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp5 = tmp4 - tmp1
tmp6 = tmp5 * tmp5
tmp7 = tmp3 + tmp6
tmp8 = 1e-08
tmp9 = tmp7 + tmp8
tmp12 = tmp10 - tmp11
tmp13 = tmp12 * tmp12
tmp15 = tmp14 - tmp11
tmp16 = tmp15 * tmp15
tmp17 = tmp13 + tmp16
tmp18 = tmp17 + tmp8
tmp19 = tmp9 + tmp18
tmp22 = tmp20 - tmp21
tmp23 = tmp22 * tmp22
tmp25 = tmp24 - tmp21
tmp26 = tmp25 * tmp25
tmp27 = tmp23 + tmp26
tmp28 = tmp27 + tmp8
tmp29 = tmp19 + tmp28
tmp32 = tmp30 - tmp31
tmp33 = tmp32 * tmp32
tmp35 = tmp34 - tmp31
tmp36 = tmp35 * tmp35
tmp37 = tmp33 + tmp36
tmp38 = tmp37 + tmp8
tmp39 = tmp29 + tmp38
tmp40 = 4.0
tmp41 = tmp39 / tmp40
tmp42 = libdevice.sqrt(tmp41)
tmp43 = tl.broadcast_to(tmp42, [XBLOCK, RBLOCK])
tmp45 = tl.sum(tmp43, 1)[:, None]
tmp46 = 64.0
tmp47 = tmp45 / tmp46
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp47, None)
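# Note (not part of the generated code): the recurring index expression
# 3 * (3 <= r) + r * (r < 3) evaluates to min(r, 3); it is how the kernel reads
# the replicate-padded border without materializing F.pad's output. A quick
# check of the identity in plain Python:
def _replicate_clamp_identity_check():
    for r in range(8):
        assert 3 * (3 <= r) + r * (r < 3) == min(r, 3)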
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_per_fused_add_mean_pow_sqrt_sub_0[grid(1)](buf2, arg0_1, 1,
64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf2,
class TVLossNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| momo-the-monster/vqgan-clip-app | TVLoss | false | 16,102 | [
"MIT"
] | 63 | 56cfc0a53928d6d8f90ed8c79439afb4430bc118 | https://github.com/momo-the-monster/vqgan-clip-app/tree/56cfc0a53928d6d8f90ed8c79439afb4430bc118 |
ClassicalConv2 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/z5/cz54fd4lnstery2s23gtcu24u56amvioba7xx5f6yrm3j5jhnzkp.py
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.mul]
# Source node to ATen node mapping:
# x_1 => convolution
# x_2 => mul
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%avg_pool2d, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, %convolution), kwargs = {})
triton_poi_fused_convolution_mul_0 = async_compile.triton('triton_poi_fused_convolution_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_mul_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1296
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 81) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tmp2 * tmp2
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
tl.store(out_ptr0 + (x3), tmp3, xmask)
''', device_str='cuda')
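# Note (not part of the generated code): the kernel above fuses the
# convolution's per-channel bias add with the x * x squaring that serves as the
# activation, storing the biased output in place and its square to a second
# buffer in one pass. A reference in eager terms (an assumption inferred from
# the kernel body, not taken from the original model code):
def _fused_bias_square_reference(conv_out, bias):
    y = conv_out + bias.view(1, -1, 1, 1)
    return y, y * y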
# kernel path: runs/run_shard_0/inductor_cache/r7/cr7edbpcajpm4fizch3pwqtferqer2jekzsm7zfhmzrn7ctyqb4n.py
# Topologically Sorted Source Nodes: [x_5, x_6], Original ATen: [aten.convolution, aten.mul]
# Source node to ATen node mapping:
# x_5 => convolution_2
# x_6 => mul_2
# Graph fragment:
# %convolution_2 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%mul_1, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, %convolution_2), kwargs = {})
triton_poi_fused_convolution_mul_1 = async_compile.triton('triton_poi_fused_convolution_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_mul_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_mul_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 64) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tmp2 * tmp2
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
tl.store(out_ptr0 + (x3), tmp3, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/nk/cnk76km5dj324hs6t2acvaq46ci3dozrezinl4uqldysatfoqrvi.py
# Topologically Sorted Source Nodes: [x_9, output], Original ATen: [aten.convolution, aten._log_softmax]
# Source node to ATen node mapping:
# output => amax, exp, sub, sum_1
# x_9 => convolution_4
# Graph fragment:
# %convolution_4 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%mul_3, %primals_10, %primals_11, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%convolution_4, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%convolution_4, %amax), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
triton_per_fused__log_softmax_convolution_2 = async_compile.triton('triton_per_fused__log_softmax_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[256, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_convolution_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_convolution_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 196
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r2 = rindex
x0 = xindex % 49
x1 = (xindex // 49)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (49*r2) + (490*x1)), rmask & xmask, other=0.0)
tmp1 = tl.load(in_ptr1 + (r2), rmask, eviction_policy='evict_last', other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(rmask & xmask, tmp3, float("-inf"))
tmp6 = triton_helpers.max2(tmp5, 1)[:, None]
tmp7 = tmp2 - tmp6
tmp8 = tl_math.exp(tmp7)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.where(rmask & xmask, tmp9, 0)
tmp12 = tl.sum(tmp11, 1)[:, None]
tl.store(out_ptr0 + (x3), tmp6, xmask)
tl.store(out_ptr1 + (x3), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/vx/cvxsaup4kcaaq74cljldkb2phb2p3mlgio5dcqu3fbwwq3rtlxqb.py
# Topologically Sorted Source Nodes: [x_9, output, squeeze], Original ATen: [aten.convolution, aten._log_softmax, aten.squeeze, aten._log_softmax_backward_data]
# Source node to ATen node mapping:
# output => log, sub, sub_1
# squeeze => squeeze
# x_9 => convolution_4
# Graph fragment:
# %convolution_4 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%mul_3, %primals_10, %primals_11, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%convolution_4, %amax), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
# %squeeze : [num_users=1] = call_function[target=torch.ops.aten.squeeze.default](args = (%sub_1,), kwargs = {})
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub_1,), kwargs = {})
triton_poi_fused__log_softmax__log_softmax_backward_data_convolution_squeeze_3 = async_compile.triton('triton_poi_fused__log_softmax__log_softmax_backward_data_convolution_squeeze_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax__log_softmax_backward_data_convolution_squeeze_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax__log_softmax_backward_data_convolution_squeeze_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 1960
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 49) % 10
x0 = xindex % 49
x2 = (xindex // 490)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x0 + (49*x2)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x0 + (49*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = tl_math.log(tmp5)
tmp7 = tmp4 - tmp6
tmp8 = tl_math.exp(tmp7)
tl.store(out_ptr0 + (x3), tmp7, xmask)
tl.store(out_ptr1 + (x3), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_2, (4, 1, 2, 2), (4, 4, 2, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 2, 2), (16, 4, 2, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_9, (4, ), (1, ))
assert_size_stride(primals_10, (10, 4, 2, 2), (16, 4, 2, 1))
assert_size_stride(primals_11, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.avg_pool2d]
buf0 = torch.ops.aten.avg_pool2d.default(primals_1, [6, 6], [6, 6], [0, 0], False, True, None)
del primals_1
buf1 = buf0
del buf0
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 9, 9), (324, 81, 9, 1))
buf3 = buf2; del buf2 # reuse
buf4 = empty_strided_cuda((4, 4, 9, 9), (324, 81, 9, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_mul_0.run(buf3, primals_3, buf4, 1296, grid=grid(1296), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf4, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 9, 9), (324, 81, 9, 1))
buf6 = buf5; del buf5 # reuse
buf7 = empty_strided_cuda((4, 4, 9, 9), (324, 81, 9, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3, x_4], Original ATen: [aten.convolution, aten.mul]
triton_poi_fused_convolution_mul_0.run(buf6, primals_5, buf7, 1296, grid=grid(1296), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 4, 8, 8), (256, 64, 8, 1))
buf9 = buf8; del buf8 # reuse
buf10 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5, x_6], Original ATen: [aten.convolution, aten.mul]
triton_poi_fused_convolution_mul_1.run(buf9, primals_7, buf10, 1024, grid=grid(1024), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.convolution]
buf11 = extern_kernels.convolution(buf10, primals_8, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 4, 8, 8), (256, 64, 8, 1))
buf12 = buf11; del buf11 # reuse
buf13 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_7, x_8], Original ATen: [aten.convolution, aten.mul]
triton_poi_fused_convolution_mul_1.run(buf12, primals_9, buf13, 1024, grid=grid(1024), stream=stream0)
del primals_9
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.convolution]
buf14 = extern_kernels.convolution(buf13, primals_10, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 10, 7, 7), (490, 49, 7, 1))
buf15 = empty_strided_cuda((4, 1, 7, 7), (49, 196, 7, 1), torch.float32)
buf16 = empty_strided_cuda((4, 1, 7, 7), (49, 196, 7, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_9, output], Original ATen: [aten.convolution, aten._log_softmax]
triton_per_fused__log_softmax_convolution_2.run(buf14, primals_11, buf15, buf16, 196, 10, grid=grid(196), stream=stream0)
buf17 = empty_strided_cuda((4, 10, 7, 7), (490, 49, 7, 1), torch.float32)
buf18 = empty_strided_cuda((4, 10, 7, 7), (490, 49, 7, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_9, output, squeeze], Original ATen: [aten.convolution, aten._log_softmax, aten.squeeze, aten._log_softmax_backward_data]
triton_poi_fused__log_softmax__log_softmax_backward_data_convolution_squeeze_3.run(buf14, primals_11, buf15, buf16, buf17, buf18, 1960, grid=grid(1960), stream=stream0)
del buf14
del buf15
del buf16
del primals_11
return (buf17, primals_2, primals_4, primals_6, primals_8, primals_10, buf1, buf3, buf4, buf6, buf7, buf9, buf10, buf12, buf13, buf18, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 1, 2, 2), (4, 4, 2, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 2, 2), (16, 4, 2, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((10, 4, 2, 2), (16, 4, 2, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.nn.utils.prune
import torch.backends.cudnn
import torch.cuda
import torch.nn
import torch.utils.data
class ClassicalConv2(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 4, 2, 1)
self.conv2 = nn.Conv2d(4, 4, 1, 1)
self.conv3 = nn.Conv2d(4, 4, 2, 1)
self.conv4 = nn.Conv2d(4, 4, 1, 1)
self.conv5 = nn.Conv2d(4, 10, 2, 1)
self.act = lambda x: x * x
def forward(self, x):
x = F.avg_pool2d(x, 6)
x = self.conv1(x)
x = self.act(x)
x = self.conv2(x)
x = self.act(x)
x = self.conv3(x)
x = self.act(x)
x = self.conv4(x)
x = self.act(x)
x = self.conv5(x)
output = F.log_softmax(x, dim=1)
return output.squeeze()
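# Shape walk-through for the default [4, 1, 64, 64] input (consistent with the
# size asserts in the compiled wrapper above): avg_pool2d(6) -> [4, 1, 10, 10],
# conv1 (k=2) -> [4, 4, 9, 9], conv3 (k=2) -> [4, 4, 8, 8], conv5 (k=2) ->
# [4, 10, 7, 7]; log_softmax then normalizes over the 10 class channels.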
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.utils.prune
import torch.backends.cudnn
import torch.cuda
import torch.nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_mul_0(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1296
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 81 % 4
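# x1 is the output-channel index: each (batch, channel) plane holds 81 = 9*9
# pixels across 4 channels, so channel = (flat_index // 81) % 4. The kernel
# adds the per-channel conv bias in-place and stores the squared activation
# (the model's x * x nonlinearity) in a second buffer for the next conv.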
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tmp2 * tmp2
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp3, xmask)
@triton.jit
def triton_poi_fused_convolution_mul_1(in_out_ptr0, in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 64 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tmp2 * tmp2
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp3, xmask)
@triton.jit
def triton_per_fused__log_softmax_convolution_2(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 196
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r2 = rindex
x0 = xindex % 49
x1 = xindex // 49
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 49 * r2 + 490 * x1), rmask & xmask,
other=0.0)
    tmp1 = tl.load(in_ptr1 + r2, rmask, eviction_policy='evict_last', other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(rmask & xmask, tmp3, float('-inf'))
tmp6 = triton_helpers.max2(tmp5, 1)[:, None]
tmp7 = tmp2 - tmp6
tmp8 = tl_math.exp(tmp7)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.where(rmask & xmask, tmp9, 0)
tmp12 = tl.sum(tmp11, 1)[:, None]
tl.store(out_ptr0 + x3, tmp6, xmask)
tl.store(out_ptr1 + x3, tmp12, xmask)
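# The persistent reduction above fuses the conv bias add with the two
# log-softmax reductions: for each of the 196 = 4*49 output pixels, out_ptr0
# receives the max over the 10 class channels and out_ptr1 the sum of
# exp(x - max), i.e. the numerically stable softmax denominator that the
# final kernel consumes.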
@triton.jit
def triton_poi_fused__log_softmax__log_softmax_backward_data_convolution_squeeze_3(
in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 1960
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 49 % 10
x0 = xindex % 49
x2 = xindex // 490
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
    tmp3 = tl.load(in_ptr2 + (x0 + 49 * x2), xmask, eviction_policy='evict_last')
    tmp5 = tl.load(in_ptr3 + (x0 + 49 * x2), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = tl_math.log(tmp5)
tmp7 = tmp4 - tmp6
tmp8 = tl_math.exp(tmp7)
tl.store(out_ptr0 + x3, tmp7, xmask)
tl.store(out_ptr1 + x3, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_2, (4, 1, 2, 2), (4, 4, 2, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 2, 2), (16, 4, 2, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_9, (4,), (1,))
assert_size_stride(primals_10, (10, 4, 2, 2), (16, 4, 2, 1))
assert_size_stride(primals_11, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = torch.ops.aten.avg_pool2d.default(primals_1, [6, 6], [6, 6],
[0, 0], False, True, None)
del primals_1
buf1 = buf0
del buf0
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 9, 9), (324, 81, 9, 1))
buf3 = buf2
del buf2
buf4 = empty_strided_cuda((4, 4, 9, 9), (324, 81, 9, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_mul_0[grid(1296)](buf3, primals_3,
buf4, 1296, XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf5 = extern_kernels.convolution(buf4, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 9, 9), (324, 81, 9, 1))
buf6 = buf5
del buf5
buf7 = empty_strided_cuda((4, 4, 9, 9), (324, 81, 9, 1), torch.float32)
triton_poi_fused_convolution_mul_0[grid(1296)](buf6, primals_5,
buf7, 1296, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf8 = extern_kernels.convolution(buf7, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 4, 8, 8), (256, 64, 8, 1))
buf9 = buf8
del buf8
        buf10 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
triton_poi_fused_convolution_mul_1[grid(1024)](buf9, primals_7,
buf10, 1024, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf11 = extern_kernels.convolution(buf10, primals_8, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 4, 8, 8), (256, 64, 8, 1))
buf12 = buf11
del buf11
        buf13 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
triton_poi_fused_convolution_mul_1[grid(1024)](buf12, primals_9,
buf13, 1024, XBLOCK=256, num_warps=4, num_stages=1)
del primals_9
buf14 = extern_kernels.convolution(buf13, primals_10, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 10, 7, 7), (490, 49, 7, 1))
        buf15 = empty_strided_cuda((4, 1, 7, 7), (49, 196, 7, 1), torch.float32)
        buf16 = empty_strided_cuda((4, 1, 7, 7), (49, 196, 7, 1), torch.float32)
triton_per_fused__log_softmax_convolution_2[grid(196)](buf14,
primals_11, buf15, buf16, 196, 10, XBLOCK=1, num_warps=2,
num_stages=1)
        buf17 = empty_strided_cuda((4, 10, 7, 7), (490, 49, 7, 1), torch.float32)
        buf18 = empty_strided_cuda((4, 10, 7, 7), (490, 49, 7, 1), torch.float32)
triton_poi_fused__log_softmax__log_softmax_backward_data_convolution_squeeze_3[
grid(1960)](buf14, primals_11, buf15, buf16, buf17, buf18, 1960,
XBLOCK=128, num_warps=4, num_stages=1)
del buf14
del buf15
del buf16
del primals_11
return (buf17, primals_2, primals_4, primals_6, primals_8, primals_10,
buf1, buf3, buf4, buf6, buf7, buf9, buf10, buf12, buf13, buf18)
class ClassicalConv2New(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 4, 2, 1)
self.conv2 = nn.Conv2d(4, 4, 1, 1)
self.conv3 = nn.Conv2d(4, 4, 2, 1)
self.conv4 = nn.Conv2d(4, 4, 1, 1)
self.conv5 = nn.Conv2d(4, 10, 2, 1)
self.act = lambda x: x * x
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
primals_10 = self.conv5.weight
primals_11 = self.conv5.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
| mit-han-lab/pytorch-quantum | ClassicalConv2 | false | 16,103 | ["MIT"] | 98 | 05cf000d689307f6b1fe02d12744ad455685935b | https://github.com/mit-han-lab/pytorch-quantum/tree/05cf000d689307f6b1fe02d12744ad455685935b |
Anchor3DHead | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/dl/cdl63wpyh2qbdvol3rnwt477ib45bzxuxyqip4uo7tpclvu7kphq.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 1536
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 384
y1 = (yindex // 384)
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (384*x2) + (1572864*y1)), tmp0, ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/lz/clza44aawh56ovvj2bxwrrs5awyxtdsljsznobv25cfir7zmva5m.py
# Topologically Sorted Source Nodes: [cls_score], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# cls_score => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8, 4096], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 8
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y0 = yindex % 2
y1 = (yindex // 2)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (2*x2) + (8192*y1)), ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4096*y3)), tmp2, ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/2e/c2epl7psn644lunhwnw7qf56ft5crbfr7hpx3vuhs26gxjg6depu.py
# Topologically Sorted Source Nodes: [bbox_pred], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# bbox_pred => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4096], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 56
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y0 = yindex % 14
y1 = (yindex // 14)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (14*x2) + (57344*y1)), ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4096*y3)), tmp2, ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/pn/cpngdclw7lzjwmfou3d3kip6qoxuvwgrgxlt5gs2kd5udjsfz5ny.py
# Topologically Sorted Source Nodes: [dir_cls_preds], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# dir_cls_preds => convolution_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_3 = async_compile.triton('triton_poi_fused_convolution_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4096], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_3(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16384*y1)), ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4096*y3)), tmp2, ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (2, 384, 1, 1), (384, 1, 1, 1))
assert_size_stride(primals_2, (2, ), (1, ))
assert_size_stride(primals_3, (4, 384, 64, 64), (1572864, 4096, 64, 1))
assert_size_stride(primals_4, (14, 384, 1, 1), (384, 1, 1, 1))
assert_size_stride(primals_5, (14, ), (1, ))
assert_size_stride(primals_6, (4, 384, 1, 1), (384, 1, 1, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 384, 64, 64), (1572864, 1, 24576, 384), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_3, buf0, 1536, 4096, grid=grid(1536, 4096), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [cls_score], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 2, 64, 64), (8192, 1, 128, 2))
buf2 = empty_strided_cuda((4, 2, 64, 64), (8192, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [cls_score], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf1, primals_2, buf2, 8, 4096, grid=grid(8, 4096), stream=stream0)
del buf1
del primals_2
# Topologically Sorted Source Nodes: [bbox_pred], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf0, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 14, 64, 64), (57344, 1, 896, 14))
buf4 = empty_strided_cuda((4, 14, 64, 64), (57344, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [bbox_pred], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf3, primals_5, buf4, 56, 4096, grid=grid(56, 4096), stream=stream0)
del buf3
del primals_5
# Topologically Sorted Source Nodes: [dir_cls_preds], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf0, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 64, 64), (16384, 1, 256, 4))
buf6 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [dir_cls_preds], Original ATen: [aten.convolution]
triton_poi_fused_convolution_3.run(buf5, primals_7, buf6, 16, 4096, grid=grid(16, 4096), stream=stream0)
del buf5
del primals_7
return (buf2, buf4, buf6, primals_1, buf0, primals_4, primals_6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((2, 384, 1, 1), (384, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 384, 64, 64), (1572864, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((14, 384, 1, 1), (384, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((14, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 384, 1, 1), (384, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
import torch.nn as nn
def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-06):
"""Calculate overlap between two set of bboxes.
    If ``is_aligned`` is ``False``, then calculate the overlaps between each
    bbox of bboxes1 and bboxes2, otherwise the overlaps between each aligned
    pair of bboxes1 and bboxes2.
Args:
bboxes1 (Tensor): shape (B, m, 4) in <x1, y1, x2, y2> format or empty.
bboxes2 (Tensor): shape (B, n, 4) in <x1, y1, x2, y2> format or empty.
B indicates the batch dim, in shape (B1, B2, ..., Bn).
            If ``is_aligned`` is ``True``, then m and n must be equal.
mode (str): "iou" (intersection over union) or "iof" (intersection over
foreground).
is_aligned (bool, optional): If True, then m and n must be equal.
Default False.
eps (float, optional): A value added to the denominator for numerical
stability. Default 1e-6.
Returns:
        Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)
Example:
>>> bboxes1 = torch.FloatTensor([
>>> [0, 0, 10, 10],
>>> [10, 10, 20, 20],
>>> [32, 32, 38, 42],
>>> ])
>>> bboxes2 = torch.FloatTensor([
>>> [0, 0, 10, 20],
>>> [0, 10, 10, 19],
>>> [10, 10, 20, 20],
>>> ])
>>> overlaps = bbox_overlaps(bboxes1, bboxes2)
>>> assert overlaps.shape == (3, 3)
>>> overlaps = bbox_overlaps(bboxes1, bboxes2, is_aligned=True)
>>> assert overlaps.shape == (3, )
Example:
>>> empty = torch.empty(0, 4)
>>> nonempty = torch.FloatTensor([[0, 0, 10, 9]])
>>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1)
>>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0)
>>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0)
"""
assert mode in ['iou', 'iof', 'giou'], f'Unsupported mode {mode}'
assert bboxes1.size(-1) == 4 or bboxes1.size(0) == 0
assert bboxes2.size(-1) == 4 or bboxes2.size(0) == 0
assert bboxes1.shape[:-2] == bboxes2.shape[:-2]
batch_shape = bboxes1.shape[:-2]
rows = bboxes1.size(-2)
cols = bboxes2.size(-2)
if is_aligned:
assert rows == cols
if rows * cols == 0:
if is_aligned:
return bboxes1.new(batch_shape + (rows,))
else:
return bboxes1.new(batch_shape + (rows, cols))
area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * (bboxes1[..., 3] -
bboxes1[..., 1])
area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * (bboxes2[..., 3] -
bboxes2[..., 1])
if is_aligned:
lt = torch.max(bboxes1[..., :2], bboxes2[..., :2])
rb = torch.min(bboxes1[..., 2:], bboxes2[..., 2:])
wh = (rb - lt).clamp(min=0)
overlap = wh[..., 0] * wh[..., 1]
if mode in ['iou', 'giou']:
union = area1 + area2 - overlap
else:
union = area1
if mode == 'giou':
enclosed_lt = torch.min(bboxes1[..., :2], bboxes2[..., :2])
enclosed_rb = torch.max(bboxes1[..., 2:], bboxes2[..., 2:])
else:
lt = torch.max(bboxes1[..., :, None, :2], bboxes2[..., None, :, :2])
rb = torch.min(bboxes1[..., :, None, 2:], bboxes2[..., None, :, 2:])
wh = (rb - lt).clamp(min=0)
overlap = wh[..., 0] * wh[..., 1]
if mode in ['iou', 'giou']:
union = area1[..., None] + area2[..., None, :] - overlap
else:
union = area1[..., None]
if mode == 'giou':
enclosed_lt = torch.min(bboxes1[..., :, None, :2], bboxes2[...,
None, :, :2])
enclosed_rb = torch.max(bboxes1[..., :, None, 2:], bboxes2[...,
None, :, 2:])
eps = union.new_tensor([eps])
union = torch.max(union, eps)
ious = overlap / union
if mode in ['iou', 'iof']:
return ious
enclose_wh = (enclosed_rb - enclosed_lt).clamp(min=0)
enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1]
enclose_area = torch.max(enclose_area, eps)
gious = ious - (enclose_area - union) / enclose_area
return gious
def box3d_to_bev(boxes3d):
"""Convert rotated 3d boxes in XYZWHDR format to BEV in XYWHR format.
Args:
boxes3d (torch.Tensor): Rotated boxes in XYZWHDR format.
Returns:
torch.Tensor: Converted BEV boxes in XYWHR format.
"""
return boxes3d[:, [0, 1, 3, 4, 6]]
def limit_period(val, offset=0.5, period=np.pi):
"""Limit the value into a period for periodic function.
Args:
val (torch.Tensor): The value to be converted.
offset (float, optional): Offset to set the value range. Defaults to 0.5.
period ([type], optional): Period of the value. Defaults to np.pi.
Returns:
torch.Tensor: Value in the range of [-offset * period, (1-offset) * period]
"""
return val - torch.floor(val / period + offset) * period
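# Worked example (illustrative, not from the source): with offset=0.5 and
# period=pi, limit_period(3*pi/4) = 3*pi/4 - floor(3/4 + 1/2) * pi = -pi/4,
# which lies in the stated range [-pi/2, pi/2).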
def box3d_to_bev2d(boxes3d):
"""Convert rotated 3d boxes in XYZWHDR format to neareset BEV without
rotation.
Args:
boxes3d (torch.Tensor): Rotated boxes in XYZWHDR format.
Returns:
torch.Tensor: Converted BEV boxes in XYWH format.
"""
bev_rotated_boxes = box3d_to_bev(boxes3d)
rotations = bev_rotated_boxes[:, -1]
normed_rotations = torch.abs(limit_period(rotations, 0.5, np.pi))
conditions = (normed_rotations > np.pi / 4)[..., None]
    bboxes_xywh = torch.where(conditions, bev_rotated_boxes[:, [0, 1, 3, 2]], bev_rotated_boxes[:, :4])
centers = bboxes_xywh[:, :2]
dims = bboxes_xywh[:, 2:]
bev_boxes = torch.cat([centers - dims / 2, centers + dims / 2], dim=-1)
return bev_boxes
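# Illustrative example: a BEV box rotated by ~pi/2 satisfies
# |limit_period(r)| > pi/4, so its width and length are swapped before the
# axis-aligned [x1, y1, x2, y2] corners are computed.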
def xywhr_to_xyxyr(boxes_xywhr):
"""Convert rotated boxes in XYWHR format to XYXYR format.
Args:
boxes_xywhr (torch.Tensor): Rotated boxes in XYWHR format.
Returns:
torch.Tensor: Converted boxes in XYXYR format.
"""
boxes = torch.zeros_like(boxes_xywhr)
half_w = boxes_xywhr[:, 2] / 2
half_h = boxes_xywhr[:, 3] / 2
boxes[:, 0] = boxes_xywhr[:, 0] - half_w
boxes[:, 1] = boxes_xywhr[:, 1] - half_h
boxes[:, 2] = boxes_xywhr[:, 0] + half_w
boxes[:, 3] = boxes_xywhr[:, 1] + half_h
boxes[:, 4] = boxes_xywhr[:, 4]
return boxes
def multiclass_nms(boxes, scores, score_thr):
"""Multi-class nms for 3D boxes.
Args:
boxes (torch.Tensor): Multi-level boxes with shape (N, M).
M is the dimensions of boxes.
scores (torch.Tensor): Multi-level boxes with shape
(N, ). N is the number of boxes.
score_thr (float): Score threshold to filter boxes with low
confidence.
Returns:
list[torch.Tensor]: Return a list of indices after nms,
with an entry for each class.
"""
idxs = []
for i in range(scores.shape[1]):
cls_inds = scores[:, i] > score_thr
if not cls_inds.any():
            idxs.append(torch.tensor([], dtype=torch.long, device=cls_inds.device))
continue
orig_idx = torch.arange(cls_inds.shape[0], device=cls_inds.device,
dtype=torch.long)[cls_inds]
_scores = scores[cls_inds, i]
_boxes = boxes[cls_inds, :]
_bev = xywhr_to_xyxyr(box3d_to_bev(_boxes))
        # NOTE: `nms` (rotated-box NMS over the BEV boxes) is not defined or
        # imported in this snippet; it is assumed to be provided by the
        # surrounding package.
        idx = nms(_bev, _scores, 0.01)
idxs.append(orig_idx[idx])
return idxs
class Anchor3DRangeGenerator(object):
"""3D Anchor Generator by range.
This anchor generator generates anchors by the given range in different
feature levels.
Args:
ranges (list[list[float]]): Ranges of different anchors.
The ranges are the same across different feature levels. But may
vary for different anchor sizes if size_per_range is True.
sizes (list[list[float]]): 3D sizes of anchors.
rotations (list[float]): Rotations of anchors in a feature grid.
"""
def __init__(self, ranges, sizes=[[1.6, 3.9, 1.56]], rotations=[0,
1.5707963]):
if len(sizes) != len(ranges):
assert len(ranges) == 1
ranges = ranges * len(sizes)
assert len(ranges) == len(sizes)
self.sizes = sizes
self.ranges = ranges
self.rotations = rotations
@property
def num_base_anchors(self):
"""list[int]: Total number of base anchors in a feature grid."""
num_rot = len(self.rotations)
num_size = torch.tensor(self.sizes).reshape(-1, 3).size(0)
return num_rot * num_size
def grid_anchors(self, featmap_size, device='cuda'):
"""Generate grid anchors of a single level feature map.
This function is usually called by method ``self.grid_anchors``.
Args:
featmap_size (tuple[int]): Size of the feature map.
device (str, optional): Device the tensor will be put on.
Defaults to 'cuda'.
Returns:
torch.Tensor: Anchors in the overall feature map.
"""
mr_anchors = []
for anchor_range, anchor_size in zip(self.ranges, self.sizes):
mr_anchors.append(self.anchors_single_range(featmap_size,
anchor_range, anchor_size, self.rotations, device=device))
mr_anchors = torch.cat(mr_anchors, dim=-3)
return mr_anchors
def anchors_single_range(self, feature_size, anchor_range, sizes=[[1.6,
3.9, 1.56]], rotations=[0, 1.5707963], device='cuda'):
"""Generate anchors in a single range.
Args:
feature_size (list[float] | tuple[float]): Feature map size. It is
either a list of a tuple of [D, H, W](in order of z, y, and x).
anchor_range (torch.Tensor | list[float]): Range of anchors with
shape [6]. The order is consistent with that of anchors, i.e.,
(x_min, y_min, z_min, x_max, y_max, z_max).
sizes (list[list] | np.ndarray | torch.Tensor): Anchor size with
shape [N, 3], in order of x, y, z.
rotations (list[float] | np.ndarray | torch.Tensor): Rotations of
anchors in a single feature grid.
device (str): Devices that the anchors will be put on.
Returns:
torch.Tensor: Anchors with shape [*feature_size, num_sizes, num_rots, 7].
"""
if len(feature_size) == 2:
feature_size = [1, feature_size[0], feature_size[1]]
anchor_range = torch.tensor(anchor_range, device=device)
z_centers = torch.linspace(anchor_range[2], anchor_range[5],
feature_size[0], device=device)
y_centers = torch.linspace(anchor_range[1], anchor_range[4],
feature_size[1], device=device)
x_centers = torch.linspace(anchor_range[0], anchor_range[3],
feature_size[2], device=device)
sizes = torch.tensor(sizes, device=device).reshape(-1, 3)
rotations = torch.tensor(rotations, device=device)
rets = torch.meshgrid(x_centers, y_centers, z_centers, rotations)
rets = list(rets)
for i in range(len(rets)):
rets[i] = rets[i].unsqueeze(-2).unsqueeze(-1)
sizes = sizes.reshape([1, 1, 1, 1, 1, 3])
tile_size_shape = list(rets[0].shape)
tile_size_shape[3] = 1
sizes = sizes.repeat(tile_size_shape)
rets.insert(3, sizes)
ret = torch.cat(rets, dim=-1).permute([2, 1, 0, 3, 4, 5])
return ret
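# Shape sketch (illustrative, single anchor size): for a feature map of size
# (H, W), feature_size becomes [1, H, W] and the returned tensor has shape
# [1, H, W, 1, num_rots, 7], ordered (z, y, x, size, rotation, box params);
# grid_anchors then concatenates one such tensor per anchor range along dim=-3.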
class BBoxCoder(object):
"""Bbox Coder for 3D boxes.
Args:
code_size (int): The dimension of boxes to be encoded.
"""
def __init__(self):
super(BBoxCoder, self).__init__()
@staticmethod
def encode(src_boxes, dst_boxes):
"""Get box regression transformation deltas (dx, dy, dz, dw, dh, dl, dr,
dv*) that can be used to transform the `src_boxes` into the
`target_boxes`.
Args:
src_boxes (torch.Tensor): source boxes, e.g., object proposals.
dst_boxes (torch.Tensor): target of the transformation, e.g.,
ground-truth boxes.
Returns:
torch.Tensor: Box transformation deltas.
"""
xa, ya, za, wa, la, ha, ra = torch.split(src_boxes, 1, dim=-1)
xg, yg, zg, wg, lg, hg, rg = torch.split(dst_boxes, 1, dim=-1)
za = za + ha / 2
zg = zg + hg / 2
diagonal = torch.sqrt(la ** 2 + wa ** 2)
xt = (xg - xa) / diagonal
yt = (yg - ya) / diagonal
zt = (zg - za) / ha
lt = torch.log(lg / la)
wt = torch.log(wg / wa)
ht = torch.log(hg / ha)
rt = rg - ra
return torch.cat([xt, yt, zt, wt, lt, ht, rt], dim=-1)
@staticmethod
def decode(anchors, deltas):
"""Apply transformation `deltas` (dx, dy, dz, dw, dh, dl, dr, dv*) to
`boxes`.
Args:
anchors (torch.Tensor): Parameters of anchors with shape (N, 7).
deltas (torch.Tensor): Encoded boxes with shape
(N, 7+n) [x, y, z, w, l, h, r, velo*].
Returns:
torch.Tensor: Decoded boxes.
"""
xa, ya, za, wa, la, ha, ra = torch.split(anchors, 1, dim=-1)
xt, yt, zt, wt, lt, ht, rt = torch.split(deltas, 1, dim=-1)
za = za + ha / 2
diagonal = torch.sqrt(la ** 2 + wa ** 2)
xg = xt * diagonal + xa
yg = yt * diagonal + ya
zg = zt * ha + za
lg = torch.exp(lt) * la
wg = torch.exp(wt) * wa
hg = torch.exp(ht) * ha
rg = rt + ra
zg = zg - hg / 2
return torch.cat([xg, yg, zg, wg, lg, hg, rg], dim=-1)
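# Minimal round-trip sketch (illustrative, not part of the source): decode()
# inverts encode(), so for any anchor/target pair
#   deltas = BBoxCoder.encode(anchors, targets)
#   torch.allclose(BBoxCoder.decode(anchors, deltas), targets)
# holds up to floating-point error, because the z/h half-height offsets, the
# log size ratios and the diagonal normalization are applied symmetrically.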
class Anchor3DHead(nn.Module):
def __init__(self, num_classes=1, in_channels=384, feat_channels=384,
nms_pre=100, score_thr=0.1, dir_offset=0, ranges=[[0, -40.0, -3,
70.0, 40.0, 1]], sizes=[[0.6, 1.0, 1.5]], rotations=[0, 1.57],
iou_thr=[[0.35, 0.5]]):
super().__init__()
self.in_channels = in_channels
self.num_classes = num_classes
self.feat_channels = feat_channels
self.nms_pre = nms_pre
self.score_thr = score_thr
self.dir_offset = dir_offset
self.iou_thr = iou_thr
if len(self.iou_thr) != num_classes:
assert len(self.iou_thr) == 1
self.iou_thr = self.iou_thr * num_classes
assert len(self.iou_thr) == num_classes
        self.anchor_generator = Anchor3DRangeGenerator(ranges=ranges, sizes=sizes, rotations=rotations)
self.num_anchors = self.anchor_generator.num_base_anchors
self.bbox_coder = BBoxCoder()
self.box_code_size = 7
self.fp16_enabled = False
self.cls_out_channels = self.num_anchors * self.num_classes
self.conv_cls = nn.Conv2d(self.feat_channels, self.cls_out_channels, 1)
self.conv_reg = nn.Conv2d(self.feat_channels, self.num_anchors *
self.box_code_size, 1)
self.conv_dir_cls = nn.Conv2d(self.feat_channels, self.num_anchors *
2, 1)
self.init_weights()
@staticmethod
def bias_init_with_prob(prior_prob):
"""Initialize conv/fc bias value according to giving probablity."""
bias_init = float(-np.log((1 - prior_prob) / prior_prob))
return bias_init
@staticmethod
def normal_init(module, mean=0, std=1, bias=0):
nn.init.normal_(module.weight, mean, std)
if hasattr(module, 'bias') and module.bias is not None:
nn.init.constant_(module.bias, bias)
def init_weights(self):
"""Initialize the weights of head."""
bias_cls = self.bias_init_with_prob(0.01)
self.normal_init(self.conv_cls, std=0.01, bias=bias_cls)
self.normal_init(self.conv_reg, std=0.01)
def forward(self, x):
"""Forward function on a feature map.
Args:
x (torch.Tensor): Input features.
Returns:
            tuple[torch.Tensor]: The per-class scores, bbox regression and direction classification predictions.
"""
cls_score = self.conv_cls(x)
bbox_pred = self.conv_reg(x)
dir_cls_preds = None
dir_cls_preds = self.conv_dir_cls(x)
return cls_score, bbox_pred, dir_cls_preds
def assign_bboxes(self, pred_bboxes, target_bboxes):
"""Assigns target bboxes to given anchors.
Args:
pred_bboxes (torch.Tensor): Bbox predictions (anchors).
target_bboxes (torch.Tensor): Bbox targets.
Returns:
torch.Tensor: Assigned target bboxes for each given anchor.
torch.Tensor: Flat index of matched targets.
torch.Tensor: Index of positive matches.
torch.Tensor: Index of negative matches.
"""
anchors = [self.anchor_generator.grid_anchors(pred_bboxes.shape[-2:
], device=pred_bboxes.device) for _ in range(len(target_bboxes))]
anchors_cnt = torch.tensor(anchors[0].shape[:-1]).prod()
rot_angles = anchors[0].shape[-2]
assigned_bboxes, target_idxs, pos_idxs, neg_idxs = [], [], [], []
def flatten_idx(idx, j):
"""Inject class dimension in the given indices (...
z * rot_angles + x) --> (.. z * num_classes * rot_angles + j * rot_angles + x)
"""
z = idx // rot_angles
x = idx % rot_angles
return z * self.num_classes * rot_angles + j * rot_angles + x
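        # Worked example (illustrative): with rot_angles=2 and num_classes=2,
        # the per-class flat index idx=5 splits into z=2, x=1, so for class
        # j=1 it maps to 2*2*2 + 1*2 + 1 = 11 in the class-interleaved layout.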
idx_off = 0
for i in range(len(target_bboxes)):
for j, (neg_th, pos_th) in enumerate(self.iou_thr):
                anchors_stride = anchors[i][..., j, :, :].reshape(-1, self.box_code_size)
if target_bboxes[i].shape[0] == 0:
                    assigned_bboxes.append(torch.zeros((0, 7), device=pred_bboxes.device))
                    target_idxs.append(torch.zeros((0,), dtype=torch.long, device=pred_bboxes.device))
                    pos_idxs.append(torch.zeros((0,), dtype=torch.long, device=pred_bboxes.device))
                    neg_idxs.append(torch.zeros((0,), dtype=torch.long, device=pred_bboxes.device))
continue
overlaps = bbox_overlaps(box3d_to_bev2d(target_bboxes[i]),
box3d_to_bev2d(anchors_stride))
max_overlaps, argmax_overlaps = overlaps.max(dim=0)
gt_max_overlaps, _ = overlaps.max(dim=1)
pos_idx = max_overlaps >= pos_th
neg_idx = (max_overlaps >= 0) & (max_overlaps < neg_th)
for k in range(len(target_bboxes[i])):
if gt_max_overlaps[k] >= neg_th:
pos_idx[overlaps[k, :] == gt_max_overlaps[k]] = True
                assigned_bboxes.append(self.bbox_coder.encode(
                    anchors_stride[pos_idx], target_bboxes[i][argmax_overlaps[pos_idx]]))
target_idxs.append(argmax_overlaps[pos_idx] + idx_off)
                pos_idx = flatten_idx(pos_idx.nonzero(as_tuple=False).squeeze(-1), j) + i * anchors_cnt
                neg_idx = flatten_idx(neg_idx.nonzero(as_tuple=False).squeeze(-1), j) + i * anchors_cnt
pos_idxs.append(pos_idx)
neg_idxs.append(neg_idx)
idx_off += len(target_bboxes[i])
return torch.cat(assigned_bboxes, axis=0), torch.cat(target_idxs,
axis=0), torch.cat(pos_idxs, axis=0), torch.cat(neg_idxs, axis=0)
def get_bboxes(self, cls_scores, bbox_preds, dir_preds):
"""Get bboxes of anchor head.
Args:
cls_scores (list[torch.Tensor]): Class scores.
bbox_preds (list[torch.Tensor]): Bbox predictions.
            dir_preds (list[torch.Tensor]): Direction class predictions.
Returns:
tuple[torch.Tensor]: Prediction results of batches
(bboxes, scores, labels).
"""
bboxes, scores, labels = [], [], []
for cls_score, bbox_pred, dir_pred in zip(cls_scores, bbox_preds,
dir_preds):
b, s, l = self.get_bboxes_single(cls_score, bbox_pred, dir_pred)
bboxes.append(b)
scores.append(s)
labels.append(l)
return bboxes, scores, labels
def get_bboxes_single(self, cls_scores, bbox_preds, dir_preds):
"""Get bboxes of anchor head.
Args:
            cls_scores (torch.Tensor): Class scores for one sample.
            bbox_preds (torch.Tensor): Bbox predictions for one sample.
            dir_preds (torch.Tensor): Direction class predictions for one sample.
        Returns:
            tuple[torch.Tensor]: Prediction results for one sample
                (bboxes, scores, labels).
"""
assert cls_scores.size()[-2:] == bbox_preds.size()[-2:]
assert cls_scores.size()[-2:] == dir_preds.size()[-2:]
anchors = self.anchor_generator.grid_anchors(cls_scores.shape[-2:],
device=cls_scores.device)
anchors = anchors.reshape(-1, self.box_code_size)
dir_preds = dir_preds.permute(1, 2, 0).reshape(-1, 2)
dir_scores = torch.max(dir_preds, dim=-1)[1]
cls_scores = cls_scores.permute(1, 2, 0).reshape(-1, self.num_classes)
scores = cls_scores.sigmoid()
        bbox_preds = bbox_preds.permute(1, 2, 0).reshape(-1, self.box_code_size)
if scores.shape[0] > self.nms_pre:
max_scores, _ = scores.max(dim=1)
_, topk_inds = max_scores.topk(self.nms_pre)
anchors = anchors[topk_inds, :]
bbox_preds = bbox_preds[topk_inds, :]
scores = scores[topk_inds, :]
dir_scores = dir_scores[topk_inds]
bboxes = self.bbox_coder.decode(anchors, bbox_preds)
idxs = multiclass_nms(bboxes, scores, self.score_thr)
labels = [torch.full((len(idxs[i]),), i, dtype=torch.long) for i in
range(self.num_classes)]
labels = torch.cat(labels)
scores = [scores[idxs[i], i] for i in range(self.num_classes)]
scores = torch.cat(scores)
idxs = torch.cat(idxs)
bboxes = bboxes[idxs]
dir_scores = dir_scores[idxs]
if bboxes.shape[0] > 0:
dir_rot = limit_period(bboxes[..., 6] - self.dir_offset, 1, np.pi)
bboxes[..., 6] = dir_rot + self.dir_offset + np.pi * dir_scores
return bboxes, scores, labels
def get_inputs():
return [torch.rand([4, 384, 64, 64])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
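# Permutes the NCHW input (4, 384, 64, 64) into a channels-last buffer so the
# three 1x1 convolution heads can consume a contiguous-channel layout.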
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 1536
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 384
y1 = yindex // 384
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 384 * x2 + 1572864 * y1), tmp0, ymask)
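# Adds the classification-head bias (2 channels) and writes the channels-last
# convolution output back in contiguous NCHW order.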
@triton.jit
def triton_poi_fused_convolution_1(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 8
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y0 = yindex % 2
y1 = yindex // 2
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 2 * x2 + 8192 * y1), ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4096 * y3), tmp2, ymask)
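# Same bias-add and NCHW restore for the 14-channel box-regression output.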
@triton.jit
def triton_poi_fused_convolution_2(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 56
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y0 = yindex % 14
y1 = yindex // 14
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 14 * x2 + 57344 * y1), ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4096 * y3), tmp2, ymask)
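# Same bias-add and NCHW restore for the 4-channel direction-classifier output.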
@triton.jit
def triton_poi_fused_convolution_3(in_ptr0, in_ptr1, out_ptr0, ynumel,
xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16384 * y1), ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4096 * y3), tmp2, ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (2, 384, 1, 1), (384, 1, 1, 1))
assert_size_stride(primals_2, (2,), (1,))
assert_size_stride(primals_3, (4, 384, 64, 64), (1572864, 4096, 64, 1))
assert_size_stride(primals_4, (14, 384, 1, 1), (384, 1, 1, 1))
assert_size_stride(primals_5, (14,), (1,))
assert_size_stride(primals_6, (4, 384, 1, 1), (384, 1, 1, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
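        # The three 1x1 conv heads (cls, reg, dir) share the channels-last
        # input buffer buf0; each result gets its bias added back in NCHW.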
buf0 = empty_strided_cuda((4, 384, 64, 64), (1572864, 1, 24576, 384
), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(1536, 4096)](primals_3, buf0, 1536, 4096,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_3
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 2, 64, 64), (8192, 1, 128, 2))
buf2 = empty_strided_cuda((4, 2, 64, 64), (8192, 4096, 64, 1),
torch.float32)
triton_poi_fused_convolution_1[grid(8, 4096)](buf1, primals_2, buf2,
8, 4096, XBLOCK=256, YBLOCK=1, num_warps=4, num_stages=1)
del buf1
del primals_2
buf3 = extern_kernels.convolution(buf0, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 14, 64, 64), (57344, 1, 896, 14))
buf4 = empty_strided_cuda((4, 14, 64, 64), (57344, 4096, 64, 1),
torch.float32)
triton_poi_fused_convolution_2[grid(56, 4096)](buf3, primals_5,
buf4, 56, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del buf3
del primals_5
buf5 = extern_kernels.convolution(buf0, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 4, 64, 64), (16384, 1, 256, 4))
buf6 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1),
torch.float32)
triton_poi_fused_convolution_3[grid(16, 4096)](buf5, primals_7,
buf6, 16, 4096, XBLOCK=32, YBLOCK=16, num_warps=4, num_stages=1)
del buf5
del primals_7
return buf2, buf4, buf6, primals_1, buf0, primals_4, primals_6
def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-06):
"""Calculate overlap between two set of bboxes.
If ``is_aligned `` is ``False``, then calculate the overlaps between each
bbox of bboxes1 and bboxes2, otherwise the overlaps between each aligned
pair of bboxes1 and bboxes2.
Args:
bboxes1 (Tensor): shape (B, m, 4) in <x1, y1, x2, y2> format or empty.
bboxes2 (Tensor): shape (B, n, 4) in <x1, y1, x2, y2> format or empty.
B indicates the batch dim, in shape (B1, B2, ..., Bn).
            If ``is_aligned`` is ``True``, then m and n must be equal.
        mode (str): "iou" (intersection over union), "iof" (intersection
            over foreground) or "giou" (generalized IoU).
is_aligned (bool, optional): If True, then m and n must be equal.
Default False.
eps (float, optional): A value added to the denominator for numerical
stability. Default 1e-6.
Returns:
        Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)
Example:
>>> bboxes1 = torch.FloatTensor([
>>> [0, 0, 10, 10],
>>> [10, 10, 20, 20],
>>> [32, 32, 38, 42],
>>> ])
>>> bboxes2 = torch.FloatTensor([
>>> [0, 0, 10, 20],
>>> [0, 10, 10, 19],
>>> [10, 10, 20, 20],
>>> ])
>>> overlaps = bbox_overlaps(bboxes1, bboxes2)
>>> assert overlaps.shape == (3, 3)
>>> overlaps = bbox_overlaps(bboxes1, bboxes2, is_aligned=True)
>>> assert overlaps.shape == (3, )
Example:
>>> empty = torch.empty(0, 4)
>>> nonempty = torch.FloatTensor([[0, 0, 10, 9]])
>>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1)
>>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0)
>>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0)
"""
assert mode in ['iou', 'iof', 'giou'], f'Unsupported mode {mode}'
assert bboxes1.size(-1) == 4 or bboxes1.size(0) == 0
assert bboxes2.size(-1) == 4 or bboxes2.size(0) == 0
assert bboxes1.shape[:-2] == bboxes2.shape[:-2]
batch_shape = bboxes1.shape[:-2]
rows = bboxes1.size(-2)
cols = bboxes2.size(-2)
if is_aligned:
assert rows == cols
if rows * cols == 0:
if is_aligned:
return bboxes1.new(batch_shape + (rows,))
else:
return bboxes1.new(batch_shape + (rows, cols))
area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * (bboxes1[..., 3] -
bboxes1[..., 1])
area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * (bboxes2[..., 3] -
bboxes2[..., 1])
if is_aligned:
lt = torch.max(bboxes1[..., :2], bboxes2[..., :2])
rb = torch.min(bboxes1[..., 2:], bboxes2[..., 2:])
wh = (rb - lt).clamp(min=0)
overlap = wh[..., 0] * wh[..., 1]
if mode in ['iou', 'giou']:
union = area1 + area2 - overlap
else:
union = area1
if mode == 'giou':
enclosed_lt = torch.min(bboxes1[..., :2], bboxes2[..., :2])
enclosed_rb = torch.max(bboxes1[..., 2:], bboxes2[..., 2:])
else:
lt = torch.max(bboxes1[..., :, None, :2], bboxes2[..., None, :, :2])
rb = torch.min(bboxes1[..., :, None, 2:], bboxes2[..., None, :, 2:])
wh = (rb - lt).clamp(min=0)
overlap = wh[..., 0] * wh[..., 1]
if mode in ['iou', 'giou']:
union = area1[..., None] + area2[..., None, :] - overlap
else:
union = area1[..., None]
if mode == 'giou':
enclosed_lt = torch.min(bboxes1[..., :, None, :2], bboxes2[...,
None, :, :2])
enclosed_rb = torch.max(bboxes1[..., :, None, 2:], bboxes2[...,
None, :, 2:])
eps = union.new_tensor([eps])
union = torch.max(union, eps)
ious = overlap / union
if mode in ['iou', 'iof']:
return ious
enclose_wh = (enclosed_rb - enclosed_lt).clamp(min=0)
enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1]
enclose_area = torch.max(enclose_area, eps)
gious = ious - (enclose_area - union) / enclose_area
return gious
def box3d_to_bev(boxes3d):
"""Convert rotated 3d boxes in XYZWHDR format to BEV in XYWHR format.
Args:
boxes3d (torch.Tensor): Rotated boxes in XYZWHDR format.
Returns:
torch.Tensor: Converted BEV boxes in XYWHR format.
"""
return boxes3d[:, [0, 1, 3, 4, 6]]
def limit_period(val, offset=0.5, period=np.pi):
"""Limit the value into a period for periodic function.
Args:
val (torch.Tensor): The value to be converted.
offset (float, optional): Offset to set the value range. Defaults to 0.5.
period ([type], optional): Period of the value. Defaults to np.pi.
Returns:
torch.Tensor: Value in the range of [-offset * period, (1-offset) * period]
"""
return val - torch.floor(val / period + offset) * period
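# Illustrative self-check for limit_period (a sketch; the helper name is an
# assumption, not part of the original file):
def _limit_period_example():
    """limit_period(3.5, 0.5, pi) = 3.5 - floor(3.5 / pi + 0.5) * pi ~= 0.3584,
    which lies inside the documented range [-pi/2, pi/2]."""
    return limit_period(torch.tensor([3.5]), offset=0.5, period=np.pi)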
def box3d_to_bev2d(boxes3d):
"""Convert rotated 3d boxes in XYZWHDR format to neareset BEV without
rotation.
Args:
boxes3d (torch.Tensor): Rotated boxes in XYZWHDR format.
Returns:
torch.Tensor: Converted BEV boxes in XYWH format.
"""
bev_rotated_boxes = box3d_to_bev(boxes3d)
rotations = bev_rotated_boxes[:, -1]
normed_rotations = torch.abs(limit_period(rotations, 0.5, np.pi))
conditions = (normed_rotations > np.pi / 4)[..., None]
bboxes_xywh = torch.where(conditions, bev_rotated_boxes[:, [0, 1, 3, 2]
], bev_rotated_boxes[:, :4])
centers = bboxes_xywh[:, :2]
dims = bboxes_xywh[:, 2:]
bev_boxes = torch.cat([centers - dims / 2, centers + dims / 2], dim=-1)
return bev_boxes
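# Worked example (illustrative; z and d are ignored by the conversion): a box
# [0, 0, z, 2, 4, d, pi/2] is rotated by ~pi/2, so its extents swap before the
# corner conversion and the BEV box is [-2, -1, 2, 1] instead of [-1, -2, 1, 2].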
def xywhr_to_xyxyr(boxes_xywhr):
"""Convert rotated boxes in XYWHR format to XYXYR format.
Args:
boxes_xywhr (torch.Tensor): Rotated boxes in XYWHR format.
Returns:
torch.Tensor: Converted boxes in XYXYR format.
"""
boxes = torch.zeros_like(boxes_xywhr)
half_w = boxes_xywhr[:, 2] / 2
half_h = boxes_xywhr[:, 3] / 2
boxes[:, 0] = boxes_xywhr[:, 0] - half_w
boxes[:, 1] = boxes_xywhr[:, 1] - half_h
boxes[:, 2] = boxes_xywhr[:, 0] + half_w
boxes[:, 3] = boxes_xywhr[:, 1] + half_h
boxes[:, 4] = boxes_xywhr[:, 4]
return boxes
def multiclass_nms(boxes, scores, score_thr):
"""Multi-class nms for 3D boxes.
Args:
        boxes (torch.Tensor): Boxes with shape (N, M), where M is the
            box dimension.
        scores (torch.Tensor): Per-class scores with shape (N, C), where
            N is the number of boxes and C the number of classes.
        score_thr (float): Score threshold to filter boxes with low
            confidence.
Returns:
list[torch.Tensor]: Return a list of indices after nms,
with an entry for each class.
"""
idxs = []
for i in range(scores.shape[1]):
cls_inds = scores[:, i] > score_thr
if not cls_inds.any():
idxs.append(torch.tensor([], dtype=torch.long, device=cls_inds.
device))
continue
orig_idx = torch.arange(cls_inds.shape[0], device=cls_inds.device,
dtype=torch.long)[cls_inds]
_scores = scores[cls_inds, i]
_boxes = boxes[cls_inds, :]
_bev = xywhr_to_xyxyr(box3d_to_bev(_boxes))
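        # NOTE: `nms` (a BEV-box NMS op) is assumed to be provided by the
        # surrounding project; it is not defined in this snippet.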
idx = nms(_bev, _scores, 0.01)
idxs.append(orig_idx[idx])
return idxs
class Anchor3DRangeGenerator(object):
"""3D Anchor Generator by range.
This anchor generator generates anchors by the given range in different
feature levels.
Args:
ranges (list[list[float]]): Ranges of different anchors.
The ranges are the same across different feature levels. But may
vary for different anchor sizes if size_per_range is True.
sizes (list[list[float]]): 3D sizes of anchors.
rotations (list[float]): Rotations of anchors in a feature grid.
"""
def __init__(self, ranges, sizes=[[1.6, 3.9, 1.56]], rotations=[0,
1.5707963]):
if len(sizes) != len(ranges):
assert len(ranges) == 1
ranges = ranges * len(sizes)
assert len(ranges) == len(sizes)
self.sizes = sizes
self.ranges = ranges
self.rotations = rotations
@property
def num_base_anchors(self):
"""list[int]: Total number of base anchors in a feature grid."""
num_rot = len(self.rotations)
num_size = torch.tensor(self.sizes).reshape(-1, 3).size(0)
return num_rot * num_size
def grid_anchors(self, featmap_size, device='cuda'):
"""Generate grid anchors of a single level feature map.
This function is usually called by method ``self.grid_anchors``.
Args:
featmap_size (tuple[int]): Size of the feature map.
device (str, optional): Device the tensor will be put on.
Defaults to 'cuda'.
Returns:
torch.Tensor: Anchors in the overall feature map.
"""
mr_anchors = []
for anchor_range, anchor_size in zip(self.ranges, self.sizes):
mr_anchors.append(self.anchors_single_range(featmap_size,
anchor_range, anchor_size, self.rotations, device=device))
mr_anchors = torch.cat(mr_anchors, dim=-3)
return mr_anchors
def anchors_single_range(self, feature_size, anchor_range, sizes=[[1.6,
3.9, 1.56]], rotations=[0, 1.5707963], device='cuda'):
"""Generate anchors in a single range.
Args:
            feature_size (list[float] | tuple[float]): Feature map size. It
                is either a list or a tuple of [D, H, W] (in z, y, x order).
anchor_range (torch.Tensor | list[float]): Range of anchors with
shape [6]. The order is consistent with that of anchors, i.e.,
(x_min, y_min, z_min, x_max, y_max, z_max).
sizes (list[list] | np.ndarray | torch.Tensor): Anchor size with
shape [N, 3], in order of x, y, z.
rotations (list[float] | np.ndarray | torch.Tensor): Rotations of
anchors in a single feature grid.
device (str): Devices that the anchors will be put on.
Returns:
torch.Tensor: Anchors with shape [*feature_size, num_sizes, num_rots, 7].
"""
if len(feature_size) == 2:
feature_size = [1, feature_size[0], feature_size[1]]
anchor_range = torch.tensor(anchor_range, device=device)
z_centers = torch.linspace(anchor_range[2], anchor_range[5],
feature_size[0], device=device)
y_centers = torch.linspace(anchor_range[1], anchor_range[4],
feature_size[1], device=device)
x_centers = torch.linspace(anchor_range[0], anchor_range[3],
feature_size[2], device=device)
sizes = torch.tensor(sizes, device=device).reshape(-1, 3)
rotations = torch.tensor(rotations, device=device)
rets = torch.meshgrid(x_centers, y_centers, z_centers, rotations)
rets = list(rets)
for i in range(len(rets)):
rets[i] = rets[i].unsqueeze(-2).unsqueeze(-1)
sizes = sizes.reshape([1, 1, 1, 1, 1, 3])
tile_size_shape = list(rets[0].shape)
tile_size_shape[3] = 1
sizes = sizes.repeat(tile_size_shape)
rets.insert(3, sizes)
ret = torch.cat(rets, dim=-1).permute([2, 1, 0, 3, 4, 5])
return ret
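# Shape sketch (illustrative): feature_size=[1, 2, 2] with one anchor size and
# two rotations yields anchors of shape [1, 2, 2, 1, 2, 7], laid out as
# [D, H, W, num_sizes, num_rots, box_code_size].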
class BBoxCoder(object):
"""Bbox Coder for 3D boxes.
Args:
code_size (int): The dimension of boxes to be encoded.
"""
def __init__(self):
super(BBoxCoder, self).__init__()
@staticmethod
def encode(src_boxes, dst_boxes):
"""Get box regression transformation deltas (dx, dy, dz, dw, dh, dl, dr,
dv*) that can be used to transform the `src_boxes` into the
        `dst_boxes`.
Args:
src_boxes (torch.Tensor): source boxes, e.g., object proposals.
dst_boxes (torch.Tensor): target of the transformation, e.g.,
ground-truth boxes.
Returns:
torch.Tensor: Box transformation deltas.
"""
xa, ya, za, wa, la, ha, ra = torch.split(src_boxes, 1, dim=-1)
xg, yg, zg, wg, lg, hg, rg = torch.split(dst_boxes, 1, dim=-1)
za = za + ha / 2
zg = zg + hg / 2
diagonal = torch.sqrt(la ** 2 + wa ** 2)
xt = (xg - xa) / diagonal
yt = (yg - ya) / diagonal
zt = (zg - za) / ha
lt = torch.log(lg / la)
wt = torch.log(wg / wa)
ht = torch.log(hg / ha)
rt = rg - ra
return torch.cat([xt, yt, zt, wt, lt, ht, rt], dim=-1)
@staticmethod
def decode(anchors, deltas):
"""Apply transformation `deltas` (dx, dy, dz, dw, dh, dl, dr, dv*) to
        `anchors`.
Args:
anchors (torch.Tensor): Parameters of anchors with shape (N, 7).
deltas (torch.Tensor): Encoded boxes with shape
(N, 7+n) [x, y, z, w, l, h, r, velo*].
Returns:
torch.Tensor: Decoded boxes.
"""
xa, ya, za, wa, la, ha, ra = torch.split(anchors, 1, dim=-1)
xt, yt, zt, wt, lt, ht, rt = torch.split(deltas, 1, dim=-1)
za = za + ha / 2
diagonal = torch.sqrt(la ** 2 + wa ** 2)
xg = xt * diagonal + xa
yg = yt * diagonal + ya
zg = zt * ha + za
lg = torch.exp(lt) * la
wg = torch.exp(wt) * wa
hg = torch.exp(ht) * ha
rg = rt + ra
zg = zg - hg / 2
return torch.cat([xg, yg, zg, wg, lg, hg, rg], dim=-1)
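# Illustrative round-trip sketch (the helper name is an assumption; random
# boxes are offset so w, l, h stay strictly positive):
def _bbox_coder_roundtrip_example():
    """encode followed by decode should reproduce the target boxes."""
    anchors = torch.rand(8, 7) + 0.5
    targets = torch.rand(8, 7) + 0.5
    deltas = BBoxCoder.encode(anchors, targets)
    return torch.allclose(BBoxCoder.decode(anchors, deltas), targets, atol=1e-4)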
class Anchor3DHeadNew(nn.Module):
def __init__(self, num_classes=1, in_channels=384, feat_channels=384,
nms_pre=100, score_thr=0.1, dir_offset=0, ranges=[[0, -40.0, -3,
70.0, 40.0, 1]], sizes=[[0.6, 1.0, 1.5]], rotations=[0, 1.57],
iou_thr=[[0.35, 0.5]]):
super().__init__()
self.in_channels = in_channels
self.num_classes = num_classes
self.feat_channels = feat_channels
self.nms_pre = nms_pre
self.score_thr = score_thr
self.dir_offset = dir_offset
self.iou_thr = iou_thr
if len(self.iou_thr) != num_classes:
assert len(self.iou_thr) == 1
self.iou_thr = self.iou_thr * num_classes
assert len(self.iou_thr) == num_classes
self.anchor_generator = Anchor3DRangeGenerator(ranges=ranges, sizes
=sizes, rotations=rotations)
self.num_anchors = self.anchor_generator.num_base_anchors
self.bbox_coder = BBoxCoder()
self.box_code_size = 7
self.fp16_enabled = False
self.cls_out_channels = self.num_anchors * self.num_classes
self.conv_cls = nn.Conv2d(self.feat_channels, self.cls_out_channels, 1)
self.conv_reg = nn.Conv2d(self.feat_channels, self.num_anchors *
self.box_code_size, 1)
self.conv_dir_cls = nn.Conv2d(self.feat_channels, self.num_anchors *
2, 1)
self.init_weights()
@staticmethod
def bias_init_with_prob(prior_prob):
"""Initialize conv/fc bias value according to giving probablity."""
bias_init = float(-np.log((1 - prior_prob) / prior_prob))
return bias_init
@staticmethod
def normal_init(module, mean=0, std=1, bias=0):
nn.init.normal_(module.weight, mean, std)
if hasattr(module, 'bias') and module.bias is not None:
nn.init.constant_(module.bias, bias)
def init_weights(self):
"""Initialize the weights of head."""
bias_cls = self.bias_init_with_prob(0.01)
self.normal_init(self.conv_cls, std=0.01, bias=bias_cls)
self.normal_init(self.conv_reg, std=0.01)
def assign_bboxes(self, pred_bboxes, target_bboxes):
"""Assigns target bboxes to given anchors.
Args:
pred_bboxes (torch.Tensor): Bbox predictions (anchors).
target_bboxes (torch.Tensor): Bbox targets.
Returns:
torch.Tensor: Assigned target bboxes for each given anchor.
torch.Tensor: Flat index of matched targets.
torch.Tensor: Index of positive matches.
torch.Tensor: Index of negative matches.
"""
anchors = [self.anchor_generator.grid_anchors(pred_bboxes.shape[-2:
], device=pred_bboxes.device) for _ in range(len(target_bboxes))]
anchors_cnt = torch.tensor(anchors[0].shape[:-1]).prod()
rot_angles = anchors[0].shape[-2]
assigned_bboxes, target_idxs, pos_idxs, neg_idxs = [], [], [], []
def flatten_idx(idx, j):
"""Inject class dimension in the given indices (...
z * rot_angles + x) --> (.. z * num_classes * rot_angles + j * rot_angles + x)
"""
z = idx // rot_angles
x = idx % rot_angles
return z * self.num_classes * rot_angles + j * rot_angles + x
idx_off = 0
for i in range(len(target_bboxes)):
for j, (neg_th, pos_th) in enumerate(self.iou_thr):
anchors_stride = anchors[i][..., j, :, :].reshape(-1, self.
box_code_size)
if target_bboxes[i].shape[0] == 0:
assigned_bboxes.append(torch.zeros((0, 7), device=
pred_bboxes.device))
target_idxs.append(torch.zeros((0,), dtype=torch.long,
device=pred_bboxes.device))
pos_idxs.append(torch.zeros((0,), dtype=torch.long,
device=pred_bboxes.device))
neg_idxs.append(torch.zeros((0,), dtype=torch.long,
device=pred_bboxes.device))
continue
overlaps = bbox_overlaps(box3d_to_bev2d(target_bboxes[i]),
box3d_to_bev2d(anchors_stride))
max_overlaps, argmax_overlaps = overlaps.max(dim=0)
gt_max_overlaps, _ = overlaps.max(dim=1)
pos_idx = max_overlaps >= pos_th
neg_idx = (max_overlaps >= 0) & (max_overlaps < neg_th)
for k in range(len(target_bboxes[i])):
if gt_max_overlaps[k] >= neg_th:
pos_idx[overlaps[k, :] == gt_max_overlaps[k]] = True
assigned_bboxes.append(self.bbox_coder.encode(
anchors_stride[pos_idx], target_bboxes[i][
argmax_overlaps[pos_idx]]))
target_idxs.append(argmax_overlaps[pos_idx] + idx_off)
pos_idx = flatten_idx(pos_idx.nonzero(as_tuple=False).
squeeze(-1), j) + i * anchors_cnt
neg_idx = flatten_idx(neg_idx.nonzero(as_tuple=False).
squeeze(-1), j) + i * anchors_cnt
pos_idxs.append(pos_idx)
neg_idxs.append(neg_idx)
idx_off += len(target_bboxes[i])
return torch.cat(assigned_bboxes, axis=0), torch.cat(target_idxs,
axis=0), torch.cat(pos_idxs, axis=0), torch.cat(neg_idxs, axis=0)
def get_bboxes(self, cls_scores, bbox_preds, dir_preds):
"""Get bboxes of anchor head.
Args:
cls_scores (list[torch.Tensor]): Class scores.
bbox_preds (list[torch.Tensor]): Bbox predictions.
            dir_preds (list[torch.Tensor]): Direction
                class predictions.
Returns:
tuple[torch.Tensor]: Prediction results of batches
(bboxes, scores, labels).
"""
bboxes, scores, labels = [], [], []
for cls_score, bbox_pred, dir_pred in zip(cls_scores, bbox_preds,
dir_preds):
b, s, l = self.get_bboxes_single(cls_score, bbox_pred, dir_pred)
bboxes.append(b)
scores.append(s)
labels.append(l)
return bboxes, scores, labels
def get_bboxes_single(self, cls_scores, bbox_preds, dir_preds):
"""Get bboxes of anchor head.
Args:
            cls_scores (torch.Tensor): Class scores.
            bbox_preds (torch.Tensor): Bbox predictions.
            dir_preds (torch.Tensor): Direction
                class predictions.
        Returns:
            tuple[torch.Tensor]: Prediction results for a single sample
                (bboxes, scores, labels).
"""
assert cls_scores.size()[-2:] == bbox_preds.size()[-2:]
assert cls_scores.size()[-2:] == dir_preds.size()[-2:]
anchors = self.anchor_generator.grid_anchors(cls_scores.shape[-2:],
device=cls_scores.device)
anchors = anchors.reshape(-1, self.box_code_size)
dir_preds = dir_preds.permute(1, 2, 0).reshape(-1, 2)
dir_scores = torch.max(dir_preds, dim=-1)[1]
cls_scores = cls_scores.permute(1, 2, 0).reshape(-1, self.num_classes)
scores = cls_scores.sigmoid()
bbox_preds = bbox_preds.permute(1, 2, 0).reshape(-1, self.box_code_size
)
if scores.shape[0] > self.nms_pre:
max_scores, _ = scores.max(dim=1)
_, topk_inds = max_scores.topk(self.nms_pre)
anchors = anchors[topk_inds, :]
bbox_preds = bbox_preds[topk_inds, :]
scores = scores[topk_inds, :]
dir_scores = dir_scores[topk_inds]
bboxes = self.bbox_coder.decode(anchors, bbox_preds)
idxs = multiclass_nms(bboxes, scores, self.score_thr)
labels = [torch.full((len(idxs[i]),), i, dtype=torch.long) for i in
range(self.num_classes)]
labels = torch.cat(labels)
scores = [scores[idxs[i], i] for i in range(self.num_classes)]
scores = torch.cat(scores)
idxs = torch.cat(idxs)
bboxes = bboxes[idxs]
dir_scores = dir_scores[idxs]
if bboxes.shape[0] > 0:
dir_rot = limit_period(bboxes[..., 6] - self.dir_offset, 1, np.pi)
bboxes[..., 6] = dir_rot + self.dir_offset + np.pi * dir_scores
return bboxes, scores, labels
def forward(self, input_0):
primals_1 = self.conv_cls.weight
primals_2 = self.conv_cls.bias
primals_4 = self.conv_reg.weight
primals_5 = self.conv_reg.bias
primals_6 = self.conv_dir_cls.weight
primals_7 = self.conv_dir_cls.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0], output[1], output[2]
| mi-exwzd/Open3D-ML | Anchor3DHead | false | 16,104 | [
"MIT"
] | 447 | d58b24edd37de7889446360164cd5500e0bde060 | https://github.com/mi-exwzd/Open3D-ML/tree/d58b24edd37de7889446360164cd5500e0bde060 |
CRF | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/pg/cpg5vjqssrlsnglazs6sllqz33jc7cdnlonls4f4kcehzuhy7ewb.py
# Topologically Sorted Source Nodes: [feats_norm], Original ATen: [aten.linalg_vector_norm]
# Source node to ATen node mapping:
# feats_norm => pow_1, pow_2, sum_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [2], True), kwargs = {})
# %pow_2 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
triton_poi_fused_linalg_vector_norm_0 = async_compile.triton('triton_poi_fused_linalg_vector_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_linalg_vector_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = libdevice.sqrt(tmp10)
tl.store(out_ptr0 + (x0), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/n5/cn5teeeignn4sp3kp3ujrwymtwcb7yaywvx3h7kpqyi4ixhma2ot.py
# Topologically Sorted Source Nodes: [pairwise_sim], Original ATen: [aten.div]
# Source node to ATen node mapping:
# pairwise_sim => div
# Graph fragment:
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%bmm_1, %bmm), kwargs = {})
triton_poi_fused_div_1 = async_compile.triton('triton_poi_fused_div_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_out_ptr0 + (x0), xmask)
tmp2 = tmp0 / tmp1
tl.store(in_out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/zt/cztirch7jrz5e7flqpqcr5acbutu5sz47zvhuaocrfzdpb7umhxt.py
# Topologically Sorted Source Nodes: [add, W_sym, pairwise_potential, mul_1, sub, mul_2, sub_1, pairwise_potential_E], Original ATen: [aten.add, aten.div, aten.mul, aten.rsub, aten.sub, aten.sum]
# Source node to ATen node mapping:
# W_sym => div_1
# add => add
# mul_1 => mul_1
# mul_2 => mul_2
# pairwise_potential => mul
# pairwise_potential_E => sum_2
# sub => sub
# sub_1 => sub_1
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %permute_2), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, 2), kwargs = {})
# %mul : [num_users=20] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %div_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_3, %mul), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %permute_3), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %mul), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_1, %mul_2), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sub_1, [2], True), kwargs = {})
triton_poi_fused_add_div_mul_rsub_sub_sum_2 = async_compile.triton('triton_poi_fused_add_div_mul_rsub_sub_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_rsub_sub_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_rsub_sub_sum_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
tmp2 = tl.load(in_ptr1 + (4*x2), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (4*x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
tmp16 = tl.load(in_ptr1 + (1 + (4*x2)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr2 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr2 + (4 + x0), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
tmp29 = tl.load(in_ptr1 + (2 + (4*x2)), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr2 + (8 + x0), xmask, eviction_policy='evict_last')
tmp40 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
tmp42 = tl.load(in_ptr1 + (3 + (4*x2)), xmask, eviction_policy='evict_last')
tmp43 = tl.load(in_ptr2 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp44 = tl.load(in_ptr2 + (12 + x0), xmask, eviction_policy='evict_last')
tmp1 = tl.sigmoid(tmp0)
tmp5 = tmp3 + tmp4
tmp6 = 0.5
tmp7 = tmp5 * tmp6
tmp8 = tmp2 * tmp7
tmp9 = tmp1 * tmp8
tmp10 = 1.0
tmp11 = tmp10 - tmp1
tmp12 = tmp11 * tmp8
tmp13 = tmp9 - tmp12
tmp15 = tl.sigmoid(tmp14)
tmp19 = tmp17 + tmp18
tmp20 = tmp19 * tmp6
tmp21 = tmp16 * tmp20
tmp22 = tmp15 * tmp21
tmp23 = tmp10 - tmp15
tmp24 = tmp23 * tmp21
tmp25 = tmp22 - tmp24
tmp26 = tmp13 + tmp25
tmp28 = tl.sigmoid(tmp27)
tmp32 = tmp30 + tmp31
tmp33 = tmp32 * tmp6
tmp34 = tmp29 * tmp33
tmp35 = tmp28 * tmp34
tmp36 = tmp10 - tmp28
tmp37 = tmp36 * tmp34
tmp38 = tmp35 - tmp37
tmp39 = tmp26 + tmp38
tmp41 = tl.sigmoid(tmp40)
tmp45 = tmp43 + tmp44
tmp46 = tmp45 * tmp6
tmp47 = tmp42 * tmp46
tmp48 = tmp41 * tmp47
tmp49 = tmp10 - tmp41
tmp50 = tmp49 * tmp47
tmp51 = tmp48 - tmp50
tmp52 = tmp39 + tmp51
tl.store(out_ptr0 + (x2), tmp52, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/4y/c4y7ef5neroioo45nhdxozozllc2bmzg27jz2uy5fyq34ehhexcn.py
# Topologically Sorted Source Nodes: [add, W_sym, pairwise_potential, mul_3, sub_2, mul_4, sub_3, pairwise_potential_E_1], Original ATen: [aten.add, aten.div, aten.mul, aten.rsub, aten.sub, aten.sum]
# Source node to ATen node mapping:
# W_sym => div_1
# add => add
# mul_3 => mul_3
# mul_4 => mul_4
# pairwise_potential => mul
# pairwise_potential_E_1 => sum_3
# sub_2 => sub_2
# sub_3 => sub_3
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_2, %permute_2), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, 2), kwargs = {})
# %mul : [num_users=20] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %div_1), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_4, %mul), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %permute_4), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %mul), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_3, %mul_4), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%sub_3, [2], True), kwargs = {})
triton_poi_fused_add_div_mul_rsub_sub_sum_3 = async_compile.triton('triton_poi_fused_add_div_mul_rsub_sub_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_rsub_sub_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 20, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_rsub_sub_sum_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + (4*x2), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (4*x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr3 + (x0), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (4 + x0 + (16*x1)), xmask)
tmp17 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr2 + (1 + (4*x2)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr3 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr0 + (8 + x0 + (16*x1)), xmask)
tmp32 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr2 + (2 + (4*x2)), xmask, eviction_policy='evict_last')
tmp36 = tl.load(in_ptr3 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp37 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last')
tmp46 = tl.load(in_ptr0 + (12 + x0 + (16*x1)), xmask)
tmp47 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp50 = tl.load(in_ptr2 + (3 + (4*x2)), xmask, eviction_policy='evict_last')
tmp51 = tl.load(in_ptr3 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp52 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tmp7 = tmp5 + tmp6
tmp8 = 0.5
tmp9 = tmp7 * tmp8
tmp10 = tmp4 * tmp9
tmp11 = tmp3 * tmp10
tmp12 = 1.0
tmp13 = tmp12 - tmp3
tmp14 = tmp13 * tmp10
tmp15 = tmp11 - tmp14
tmp18 = tmp16 + tmp17
tmp19 = tl.sigmoid(tmp18)
tmp23 = tmp21 + tmp22
tmp24 = tmp23 * tmp8
tmp25 = tmp20 * tmp24
tmp26 = tmp19 * tmp25
tmp27 = tmp12 - tmp19
tmp28 = tmp27 * tmp25
tmp29 = tmp26 - tmp28
tmp30 = tmp15 + tmp29
tmp33 = tmp31 + tmp32
tmp34 = tl.sigmoid(tmp33)
tmp38 = tmp36 + tmp37
tmp39 = tmp38 * tmp8
tmp40 = tmp35 * tmp39
tmp41 = tmp34 * tmp40
tmp42 = tmp12 - tmp34
tmp43 = tmp42 * tmp40
tmp44 = tmp41 - tmp43
tmp45 = tmp30 + tmp44
tmp48 = tmp46 + tmp47
tmp49 = tl.sigmoid(tmp48)
tmp53 = tmp51 + tmp52
tmp54 = tmp53 * tmp8
tmp55 = tmp50 * tmp54
tmp56 = tmp49 * tmp55
tmp57 = tmp12 - tmp49
tmp58 = tmp57 * tmp55
tmp59 = tmp56 - tmp58
tmp60 = tmp45 + tmp59
tl.store(out_ptr0 + (x2), tmp60, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/lu/cluvgpnuehlfe66u3wqcj3n6cyvp6elikx7xp4egoya6c67hvs6p.py
# Topologically Sorted Source Nodes: [logits_9], Original ATen: [aten.add]
# Source node to ATen node mapping:
# logits_9 => add_10
# Graph fragment:
# %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %sum_11), kwargs = {})
triton_poi_fused_add_4 = async_compile.triton('triton_poi_fused_add_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [feats_norm], Original ATen: [aten.linalg_vector_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_linalg_vector_norm_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pairwise_norm], Original ATen: [aten.bmm]
extern_kernels.bmm(buf0, reinterpret_tensor(buf0, (4, 1, 4), (4, 16, 1), 0), out=buf1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pairwise_dot], Original ATen: [aten.bmm]
extern_kernels.bmm(primals_1, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0), out=buf2)
del primals_1
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [pairwise_sim], Original ATen: [aten.div]
triton_poi_fused_div_1.run(buf3, buf2, 64, grid=grid(64), stream=stream0)
buf4 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [add, W_sym, pairwise_potential, mul_1, sub, mul_2, sub_1, pairwise_potential_E], Original ATen: [aten.add, aten.div, aten.mul, aten.rsub, aten.sub, aten.sum]
triton_poi_fused_add_div_mul_rsub_sub_sum_2.run(primals_3, buf3, primals_2, buf4, 16, grid=grid(16), stream=stream0)
buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [add, W_sym, pairwise_potential, mul_3, sub_2, mul_4, sub_3, pairwise_potential_E_1], Original ATen: [aten.add, aten.div, aten.mul, aten.rsub, aten.sub, aten.sum]
triton_poi_fused_add_div_mul_rsub_sub_sum_3.run(primals_3, buf4, buf3, primals_2, buf5, 16, grid=grid(16), stream=stream0)
buf6 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [add, W_sym, pairwise_potential, mul_5, sub_4, mul_6, sub_5, pairwise_potential_E_2], Original ATen: [aten.add, aten.div, aten.mul, aten.rsub, aten.sub, aten.sum]
triton_poi_fused_add_div_mul_rsub_sub_sum_3.run(primals_3, buf5, buf3, primals_2, buf6, 16, grid=grid(16), stream=stream0)
buf7 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [add, W_sym, pairwise_potential, mul_7, sub_6, mul_8, sub_7, pairwise_potential_E_3], Original ATen: [aten.add, aten.div, aten.mul, aten.rsub, aten.sub, aten.sum]
triton_poi_fused_add_div_mul_rsub_sub_sum_3.run(primals_3, buf6, buf3, primals_2, buf7, 16, grid=grid(16), stream=stream0)
buf8 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [add, W_sym, pairwise_potential, mul_9, sub_8, mul_10, sub_9, pairwise_potential_E_4], Original ATen: [aten.add, aten.div, aten.mul, aten.rsub, aten.sub, aten.sum]
triton_poi_fused_add_div_mul_rsub_sub_sum_3.run(primals_3, buf7, buf3, primals_2, buf8, 16, grid=grid(16), stream=stream0)
buf9 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [add, W_sym, pairwise_potential, mul_11, sub_10, mul_12, sub_11, pairwise_potential_E_5], Original ATen: [aten.add, aten.div, aten.mul, aten.rsub, aten.sub, aten.sum]
triton_poi_fused_add_div_mul_rsub_sub_sum_3.run(primals_3, buf8, buf3, primals_2, buf9, 16, grid=grid(16), stream=stream0)
buf10 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [add, W_sym, pairwise_potential, mul_13, sub_12, mul_14, sub_13, pairwise_potential_E_6], Original ATen: [aten.add, aten.div, aten.mul, aten.rsub, aten.sub, aten.sum]
triton_poi_fused_add_div_mul_rsub_sub_sum_3.run(primals_3, buf9, buf3, primals_2, buf10, 16, grid=grid(16), stream=stream0)
buf11 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [add, W_sym, pairwise_potential, mul_15, sub_14, mul_16, sub_15, pairwise_potential_E_7], Original ATen: [aten.add, aten.div, aten.mul, aten.rsub, aten.sub, aten.sum]
triton_poi_fused_add_div_mul_rsub_sub_sum_3.run(primals_3, buf10, buf3, primals_2, buf11, 16, grid=grid(16), stream=stream0)
buf12 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [add, W_sym, pairwise_potential, mul_17, sub_16, mul_18, sub_17, pairwise_potential_E_8], Original ATen: [aten.add, aten.div, aten.mul, aten.rsub, aten.sub, aten.sum]
triton_poi_fused_add_div_mul_rsub_sub_sum_3.run(primals_3, buf11, buf3, primals_2, buf12, 16, grid=grid(16), stream=stream0)
buf13 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [add, W_sym, pairwise_potential, mul_19, sub_18, mul_20, sub_19, pairwise_potential_E_9], Original ATen: [aten.add, aten.div, aten.mul, aten.rsub, aten.sub, aten.sum]
triton_poi_fused_add_div_mul_rsub_sub_sum_3.run(primals_3, buf12, buf3, primals_2, buf13, 16, grid=grid(16), stream=stream0)
del buf12
buf14 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [logits_9], Original ATen: [aten.add]
triton_poi_fused_add_4.run(primals_3, buf13, buf14, 64, grid=grid(64), stream=stream0)
del buf13
return (buf14, primals_2, primals_3, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class CRF(nn.Module):
def __init__(self, num_nodes, iteration=10):
"""Initialize the CRF module
Args:
num_nodes: int, number of nodes/patches within the fully CRF
iteration: int, number of mean field iterations, e.g. 10
"""
super(CRF, self).__init__()
self.num_nodes = num_nodes
self.iteration = iteration
self.W = nn.Parameter(torch.zeros(1, num_nodes, num_nodes))
def forward(self, feats, logits):
"""Performing the CRF. Algorithm details is explained below:
Within the paper, I formulate the CRF distribution using negative
energy and cost, e.g. cosine distance, to derive pairwise potentials
following the convention in energy based models. But for implementation
simplicity, I use reward, e.g. cosine similarity to derive pairwise
potentials. So now, pairwise potentials would encourage high reward for
assigning (y_i, y_j) with the same label if (x_i, x_j) are similar, as
measured by cosine similarity, pairwise_sim. For
pairwise_potential_E = torch.sum(
probs * pairwise_potential - (1 - probs) * pairwise_potential,
dim=2, keepdim=True
)
This is taking the expectation of pairwise potentials using the current
marginal distribution of each patch being tumor, i.e. probs. There are
four cases to consider when taking the expectation between (i, j):
1. i=T,j=T; 2. i=N,j=T; 3. i=T,j=N; 4. i=N,j=N
probs is the marginal distribution of each i being tumor, therefore
logits > 0 means tumor and logits < 0 means normal. Given this, the
full expectation equation should be:
        [probs * +pairwise_potential] + [(1 - probs) * +pairwise_potential] +
                  (case 1)                            (case 2)
        [probs * -pairwise_potential] + [(1 - probs) * -pairwise_potential]
                  (case 3)                            (case 4)
positive sign rewards logits to be more tumor and negative sign rewards
logits to be more normal. But because of label compatibility, i.e. the
indicator function within equation 3 in the paper, case 2 and case 3
are dropped, which ends up being:
probs * pairwise_potential - (1 - probs) * pairwise_potential
        At a high level: if the (i, j) embeddings are dissimilar, then
        pairwise_potential, computed as cosine similarity, approaches 0,
        which has no effect either way. If the (i, j) embeddings are similar, then
pairwise_potential would be a positive reward. In this case,
if probs -> 1, then pairwise_potential promotes tumor probability;
if probs -> 0, then -pairwise_potential promotes normal probability.
Args:
feats: 3D tensor with the shape of
[batch_size, num_nodes, embedding_size], where num_nodes is the
number of patches within a grid, e.g. 9 for a 3x3 grid;
embedding_size is the size of extracted feature representation for
each patch from ResNet, e.g. 512
logits: 3D tensor with shape of [batch_size, num_nodes, 1], the
logit of each patch within the grid being tumor before CRF
Returns:
logits: 3D tensor with shape of [batch_size, num_nodes, 1], the
logit of each patch within the grid being tumor after CRF
"""
feats_norm = torch.norm(feats, p=2, dim=2, keepdim=True)
pairwise_norm = torch.bmm(feats_norm, torch.transpose(feats_norm, 1, 2)
)
pairwise_dot = torch.bmm(feats, torch.transpose(feats, 1, 2))
pairwise_sim = pairwise_dot / pairwise_norm
W_sym = (self.W + torch.transpose(self.W, 1, 2)) / 2
pairwise_potential = pairwise_sim * W_sym
unary_potential = logits.clone()
for i in range(self.iteration):
probs = torch.transpose(logits.sigmoid(), 1, 2)
pairwise_potential_E = torch.sum(probs * pairwise_potential - (
1 - probs) * pairwise_potential, dim=2, keepdim=True)
logits = unary_potential + pairwise_potential_E
return logits
def __repr__(self):
return 'CRF(num_nodes={}, iteration={})'.format(self.num_nodes,
self.iteration)
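# Minimal usage sketch for the CRF module (the helper name and shapes are
# illustrative assumptions, not part of the original file):
def _crf_usage_example():
    """Refine tumor logits for a batch of 2 grids with 4 nodes each."""
    crf = CRF(num_nodes=4, iteration=10)
    feats = torch.rand(2, 4, 4)  # [batch_size, num_nodes, embedding_size]
    logits = torch.rand(2, 4, 1)  # [batch_size, num_nodes, 1]
    refined = crf(feats, logits)
    assert refined.shape == logits.shape  # shape is preserved
    return refined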
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'num_nodes': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_linalg_vector_norm_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = libdevice.sqrt(tmp10)
tl.store(out_ptr0 + x0, tmp11, xmask)
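# Divides pairwise_dot by pairwise_norm in place, producing the cosine
# similarity matrix pairwise_sim.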
@triton.jit
def triton_poi_fused_div_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_out_ptr0 + x0, xmask)
tmp2 = tmp0 / tmp1
tl.store(in_out_ptr0 + x0, tmp2, xmask)
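# First mean-field step fused into one kernel: symmetrize W, scale it by
# pairwise_sim, take probs * pp - (1 - probs) * pp with probs = sigmoid(logits),
# and reduce over the neighbor dimension.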
@triton.jit
def triton_poi_fused_add_div_mul_rsub_sub_sum_2(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp2 = tl.load(in_ptr1 + 4 * x2, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + 4 * x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp16 = tl.load(in_ptr1 + (1 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp17 = tl.load(in_ptr2 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr2 + (4 + x0), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp29 = tl.load(in_ptr1 + (2 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp30 = tl.load(in_ptr2 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp31 = tl.load(in_ptr2 + (8 + x0), xmask, eviction_policy='evict_last')
tmp40 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp42 = tl.load(in_ptr1 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp43 = tl.load(in_ptr2 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp44 = tl.load(in_ptr2 + (12 + x0), xmask, eviction_policy='evict_last')
tmp1 = tl.sigmoid(tmp0)
tmp5 = tmp3 + tmp4
tmp6 = 0.5
tmp7 = tmp5 * tmp6
tmp8 = tmp2 * tmp7
tmp9 = tmp1 * tmp8
tmp10 = 1.0
tmp11 = tmp10 - tmp1
tmp12 = tmp11 * tmp8
tmp13 = tmp9 - tmp12
tmp15 = tl.sigmoid(tmp14)
tmp19 = tmp17 + tmp18
tmp20 = tmp19 * tmp6
tmp21 = tmp16 * tmp20
tmp22 = tmp15 * tmp21
tmp23 = tmp10 - tmp15
tmp24 = tmp23 * tmp21
tmp25 = tmp22 - tmp24
tmp26 = tmp13 + tmp25
tmp28 = tl.sigmoid(tmp27)
tmp32 = tmp30 + tmp31
tmp33 = tmp32 * tmp6
tmp34 = tmp29 * tmp33
tmp35 = tmp28 * tmp34
tmp36 = tmp10 - tmp28
tmp37 = tmp36 * tmp34
tmp38 = tmp35 - tmp37
tmp39 = tmp26 + tmp38
tmp41 = tl.sigmoid(tmp40)
tmp45 = tmp43 + tmp44
tmp46 = tmp45 * tmp6
tmp47 = tmp42 * tmp46
tmp48 = tmp41 * tmp47
tmp49 = tmp10 - tmp41
tmp50 = tmp49 * tmp47
tmp51 = tmp48 - tmp50
tmp52 = tmp39 + tmp51
tl.store(out_ptr0 + x2, tmp52, xmask)
@triton.jit
def triton_poi_fused_add_div_mul_rsub_sub_sum_3(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tmp1 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr2 + 4 * x2, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + 4 * x0, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr3 + x0, xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (4 + x0 + 16 * x1), xmask)
tmp17 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp20 = tl.load(in_ptr2 + (1 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp21 = tl.load(in_ptr3 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp22 = tl.load(in_ptr3 + (4 + x0), xmask, eviction_policy='evict_last')
tmp31 = tl.load(in_ptr0 + (8 + x0 + 16 * x1), xmask)
tmp32 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp35 = tl.load(in_ptr2 + (2 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp36 = tl.load(in_ptr3 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp37 = tl.load(in_ptr3 + (8 + x0), xmask, eviction_policy='evict_last')
tmp46 = tl.load(in_ptr0 + (12 + x0 + 16 * x1), xmask)
tmp47 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp50 = tl.load(in_ptr2 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp51 = tl.load(in_ptr3 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp52 = tl.load(in_ptr3 + (12 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tmp7 = tmp5 + tmp6
tmp8 = 0.5
tmp9 = tmp7 * tmp8
tmp10 = tmp4 * tmp9
tmp11 = tmp3 * tmp10
tmp12 = 1.0
tmp13 = tmp12 - tmp3
tmp14 = tmp13 * tmp10
tmp15 = tmp11 - tmp14
tmp18 = tmp16 + tmp17
tmp19 = tl.sigmoid(tmp18)
tmp23 = tmp21 + tmp22
tmp24 = tmp23 * tmp8
tmp25 = tmp20 * tmp24
tmp26 = tmp19 * tmp25
tmp27 = tmp12 - tmp19
tmp28 = tmp27 * tmp25
tmp29 = tmp26 - tmp28
tmp30 = tmp15 + tmp29
tmp33 = tmp31 + tmp32
tmp34 = tl.sigmoid(tmp33)
tmp38 = tmp36 + tmp37
tmp39 = tmp38 * tmp8
tmp40 = tmp35 * tmp39
tmp41 = tmp34 * tmp40
tmp42 = tmp12 - tmp34
tmp43 = tmp42 * tmp40
tmp44 = tmp41 - tmp43
tmp45 = tmp30 + tmp44
tmp48 = tmp46 + tmp47
tmp49 = tl.sigmoid(tmp48)
tmp53 = tmp51 + tmp52
tmp54 = tmp53 * tmp8
tmp55 = tmp50 * tmp54
tmp56 = tmp49 * tmp55
tmp57 = tmp12 - tmp49
tmp58 = tmp57 * tmp55
tmp59 = tmp56 - tmp58
tmp60 = tmp45 + tmp59
tl.store(out_ptr0 + x2, tmp60, xmask)
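# (Reading of the two fused kernels above, as a hedged interpretation; index
#  order is approximate. For each batch b and node i they accumulate a
#  mean-field message
#      msg[b, i] = sum_j (2*sigmoid(u) - 1) * P[b, i, j] * (W[i, j] + W[j, i]) / 2
#  where P is the cosine-similarity pairwise term and W the learned
#  compatibility. The second kernel additionally adds the previous
#  iteration's message (in_ptr1) to the logits before the sigmoid.)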
@triton.jit
def triton_poi_fused_add_4(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
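# (Reading: out[b, i, :] = logits[b, i, :] + msg[b, i], i.e. the final
#  accumulated message is broadcast onto the unary logits.)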
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
get_raw_stream(0)
triton_poi_fused_linalg_vector_norm_0[grid(16)](primals_1, buf0, 16,
XBLOCK=16, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf0, reinterpret_tensor(buf0, (4, 1, 4), (4, 16,
1), 0), out=buf1)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(primals_1, reinterpret_tensor(primals_1, (4, 4,
4), (16, 1, 4), 0), out=buf2)
del primals_1
buf3 = buf1
del buf1
triton_poi_fused_div_1[grid(64)](buf3, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
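        # The ten kernel launches below are the CRF's mean-field iterations,
        # unrolled at compile time (iteration=10); buf4..buf13 ping-pong as
        # the message buffer. (An interpretation of the generated schedule.)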
buf4 = buf0
del buf0
triton_poi_fused_add_div_mul_rsub_sub_sum_2[grid(16)](primals_3,
buf3, primals_2, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused_add_div_mul_rsub_sub_sum_3[grid(16)](primals_3,
buf4, buf3, primals_2, buf5, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf6 = buf4
del buf4
triton_poi_fused_add_div_mul_rsub_sub_sum_3[grid(16)](primals_3,
buf5, buf3, primals_2, buf6, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf7 = buf5
del buf5
triton_poi_fused_add_div_mul_rsub_sub_sum_3[grid(16)](primals_3,
buf6, buf3, primals_2, buf7, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf8 = buf6
del buf6
triton_poi_fused_add_div_mul_rsub_sub_sum_3[grid(16)](primals_3,
buf7, buf3, primals_2, buf8, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf9 = buf7
del buf7
triton_poi_fused_add_div_mul_rsub_sub_sum_3[grid(16)](primals_3,
buf8, buf3, primals_2, buf9, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf10 = buf8
del buf8
triton_poi_fused_add_div_mul_rsub_sub_sum_3[grid(16)](primals_3,
buf9, buf3, primals_2, buf10, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf11 = buf9
del buf9
triton_poi_fused_add_div_mul_rsub_sub_sum_3[grid(16)](primals_3,
buf10, buf3, primals_2, buf11, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf12 = buf10
del buf10
triton_poi_fused_add_div_mul_rsub_sub_sum_3[grid(16)](primals_3,
buf11, buf3, primals_2, buf12, 16, XBLOCK=16, num_warps=1,
num_stages=1)
buf13 = buf11
del buf11
triton_poi_fused_add_div_mul_rsub_sub_sum_3[grid(16)](primals_3,
buf12, buf3, primals_2, buf13, 16, XBLOCK=16, num_warps=1,
num_stages=1)
del buf12
buf14 = buf2
del buf2
triton_poi_fused_add_4[grid(64)](primals_3, buf13, buf14, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del buf13
return buf14, primals_2, primals_3, buf3
class CRFNew(nn.Module):
def __init__(self, num_nodes, iteration=10):
"""Initialize the CRF module
Args:
            num_nodes: int, number of nodes/patches within the fully-connected CRF
            iteration: int, number of mean-field iterations, e.g. 10
"""
super(CRFNew, self).__init__()
self.num_nodes = num_nodes
self.iteration = iteration
self.W = nn.Parameter(torch.zeros(1, num_nodes, num_nodes))
def __repr__(self):
return 'CRF(num_nodes={}, iteration={})'.format(self.num_nodes,
self.iteration)
def forward(self, input_0, input_1):
primals_2 = self.W
primals_1 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
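# Hedged usage sketch (added for illustration, not part of the generated
# output); the shapes follow the assert_size_stride guards in call(), and a
# CUDA device is assumed.
if __name__ == '__main__':
    model = CRFNew(num_nodes=4, iteration=10).cuda()
    feats = torch.rand(4, 4, 4, device='cuda')    # node features
    logits = torch.rand(4, 4, 4, device='cuda')   # unary logits
    out = model(feats, logits)
    print(out.shape)  # torch.Size([4, 4, 4])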
| mingrui/NCRF | CRF | false | 16,105 | [
"Apache-2.0"
] | 734 | d3dcb50739a9eb8621d42ea98b7d0496afe430ca | https://github.com/mingrui/NCRF/tree/d3dcb50739a9eb8621d42ea98b7d0496afe430ca |
AODnet | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/w2/cw26etiuqgfsnlcvfovjrjfkwerbr3hb33ggi6l6pg47hpyjzaos.py
# Topologically Sorted Source Nodes: [conv2d, x1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=4] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 49152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 3
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/3d/c3dyw6akmjhpp35jaqq3z7ozrdt7ywexn7u7aab2ekphhcccz3vm.py
# Topologically Sorted Source Nodes: [cat1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat1 => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu, %relu_1], 1), kwargs = {})
triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 98304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 4096) % 6
x0 = xindex % 4096
x2 = (xindex // 24576)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 3, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (4096*x1) + (12288*x2)), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 6, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x0 + (4096*((-3) + x1)) + (12288*x2)), tmp6, other=0.0)
tmp10 = tl.load(in_ptr2 + ((-3) + x1), tmp6, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp6, tmp13, tmp14)
tmp16 = tl.where(tmp4, tmp5, tmp15)
tl.store(out_ptr0 + (x3), tmp16, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/l7/cl7utfe7bhcs2m55c3vdbkrwcvmnsjji6vqhnykjtsppv3gprqlv.py
# Topologically Sorted Source Nodes: [cat2], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat2 => cat_1
# Graph fragment:
# %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu_1, %relu_2], 1), kwargs = {})
triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 98304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 4096) % 6
x0 = xindex % 4096
x2 = (xindex // 24576)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 3, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (4096*x1) + (12288*x2)), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tmp13 = tl.full([1], 6, tl.int64)
tmp14 = tmp0 < tmp13
tmp15 = tl.load(in_ptr2 + (x0 + (4096*((-3) + x1)) + (12288*x2)), tmp12, other=0.0)
tmp16 = tl.load(in_ptr3 + ((-3) + x1), tmp12, eviction_policy='evict_last', other=0.0)
tmp17 = tmp15 + tmp16
tmp18 = triton_helpers.maximum(tmp8, tmp17)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp12, tmp18, tmp19)
tmp21 = tl.where(tmp4, tmp11, tmp20)
tl.store(out_ptr0 + (x3), tmp21, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/uw/cuwzisxb4drputjagww2hliycpjgkk573x3xmc4tbpcgmr5atzho.py
# Topologically Sorted Source Nodes: [cat3], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat3 => cat_2
# Graph fragment:
# %cat_2 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%relu, %relu_1, %relu_2, %relu_3], 1), kwargs = {})
triton_poi_fused_cat_3 = async_compile.triton('triton_poi_fused_cat_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 196608
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 4096) % 12
x0 = xindex % 4096
x2 = (xindex // 49152)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 3, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (4096*x1) + (12288*x2)), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 6, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + (4096*((-3) + x1)) + (12288*x2)), tmp9, other=0.0)
tmp11 = tl.load(in_ptr2 + ((-3) + x1), tmp9, eviction_policy='evict_last', other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = tl.full([1], 0, tl.int32)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp9, tmp14, tmp15)
tmp17 = tmp0 >= tmp7
tmp18 = tl.full([1], 9, tl.int64)
tmp19 = tmp0 < tmp18
tmp20 = tmp17 & tmp19
tmp21 = tl.load(in_ptr3 + (x0 + (4096*((-6) + x1)) + (12288*x2)), tmp20, other=0.0)
tmp22 = tl.load(in_ptr4 + ((-6) + x1), tmp20, eviction_policy='evict_last', other=0.0)
tmp23 = tmp21 + tmp22
tmp24 = triton_helpers.maximum(tmp13, tmp23)
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp20, tmp24, tmp25)
tmp27 = tmp0 >= tmp18
tmp28 = tl.full([1], 12, tl.int64)
tmp29 = tmp0 < tmp28
tmp30 = tl.load(in_ptr5 + (x0 + (4096*((-9) + x1)) + (12288*x2)), tmp27, other=0.0)
tmp31 = tl.load(in_ptr6 + ((-9) + x1), tmp27, eviction_policy='evict_last', other=0.0)
tmp32 = tmp30 + tmp31
tmp33 = triton_helpers.maximum(tmp13, tmp32)
tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
tmp35 = tl.where(tmp27, tmp33, tmp34)
tmp36 = tl.where(tmp20, tmp26, tmp35)
tmp37 = tl.where(tmp9, tmp16, tmp36)
tmp38 = tl.where(tmp4, tmp5, tmp37)
tl.store(out_ptr0 + (x3), tmp38, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/7r/c7rhz6yw3jy35zprbjbr4xmjejtxhqk4shi6j6zgeuvzsdoolgg3.py
# Topologically Sorted Source Nodes: [conv2d_4, k, mul, sub, output, relu_5], Original ATen: [aten.convolution, aten.relu, aten.mul, aten.sub, aten.add, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_4 => convolution_4
# k => relu_4
# mul => mul
# output => add
# relu_5 => relu_5
# sub => sub
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_2, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_4 : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%relu_4, %primals_3), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %relu_4), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, 1), kwargs = {})
# %relu_5 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_5, 0), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_4, 0), kwargs = {})
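# (Eager-mode equivalent of the fusion below, as a hedged sketch:
#      k   = F.relu(conv5(cat3))          # convolution_4 + relu_4
#      out = F.relu(k * x - k + 1.0)      # mul, sub, add (+b), relu_5
#  the two boolean outputs are the k <= 0 and out <= 0 masks kept for the
#  ReLU backward pass (threshold_backward).)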
triton_poi_fused_add_convolution_mul_relu_sub_threshold_backward_4 = async_compile.triton('triton_poi_fused_add_convolution_mul_relu_sub_threshold_backward_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: '*i1', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_mul_relu_sub_threshold_backward_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_mul_relu_sub_threshold_backward_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 49152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 3
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x3), None)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp4 * tmp5
tmp7 = tmp6 - tmp4
tmp8 = 1.0
tmp9 = tmp7 + tmp8
tmp10 = triton_helpers.maximum(tmp3, tmp9)
tmp11 = 0.0
tmp12 = tmp4 <= tmp11
tmp13 = tmp10 <= tmp11
tl.store(out_ptr0 + (x3), tmp10, None)
tl.store(out_ptr1 + (x3), tmp12, None)
tl.store(out_ptr2 + (x3), tmp13, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/bo/cbonipi72qwxuhhf7mgzomqybvhp6dpyxvt6oh2zv22jokbykmre.py
# Topologically Sorted Source Nodes: [conv2d_3, x4], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_3 => convolution_3
# x4 => relu_3
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_1, %primals_8, %primals_9, [1, 1], [3, 3], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {})
# %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_5 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 49152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 3
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x3), tmp6, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (3, 3, 1, 1), (3, 1, 1, 1))
assert_size_stride(primals_2, (3, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (3, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_5, (3, ), (1, ))
assert_size_stride(primals_6, (3, 6, 5, 5), (150, 25, 5, 1))
assert_size_stride(primals_7, (3, ), (1, ))
assert_size_stride(primals_8, (3, 6, 7, 7), (294, 49, 7, 1))
assert_size_stride(primals_9, (3, ), (1, ))
assert_size_stride(primals_10, (3, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_11, (3, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 3, 64, 64), (12288, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, x1], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 49152, grid=grid(49152), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 3, 64, 64), (12288, 4096, 64, 1))
buf3 = empty_strided_cuda((4, 6, 64, 64), (24576, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat1], Original ATen: [aten.cat]
triton_poi_fused_cat_1.run(buf1, buf2, primals_5, buf3, 98304, grid=grid(98304), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 3, 64, 64), (12288, 4096, 64, 1))
buf5 = empty_strided_cuda((4, 6, 64, 64), (24576, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat2], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(buf2, primals_5, buf4, primals_7, buf5, 98304, grid=grid(98304), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 3, 64, 64), (12288, 4096, 64, 1))
buf7 = empty_strided_cuda((4, 12, 64, 64), (49152, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat3], Original ATen: [aten.cat]
triton_poi_fused_cat_3.run(buf1, buf2, primals_5, buf4, primals_7, buf6, primals_9, buf7, 196608, grid=grid(196608), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 3, 64, 64), (12288, 4096, 64, 1))
buf9 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1), torch.float32)
buf11 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1), torch.bool)
buf10 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_4, k, mul, sub, output, relu_5], Original ATen: [aten.convolution, aten.relu, aten.mul, aten.sub, aten.add, aten.threshold_backward]
triton_poi_fused_add_convolution_mul_relu_sub_threshold_backward_4.run(buf8, primals_11, primals_3, buf9, buf11, buf10, 49152, grid=grid(49152), stream=stream0)
del buf8
del primals_11
buf12 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_3, x4], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_5.run(buf6, primals_9, buf12, 49152, grid=grid(49152), stream=stream0)
del buf6
del primals_9
buf13 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_2, x3], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_5.run(buf4, primals_7, buf13, 49152, grid=grid(49152), stream=stream0)
del buf4
del primals_7
buf14 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_1, x2], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_5.run(buf2, primals_5, buf14, 49152, grid=grid(49152), stream=stream0)
del buf2
del primals_5
return (buf9, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, buf1, buf3, buf5, buf7, buf10, buf11, buf12, buf13, buf14, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((3, 3, 1, 1), (3, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((3, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((3, 6, 5, 5), (150, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((3, 6, 7, 7), (294, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((3, 12, 3, 3), (108, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class AODnet(nn.Module):
def __init__(self):
super(AODnet, self).__init__()
self.conv1 = nn.Conv2d(in_channels=3, out_channels=3, kernel_size=1)
self.conv2 = nn.Conv2d(in_channels=3, out_channels=3, kernel_size=3,
padding=1)
self.conv3 = nn.Conv2d(in_channels=6, out_channels=3, kernel_size=5,
padding=2)
self.conv4 = nn.Conv2d(in_channels=6, out_channels=3, kernel_size=7,
padding=3)
self.conv5 = nn.Conv2d(in_channels=12, out_channels=3, kernel_size=
3, padding=1)
self.b = 1
def forward(self, x):
x1 = F.relu(self.conv1(x))
x2 = F.relu(self.conv2(x1))
cat1 = torch.cat((x1, x2), 1)
x3 = F.relu(self.conv3(cat1))
cat2 = torch.cat((x2, x3), 1)
x4 = F.relu(self.conv4(cat2))
cat3 = torch.cat((x1, x2, x3, x4), 1)
k = F.relu(self.conv5(cat3))
if k.size() != x.size():
            raise Exception('k and the haze image are of different sizes!')
output = k * x - k + self.b
return F.relu(output)
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
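# Hedged smoke test (illustrative only; reuses the helpers above):
if __name__ == '__main__':
    net = AODnet()
    x, = get_inputs()
    y = net(x)
    print(y.shape)  # torch.Size([4, 3, 64, 64])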
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 3
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 6
x0 = xindex % 4096
x2 = xindex // 24576
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 3, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 12288 * x2), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 6, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 4096 * (-3 + x1) + 12288 * x2), tmp6,
other=0.0)
tmp10 = tl.load(in_ptr2 + (-3 + x1), tmp6, eviction_policy='evict_last',
other=0.0)
tmp11 = tmp9 + tmp10
tmp12 = tl.full([1], 0, tl.int32)
tmp13 = triton_helpers.maximum(tmp12, tmp11)
tmp14 = tl.full(tmp13.shape, 0.0, tmp13.dtype)
tmp15 = tl.where(tmp6, tmp13, tmp14)
tmp16 = tl.where(tmp4, tmp5, tmp15)
tl.store(out_ptr0 + x3, tmp16, None)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 6
x0 = xindex % 4096
x2 = xindex // 24576
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 3, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 12288 * x2), tmp4, other=0.0)
tmp6 = tl.load(in_ptr1 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full([1], 0, tl.int32)
tmp9 = triton_helpers.maximum(tmp8, tmp7)
tmp10 = tl.full(tmp9.shape, 0.0, tmp9.dtype)
tmp11 = tl.where(tmp4, tmp9, tmp10)
tmp12 = tmp0 >= tmp3
tl.full([1], 6, tl.int64)
tmp15 = tl.load(in_ptr2 + (x0 + 4096 * (-3 + x1) + 12288 * x2), tmp12,
other=0.0)
tmp16 = tl.load(in_ptr3 + (-3 + x1), tmp12, eviction_policy=
'evict_last', other=0.0)
tmp17 = tmp15 + tmp16
tmp18 = triton_helpers.maximum(tmp8, tmp17)
tmp19 = tl.full(tmp18.shape, 0.0, tmp18.dtype)
tmp20 = tl.where(tmp12, tmp18, tmp19)
tmp21 = tl.where(tmp4, tmp11, tmp20)
tl.store(out_ptr0 + x3, tmp21, None)
@triton.jit
def triton_poi_fused_cat_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 12
x0 = xindex % 4096
x2 = xindex // 49152
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 3, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 12288 * x2), tmp4, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 6, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x0 + 4096 * (-3 + x1) + 12288 * x2), tmp9,
other=0.0)
tmp11 = tl.load(in_ptr2 + (-3 + x1), tmp9, eviction_policy='evict_last',
other=0.0)
tmp12 = tmp10 + tmp11
tmp13 = tl.full([1], 0, tl.int32)
tmp14 = triton_helpers.maximum(tmp13, tmp12)
tmp15 = tl.full(tmp14.shape, 0.0, tmp14.dtype)
tmp16 = tl.where(tmp9, tmp14, tmp15)
tmp17 = tmp0 >= tmp7
tmp18 = tl.full([1], 9, tl.int64)
tmp19 = tmp0 < tmp18
tmp20 = tmp17 & tmp19
tmp21 = tl.load(in_ptr3 + (x0 + 4096 * (-6 + x1) + 12288 * x2), tmp20,
other=0.0)
tmp22 = tl.load(in_ptr4 + (-6 + x1), tmp20, eviction_policy=
'evict_last', other=0.0)
tmp23 = tmp21 + tmp22
tmp24 = triton_helpers.maximum(tmp13, tmp23)
tmp25 = tl.full(tmp24.shape, 0.0, tmp24.dtype)
tmp26 = tl.where(tmp20, tmp24, tmp25)
tmp27 = tmp0 >= tmp18
tl.full([1], 12, tl.int64)
tmp30 = tl.load(in_ptr5 + (x0 + 4096 * (-9 + x1) + 12288 * x2), tmp27,
other=0.0)
tmp31 = tl.load(in_ptr6 + (-9 + x1), tmp27, eviction_policy=
'evict_last', other=0.0)
tmp32 = tmp30 + tmp31
tmp33 = triton_helpers.maximum(tmp13, tmp32)
tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
tmp35 = tl.where(tmp27, tmp33, tmp34)
tmp36 = tl.where(tmp20, tmp26, tmp35)
tmp37 = tl.where(tmp9, tmp16, tmp36)
tmp38 = tl.where(tmp4, tmp5, tmp37)
tl.store(out_ptr0 + x3, tmp38, None)
@triton.jit
def triton_poi_fused_add_convolution_mul_relu_sub_threshold_backward_4(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 3
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + x3, None)
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = tmp4 * tmp5
tmp7 = tmp6 - tmp4
tmp8 = 1.0
tmp9 = tmp7 + tmp8
tmp10 = triton_helpers.maximum(tmp3, tmp9)
tmp11 = 0.0
tmp12 = tmp4 <= tmp11
tmp13 = tmp10 <= tmp11
tl.store(out_ptr0 + x3, tmp10, None)
tl.store(out_ptr1 + x3, tmp12, None)
tl.store(out_ptr2 + x3, tmp13, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_5(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 3
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + x3, tmp6, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (3, 3, 1, 1), (3, 1, 1, 1))
assert_size_stride(primals_2, (3,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (3, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_5, (3,), (1,))
assert_size_stride(primals_6, (3, 6, 5, 5), (150, 25, 5, 1))
assert_size_stride(primals_7, (3,), (1,))
assert_size_stride(primals_8, (3, 6, 7, 7), (294, 49, 7, 1))
assert_size_stride(primals_9, (3,), (1,))
assert_size_stride(primals_10, (3, 12, 3, 3), (108, 9, 3, 1))
assert_size_stride(primals_11, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 3, 64, 64), (12288, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(49152)](buf1, primals_2,
49152, XBLOCK=512, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 3, 64, 64), (12288, 4096, 64, 1))
buf3 = empty_strided_cuda((4, 6, 64, 64), (24576, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_1[grid(98304)](buf1, buf2, primals_5, buf3,
98304, XBLOCK=512, num_warps=8, num_stages=1)
buf4 = extern_kernels.convolution(buf3, primals_6, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 3, 64, 64), (12288, 4096, 64, 1))
buf5 = empty_strided_cuda((4, 6, 64, 64), (24576, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_2[grid(98304)](buf2, primals_5, buf4,
primals_7, buf5, 98304, XBLOCK=512, num_warps=8, num_stages=1)
buf6 = extern_kernels.convolution(buf5, primals_8, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 3, 64, 64), (12288, 4096, 64, 1))
buf7 = empty_strided_cuda((4, 12, 64, 64), (49152, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_3[grid(196608)](buf1, buf2, primals_5, buf4,
primals_7, buf6, primals_9, buf7, 196608, XBLOCK=512, num_warps
=8, num_stages=1)
buf8 = extern_kernels.convolution(buf7, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 3, 64, 64), (12288, 4096, 64, 1))
buf9 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1),
torch.float32)
buf11 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1),
torch.bool)
buf10 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1),
torch.bool)
triton_poi_fused_add_convolution_mul_relu_sub_threshold_backward_4[grid
(49152)](buf8, primals_11, primals_3, buf9, buf11, buf10, 49152,
XBLOCK=512, num_warps=4, num_stages=1)
del buf8
del primals_11
buf12 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_5[grid(49152)](
buf6, primals_9, buf12, 49152, XBLOCK=512, num_warps=4,
num_stages=1)
del buf6
del primals_9
buf13 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_5[grid(49152)](
buf4, primals_7, buf13, 49152, XBLOCK=512, num_warps=4,
num_stages=1)
del buf4
del primals_7
buf14 = empty_strided_cuda((4, 3, 64, 64), (12288, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_5[grid(49152)](
buf2, primals_5, buf14, 49152, XBLOCK=512, num_warps=4,
num_stages=1)
del buf2
del primals_5
return (buf9, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, buf1, buf3, buf5, buf7, buf10, buf11, buf12, buf13, buf14)
class AODnetNew(nn.Module):
def __init__(self):
super(AODnetNew, self).__init__()
self.conv1 = nn.Conv2d(in_channels=3, out_channels=3, kernel_size=1)
self.conv2 = nn.Conv2d(in_channels=3, out_channels=3, kernel_size=3,
padding=1)
self.conv3 = nn.Conv2d(in_channels=6, out_channels=3, kernel_size=5,
padding=2)
self.conv4 = nn.Conv2d(in_channels=6, out_channels=3, kernel_size=7,
padding=3)
self.conv5 = nn.Conv2d(in_channels=12, out_channels=3, kernel_size=
3, padding=1)
self.b = 1
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
primals_10 = self.conv5.weight
primals_11 = self.conv5.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
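# Hedged usage sketch (added for illustration, not emitted by Inductor); the
# input shape follows the assert_size_stride guard on primals_3, and a CUDA
# device is assumed.
if __name__ == '__main__':
    net = AODnetNew().cuda()
    x = torch.rand(4, 3, 64, 64, device='cuda')
    y = net(x)
    print(y.shape)  # torch.Size([4, 3, 64, 64])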
| misads/cv_template | AODnet | false | 16,106 | [
"MIT"
] | 69 | 9976ee0ada449a494d26f896c598610f233edc10 | https://github.com/misads/cv_template/tree/9976ee0ada449a494d26f896c598610f233edc10 |
ClassicalConv5 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/of/cofgwb3vhgu3642p2xyascopk5d3ugqjcjkdgn3lryrfa3gz5unw.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
# x => avg_pool2d
# Graph fragment:
# %avg_pool2d : [num_users=2] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%primals_1, [4, 4]), kwargs = {})
triton_poi_fused_avg_pool2d_0 = async_compile.triton('triton_poi_fused_avg_pool2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((4*x0) + (256*x1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0) + (256*x1)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0) + (256*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0) + (256*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (64 + (4*x0) + (256*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (65 + (4*x0) + (256*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (66 + (4*x0) + (256*x1)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (67 + (4*x0) + (256*x1)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (128 + (4*x0) + (256*x1)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (129 + (4*x0) + (256*x1)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (130 + (4*x0) + (256*x1)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (131 + (4*x0) + (256*x1)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (192 + (4*x0) + (256*x1)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr0 + (193 + (4*x0) + (256*x1)), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr0 + (194 + (4*x0) + (256*x1)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (195 + (4*x0) + (256*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp8 = tmp7 + tmp6
tmp10 = tmp9 + tmp8
tmp12 = tmp11 + tmp10
tmp14 = tmp13 + tmp12
tmp16 = tmp15 + tmp14
tmp18 = tmp17 + tmp16
tmp20 = tmp19 + tmp18
tmp22 = tmp21 + tmp20
tmp24 = tmp23 + tmp22
tmp26 = tmp25 + tmp24
tmp28 = tmp27 + tmp26
tmp30 = tmp29 + tmp28
tmp31 = 0.0625
tmp32 = tmp30 * tmp31
tl.store(out_ptr0 + (x2), tmp32, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/cy/ccyojkpx43k5pgtwa2byecil57lweiyjjkqrsv23rii3oqnm6fk4.py
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_1 => convolution
# x_2 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%avg_pool2d, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/oj/cojqknocmn4drzmdartfcdsvk3jhz65nil4x2gpmirq4cuh6g76u.py
# Topologically Sorted Source Nodes: [x_3, x_4], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_3 => convolution_1
# x_4 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/gp/cgpegtbpo2vcmtczcknhn6gw3l3khjbdmjhkpkm6yp2l7beuwunh.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_5 => getitem_1
# Graph fragment:
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
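# The kernel below scans each 2x2 window and stores only the argmax position as
# an int8 code (0-3, row-major within the window); the pooled values themselves
# are computed to pick the winner and then discarded here.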
triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (16 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (17 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x2), tmp15, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/k2/ck274xuogi33iabbcrtrald32brwduzup2skcfize4u22d3xac64.py
# Topologically Sorted Source Nodes: [mean, x_7], Original ATen: [aten.mean, aten.squeeze]
# Source node to ATen node mapping:
# mean => mean
# x_7 => squeeze
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view, [-1]), kwargs = {})
# %squeeze : [num_users=2] = call_function[target=torch.ops.aten.squeeze.default](args = (%mean,), kwargs = {})
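# Rather than reading the pooled tensor back, the kernel below recomputes the
# 2x2 max-pool values inline and averages the 8*8 = 64 pooled outputs per
# (sample, channel) pair, matching x.view(...).mean(-1).squeeze().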
triton_per_fused_mean_squeeze_4 = async_compile.triton('triton_per_fused_mean_squeeze_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[256, 64],
reduction_hint=ReductionHint.OUTER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_squeeze_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_squeeze_4(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 256
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + ((2*(r1 % 8)) + (32*(r1 // 8)) + (256*x0)), xmask, eviction_policy='evict_last', other=0.0)
tmp1 = tl.load(in_ptr0 + (1 + (2*(r1 % 8)) + (32*(r1 // 8)) + (256*x0)), xmask, eviction_policy='evict_last', other=0.0)
tmp3 = tl.load(in_ptr0 + (16 + (2*(r1 % 8)) + (32*(r1 // 8)) + (256*x0)), xmask, eviction_policy='evict_last', other=0.0)
tmp5 = tl.load(in_ptr0 + (17 + (2*(r1 % 8)) + (32*(r1 // 8)) + (256*x0)), xmask, eviction_policy='evict_last', other=0.0)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = 64.0
tmp12 = tmp10 / tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/zu/czukfjkxdi5wcvpoprukvawk3cywo4tbhqvzkk76a7zuwgrg7b3s.py
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_9 => relu_2
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_7), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_5 = async_compile.triton('triton_poi_fused_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/kw/ckwrps2tqe3aykfpglbriki576qircverxupbkuj4bsydpwuiyls.py
# Topologically Sorted Source Nodes: [output], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# output => amax, exp, log, sub, sub_1, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm_1, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_1, %amax), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
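# Numerically stable log-softmax: the row max is subtracted before
# exponentiating, so log(sum(exp(x - max))) cannot overflow; the kernel below
# mirrors this formulation exactly.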
triton_per_fused__log_softmax_6 = async_compile.triton('triton_per_fused__log_softmax_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_6(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (10*x0)), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + (10*x0)), tmp12, rmask & xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_2, (32, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_3, (32, ), (1, ))
assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (64, 64), (64, 1))
assert_size_stride(primals_7, (64, ), (1, ))
assert_size_stride(primals_8, (10, 64), (64, 1))
assert_size_stride(primals_9, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 16, 16), (256, 256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.avg_pool2d]
stream0 = get_raw_stream(0)
triton_poi_fused_avg_pool2d_0.run(primals_1, buf0, 1024, grid=grid(1024), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 32, 16, 16), (8192, 256, 16, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf2, primals_3, 32768, grid=grid(32768), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 64, 16, 16), (16384, 256, 16, 1))
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [x_3, x_4], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf4, primals_5, 65536, grid=grid(65536), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((4, 64, 8, 8), (4096, 64, 8, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_3.run(buf4, buf5, 16384, grid=grid(16384), stream=stream0)
buf6 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [mean, x_7], Original ATen: [aten.mean, aten.squeeze]
triton_per_fused_mean_squeeze_4.run(buf7, buf4, 256, 64, grid=grid(256), stream=stream0)
buf8 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf7, reinterpret_tensor(primals_6, (64, 64), (1, 64), 0), out=buf8)
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.relu]
triton_poi_fused_relu_5.run(buf9, primals_7, 256, grid=grid(256), stream=stream0)
del primals_7
buf10 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, buf9, reinterpret_tensor(primals_8, (64, 10), (1, 64), 0), alpha=1, beta=1, out=buf10)
del primals_9
buf13 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten._log_softmax]
triton_per_fused__log_softmax_6.run(buf10, buf13, 4, 10, grid=grid(4), stream=stream0)
del buf10
return (buf13, primals_2, primals_4, buf0, buf2, buf4, buf5, buf7, buf9, buf13, primals_8, primals_6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((64, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((10, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.nn.utils.prune
import torch.backends.cudnn
import torch.cuda
import torch.nn
import torch.utils.data
class ClassicalConv5(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1, padding=1)
self.conv2 = nn.Conv2d(32, 64, 3, 1, padding=1)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
self.fc1 = nn.Linear(64, 64)
self.fc2 = nn.Linear(64, 10)
def forward(self, x):
x = F.avg_pool2d(x, 4)
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = x.view(x.shape[0], x.shape[1], -1)
x = x.mean(-1).squeeze()
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
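if __name__ == "__main__":
    # Minimal sketch (an assumed usage example, not part of the original
    # module): run the eager model on the sample input above.
    model = ClassicalConv5()
    out = model(get_inputs()[0])
    print(out.shape)  # expected: torch.Size([4, 10]) of log-probabilities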
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.utils.prune
import torch.backends.cudnn
import torch.cuda
import torch.nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4 * x0 + 256 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0 + 256 * x1), xmask,
eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0 + 256 * x1), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0 + 256 * x1), xmask,
eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (64 + 4 * x0 + 256 * x1), xmask,
eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (65 + 4 * x0 + 256 * x1), xmask,
eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (66 + 4 * x0 + 256 * x1), xmask,
eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (67 + 4 * x0 + 256 * x1), xmask,
eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (128 + 4 * x0 + 256 * x1), xmask,
eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (129 + 4 * x0 + 256 * x1), xmask,
eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (130 + 4 * x0 + 256 * x1), xmask,
eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (131 + 4 * x0 + 256 * x1), xmask,
eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (192 + 4 * x0 + 256 * x1), xmask,
eviction_policy='evict_last')
tmp25 = tl.load(in_ptr0 + (193 + 4 * x0 + 256 * x1), xmask,
eviction_policy='evict_last')
tmp27 = tl.load(in_ptr0 + (194 + 4 * x0 + 256 * x1), xmask,
eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (195 + 4 * x0 + 256 * x1), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp8 = tmp7 + tmp6
tmp10 = tmp9 + tmp8
tmp12 = tmp11 + tmp10
tmp14 = tmp13 + tmp12
tmp16 = tmp15 + tmp14
tmp18 = tmp17 + tmp16
tmp20 = tmp19 + tmp18
tmp22 = tmp21 + tmp20
tmp24 = tmp23 + tmp22
tmp26 = tmp25 + tmp24
tmp28 = tmp27 + tmp26
tmp30 = tmp29 + tmp28
tmp31 = 0.0625
tmp32 = tmp30 * tmp31
tl.store(out_ptr0 + x2, tmp32, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), None, eviction_policy=
'evict_last')
    tmp7 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), None,
        eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), None,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x2, tmp15, None)
@triton.jit
def triton_per_fused_mean_squeeze_4(in_out_ptr0, in_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 256
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (2 * (r1 % 8) + 32 * (r1 // 8) + 256 * x0),
xmask, eviction_policy='evict_last', other=0.0)
tmp1 = tl.load(in_ptr0 + (1 + 2 * (r1 % 8) + 32 * (r1 // 8) + 256 * x0),
xmask, eviction_policy='evict_last', other=0.0)
tmp3 = tl.load(in_ptr0 + (16 + 2 * (r1 % 8) + 32 * (r1 // 8) + 256 * x0
), xmask, eviction_policy='evict_last', other=0.0)
tmp5 = tl.load(in_ptr0 + (17 + 2 * (r1 % 8) + 32 * (r1 // 8) + 256 * x0
), xmask, eviction_policy='evict_last', other=0.0)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = 64.0
tmp12 = tmp10 / tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp12, xmask)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_per_fused__log_softmax_6(in_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_2, (32, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_3, (32,), (1,))
assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (64, 64), (64, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (10, 64), (64, 1))
assert_size_stride(primals_9, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 1, 16, 16), (256, 256, 16, 1),
            torch.float32)
get_raw_stream(0)
triton_poi_fused_avg_pool2d_0[grid(1024)](primals_1, buf0, 1024,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 32, 16, 16), (8192, 256, 16, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_relu_1[grid(32768)](buf2, primals_3,
32768, XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 64, 16, 16), (16384, 256, 16, 1))
buf4 = buf3
del buf3
triton_poi_fused_convolution_relu_2[grid(65536)](buf4, primals_5,
65536, XBLOCK=512, num_warps=4, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((4, 64, 8, 8), (4096, 64, 8, 1), torch.int8)
triton_poi_fused_max_pool2d_with_indices_3[grid(16384)](buf4, buf5,
16384, XBLOCK=128, num_warps=4, num_stages=1)
buf6 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
buf7 = buf6
del buf6
triton_per_fused_mean_squeeze_4[grid(256)](buf7, buf4, 256, 64,
XBLOCK=32, num_warps=8, num_stages=1)
buf8 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
extern_kernels.mm(buf7, reinterpret_tensor(primals_6, (64, 64), (1,
64), 0), out=buf8)
buf9 = buf8
del buf8
triton_poi_fused_relu_5[grid(256)](buf9, primals_7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_7
buf10 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_9, buf9, reinterpret_tensor(primals_8,
(64, 10), (1, 64), 0), alpha=1, beta=1, out=buf10)
del primals_9
buf13 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
triton_per_fused__log_softmax_6[grid(4)](buf10, buf13, 4, 10,
XBLOCK=1, num_warps=2, num_stages=1)
del buf10
return (buf13, primals_2, primals_4, buf0, buf2, buf4, buf5, buf7, buf9,
buf13, primals_8, primals_6)
class ClassicalConv5New(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1, padding=1)
self.conv2 = nn.Conv2d(32, 64, 3, 1, padding=1)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
self.fc1 = nn.Linear(64, 64)
self.fc2 = nn.Linear(64, 10)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| mit-han-lab/pytorch-quantum | ClassicalConv5 | false | 16,107 | ["MIT"] | 98 | 05cf000d689307f6b1fe02d12744ad455685935b | https://github.com/mit-han-lab/pytorch-quantum/tree/05cf000d689307f6b1fe02d12744ad455685935b |
Generator | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/nf/cnfa7megno72lgzswa6v64tqwzcmxhuwhptaxxhn66kasuajcqij.py
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# output_1 => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_7, [-1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_7, %amax), kwargs = {})
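# Before taking the row max, the kernel below substitutes -inf wherever the
# dim-1 index equals pad_idx (4 here), reproducing output[:, pad_idx] = -inf.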
triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 16) % 5
x5 = xindex
x6 = (xindex // 4)
tmp3 = tl.load(in_ptr0 + (x5), xmask)
tmp6 = tl.load(in_ptr0 + (4*x6), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (1 + (4*x6)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + (4*x6)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (3 + (4*x6)), xmask, eviction_policy='evict_last')
tmp0 = x2
tmp1 = tl.full([1], 4, tl.int32)
tmp2 = tmp0 == tmp1
tmp4 = float("-inf")
tmp5 = tl.where(tmp2, tmp4, tmp3)
tmp7 = tl.where(tmp2, tmp4, tmp6)
tmp9 = tl.where(tmp2, tmp4, tmp8)
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tl.where(tmp2, tmp4, tmp11)
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp15 = tl.where(tmp2, tmp4, tmp14)
tmp16 = triton_helpers.maximum(tmp13, tmp15)
tmp17 = tmp5 - tmp16
tl.store(out_ptr0 + (x5), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/wj/cwjmw64zro3ycuqcqeczx34vvt4dugdgbe5vjlegorl63ssd5zbt.py
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# output_1 => exp, log, sub_1, sum_1
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_poi_fused__log_softmax_1 = async_compile.triton('triton_poi_fused__log_softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 5, 4, 4), (80, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((80, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (80, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten._log_softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__log_softmax_0.run(buf0, buf1, 320, grid=grid(320), stream=stream0)
buf2 = reinterpret_tensor(buf0, (4, 5, 4, 4), (80, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [output_1], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_1.run(buf1, buf2, 320, grid=grid(320), stream=stream0)
del buf1
return (buf2, reinterpret_tensor(primals_3, (80, 4), (4, 1), 0), buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 5, 4, 4), (80, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
def gumbel_softmax(logits, tau=1.0, hard=False, log_mode=True, dim=-1):
while True:
gumbels = -torch.empty_like(logits).exponential_().log()
gumbels = (logits + gumbels) / tau
if log_mode:
y_soft = gumbels.log_softmax(dim)
else:
y_soft = gumbels.softmax(dim)
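        # Resample on NaNs: a NaN count below 0.01 means the draw is NaN-free.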
if torch.sum(torch.isnan(y_soft)).item() < 0.01:
break
if hard:
index = y_soft.max(dim, keepdim=True)[1]
y_hard = torch.zeros_like(logits).scatter_(dim, index, 1.0)
ret = y_hard - y_soft.detach() + y_soft
else:
ret = y_soft
return ret
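# Sketch (an assumption, not from the source repo): a hard one-hot draw with
# straight-through gradients, e.g.
#   sample = gumbel_softmax(torch.randn(2, 5), tau=0.5, hard=True, log_mode=False)
#   sample.sum(-1)  # each row is exactly one-hot, so every sum is 1.0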
class Generator(nn.Module):
def __init__(self, vocab_size, dec_hidden_size, pad_idx):
super(Generator, self).__init__()
self.linear = nn.Linear(dec_hidden_size, vocab_size)
self.softmax = nn.LogSoftmax(dim=-1)
self.pad_idx = pad_idx
def forward(self, x, use_gumbel_softmax=False):
output = self.linear(x)
output[:, self.pad_idx] = -float('inf')
if use_gumbel_softmax:
output = gumbel_softmax(output, log_mode=True, dim=-1)
else:
output = self.softmax(output)
return output
def get_inputs():
return [torch.rand([4, 5, 4, 4])]
def get_init_inputs():
return [[], {'vocab_size': 4, 'dec_hidden_size': 4, 'pad_idx': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16 % 5
x5 = xindex
x6 = xindex // 4
tmp3 = tl.load(in_ptr0 + x5, xmask)
tmp6 = tl.load(in_ptr0 + 4 * x6, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (1 + 4 * x6), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + 4 * x6), xmask, eviction_policy='evict_last'
)
tmp14 = tl.load(in_ptr0 + (3 + 4 * x6), xmask, eviction_policy='evict_last'
)
tmp0 = x2
tmp1 = tl.full([1], 4, tl.int32)
tmp2 = tmp0 == tmp1
tmp4 = float('-inf')
tmp5 = tl.where(tmp2, tmp4, tmp3)
tmp7 = tl.where(tmp2, tmp4, tmp6)
tmp9 = tl.where(tmp2, tmp4, tmp8)
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tl.where(tmp2, tmp4, tmp11)
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp15 = tl.where(tmp2, tmp4, tmp14)
tmp16 = triton_helpers.maximum(tmp13, tmp15)
tmp17 = tmp5 - tmp16
tl.store(out_ptr0 + x5, tmp17, xmask)
@triton.jit
def triton_poi_fused__log_softmax_1(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 320
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp5 + tmp7
tmp10 = tl_math.exp(tmp9)
tmp11 = tmp8 + tmp10
tmp12 = tl_math.log(tmp11)
tmp13 = tmp0 - tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 5, 4, 4), (80, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((80, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (80,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 5, 4, 4), (80, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(320)](buf0, buf1, 320, XBLOCK=
256, num_warps=4, num_stages=1)
buf2 = reinterpret_tensor(buf0, (4, 5, 4, 4), (80, 16, 4, 1), 0)
del buf0
triton_poi_fused__log_softmax_1[grid(320)](buf1, buf2, 320, XBLOCK=
256, num_warps=4, num_stages=1)
del buf1
return buf2, reinterpret_tensor(primals_3, (80, 4), (4, 1), 0), buf2
def gumbel_softmax(logits, tau=1.0, hard=False, log_mode=True, dim=-1):
while True:
gumbels = -torch.empty_like(logits).exponential_().log()
gumbels = (logits + gumbels) / tau
if log_mode:
y_soft = gumbels.log_softmax(dim)
else:
y_soft = gumbels.softmax(dim)
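        # Resample on NaNs: a NaN count below 0.01 means the draw is NaN-free.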
if torch.sum(torch.isnan(y_soft)).item() < 0.01:
break
if hard:
index = y_soft.max(dim, keepdim=True)[1]
y_hard = torch.zeros_like(logits).scatter_(dim, index, 1.0)
ret = y_hard - y_soft.detach() + y_soft
else:
ret = y_soft
return ret
class GeneratorNew(nn.Module):
def __init__(self, vocab_size, dec_hidden_size, pad_idx):
super(GeneratorNew, self).__init__()
self.linear = nn.Linear(dec_hidden_size, vocab_size)
self.softmax = nn.LogSoftmax(dim=-1)
self.pad_idx = pad_idx
def forward(self, input_0):
primals_1 = self.linear.weight
primals_2 = self.linear.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| mosespv96/SCAPT-ABSA | Generator | false | 16,108 | ["MIT"] | 49 | 6f7f89a131127f262a8d1fd2774e5a96b58e7193 | https://github.com/mosespv96/SCAPT-ABSA/tree/6f7f89a131127f262a8d1fd2774e5a96b58e7193 |
ShiftSoftplus | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/im/cimgqeuv3wxx43tl5inlewtmlhgsssqq5ly4qz44xnlpnnfsiltd.py
# Topologically Sorted Source Nodes: [softplus, wrapped_log, sub], Original ATen: [aten.softplus, aten.log, aten.sub]
# Source node to ATen node mapping:
# softplus => exp, gt, log1p, where
# sub => sub
# wrapped_log => full_default
# Graph fragment:
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg0_1, 20), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%arg0_1,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %arg0_1, %log1p), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.6931471805599453), kwargs = {dtype: torch.float64, layout: torch.strided, device: cpu, pin_memory: False})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %full_default), kwargs = {})
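# The x > 20 branch is Softplus's overflow guard: for large x, log1p(exp(x))
# is numerically just x, so the kernel returns x directly in that case.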
triton_poi_fused_log_softplus_sub_0 = async_compile.triton('triton_poi_fused_log_softplus_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_log_softplus_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_log_softplus_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 20.0
tmp2 = tmp0 > tmp1
tmp3 = tl_math.exp(tmp0)
tmp4 = libdevice.log1p(tmp3)
tmp5 = tl.where(tmp2, tmp0, tmp4)
tmp6 = 0.6931471805599453
tmp7 = tmp5 - tmp6
tl.store(out_ptr0 + (x0), tmp7, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softplus, wrapped_log, sub], Original ATen: [aten.softplus, aten.log, aten.sub]
stream0 = get_raw_stream(0)
triton_poi_fused_log_softplus_sub_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
from torch.nn import Softplus
class ShiftSoftplus(Softplus):
"""
    Shifted softplus activation function:
    (1/beta) * log(1 + exp(beta * x)) - log(shift)
"""
def __init__(self, beta=1, shift=2, threshold=20):
super().__init__(beta, threshold)
self.shift = shift
self.softplus = Softplus(beta, threshold)
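    # With the defaults (beta=1, shift=2) the forward below reduces to
    # softplus(x) - log(2); the fused kernel above hard-codes
    # log(2) = 0.6931471805599453.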
def forward(self, input):
return self.softplus(input) - np.log(float(self.shift))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch.nn import Softplus
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_log_softplus_sub_0(in_ptr0, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 20.0
tmp2 = tmp0 > tmp1
tmp3 = tl_math.exp(tmp0)
tmp4 = libdevice.log1p(tmp3)
tmp5 = tl.where(tmp2, tmp0, tmp4)
tmp6 = 0.6931471805599453
tmp7 = tmp5 - tmp6
tl.store(out_ptr0 + x0, tmp7, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_log_softplus_sub_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class ShiftSoftplusNew(Softplus):
"""
    Shifted softplus activation function:
    (1/beta) * log(1 + exp(beta * x)) - log(shift)
"""
def __init__(self, beta=1, shift=2, threshold=20):
super().__init__(beta, threshold)
self.shift = shift
self.softplus = Softplus(beta, threshold)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| mufeili/Alchemy | ShiftSoftplus | false | 16,109 | ["MIT"] | 116 | 659c59fbbe93d406f8b3e0711e5a048e58c9c43c | https://github.com/mufeili/Alchemy/tree/659c59fbbe93d406f8b3e0711e5a048e58c9c43c |
Sinh | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/xh/cxhyn5exopprfo54ilww227h6hrwsbunqvmhogfeglaq6ercfyvl.py
# Topologically Sorted Source Nodes: [sinh], Original ATen: [aten.sinh]
# Source node to ATen node mapping:
# sinh => sinh
# Graph fragment:
# %sinh : [num_users=1] = call_function[target=torch.ops.aten.sinh.default](args = (%arg0_1,), kwargs = {})
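# sinh(x) = (exp(x) - exp(-x)) / 2; the kernel below maps this one-to-one onto
# an elementwise libdevice.sinh call.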
triton_poi_fused_sinh_0 = async_compile.triton('triton_poi_fused_sinh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sinh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sinh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = libdevice.sinh(tmp0)
tl.store(out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sinh], Original ATen: [aten.sinh]
stream0 = get_raw_stream(0)
triton_poi_fused_sinh_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.onnx
import torch.nn as nn
class Sinh(nn.Module):
def forward(self, x):
return torch.sinh(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.onnx
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_sinh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = libdevice.sinh(tmp0)
tl.store(out_ptr0 + x0, tmp1, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_sinh_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class SinhNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
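# A minimal sanity-check sketch (assumes a CUDA device is available): the
# compiled kernel path through SinhNew should agree elementwise with eager
# torch.sinh on the same input.
def _check_sinh_matches_eager():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(SinhNew()(x), torch.sinh(x), atol=1e-6)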
| mil-tokyo/webdnn | Sinh | false | 16,110 | ["MIT"] | 1,967 | 38a60fd3e1a4e72bc01108189a3aa51e0752aecd | https://github.com/mil-tokyo/webdnn/tree/38a60fd3e1a4e72bc01108189a3aa51e0752aecd |
VAE | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/3q/c3qwr2d2rrpjzvnddomnmdy6cwva4hjlvrn2y5epemk4ak3k2m6c.py
# Topologically Sorted Source Nodes: [h1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# h1 => relu
# Graph fragment:
# %add_tensor_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_2, %primals_3), kwargs = {})
# %relu : [num_users=3] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_2,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 400
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/jd/cjdtedosfkoutmd76tpyaejxhpwspa7takf5bagemxu5kt4jquxx.py
# Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# sigmoid => sigmoid
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_11), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_sigmoid_1 = async_compile.triton('triton_poi_fused_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 3136
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 784
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (4, 784), (784, 1))
assert_size_stride(primals_2, (400, 784), (784, 1))
assert_size_stride(primals_3, (400, ), (1, ))
assert_size_stride(primals_4, (20, 400), (400, 1))
assert_size_stride(primals_5, (20, ), (1, ))
assert_size_stride(primals_6, (20, 400), (400, 1))
assert_size_stride(primals_7, (20, ), (1, ))
assert_size_stride(primals_8, (400, 20), (20, 1))
assert_size_stride(primals_9, (400, ), (1, ))
assert_size_stride(primals_10, (784, 400), (400, 1))
assert_size_stride(primals_11, (784, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 400), (400, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784, 400), (1, 784), 0), out=buf0)
del primals_2
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [h1], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(buf1, primals_3, 1600, grid=grid(1600), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((4, 20), (20, 1), torch.float32)
# Topologically Sorted Source Nodes: [mu], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4, (400, 20), (1, 400), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 20), (20, 1), torch.float32)
# Topologically Sorted Source Nodes: [logvar], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, buf1, reinterpret_tensor(primals_6, (400, 20), (1, 400), 0), alpha=1, beta=1, out=buf3)
del primals_7
buf4 = empty_strided_cuda((4, 400), (400, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf2, reinterpret_tensor(primals_8, (20, 400), (1, 20), 0), out=buf4)
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [h3], Original ATen: [aten.relu]
triton_poi_fused_relu_0.run(buf5, primals_9, 1600, grid=grid(1600), stream=stream0)
del primals_9
buf6 = empty_strided_cuda((4, 784), (784, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf5, reinterpret_tensor(primals_10, (400, 784), (1, 400), 0), out=buf6)
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_1.run(buf7, primals_11, 3136, grid=grid(3136), stream=stream0)
del primals_11
return (buf7, buf2, buf3, primals_1, buf1, buf2, buf5, buf7, primals_10, primals_8, primals_6, primals_4, )
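# Note that the decoder matmul above (buf4) consumes buf2 (mu) directly: the
# graph was captured with self.training == False, so reparameterize reduced
# to returning mu and no sampling kernel appears in this compiled module.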
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 784), (784, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((400, 784), (784, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((400, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((20, 400), (400, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((20, 400), (400, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((20, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((400, 20), (20, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((400, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((784, 400), (400, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((784, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class VAE(nn.Module):
def __init__(self):
super(VAE, self).__init__()
self.fc1 = nn.Linear(784, 400)
self.fc21 = nn.Linear(400, 20)
self.fc22 = nn.Linear(400, 20)
self.fc3 = nn.Linear(20, 400)
self.fc4 = nn.Linear(400, 784)
def encode(self, x):
h1 = F.relu(self.fc1(x))
return self.fc21(h1), self.fc22(h1)
def reparameterize(self, mu, logvar):
if self.training:
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return eps.mul(std).add_(mu)
else:
return mu
def decode(self, z):
h3 = F.relu(self.fc3(z))
return torch.sigmoid(self.fc4(h3))
def forward(self, x):
mu, logvar = self.encode(x.view(-1, 784))
z = self.reparameterize(mu, logvar)
return self.decode(z), mu, logvar
def get_inputs():
return [torch.rand([4, 784])]
def get_init_inputs():
return [[], {}]
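# The reparameterization trick in VAE.reparameterize samples z = mu + eps*std
# with eps ~ N(0, I), so gradients flow through mu and logvar. A minimal
# sketch of a training loss that pairs with it (the standard VAE objective,
# not taken from this repo):
def vae_loss(recon_x, x, mu, logvar):
    # Bernoulli reconstruction term plus the closed-form Gaussian KL:
    # KL(N(mu, sigma^2) || N(0, 1)) = -0.5 * sum(1 + logvar - mu^2 - exp(logvar))
    bce = F.binary_cross_entropy(recon_x, x.view(-1, 784), reduction='sum')
    kld = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    return bce + kld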
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 400
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_sigmoid_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 3136
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 784
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (4, 784), (784, 1))
assert_size_stride(primals_2, (400, 784), (784, 1))
assert_size_stride(primals_3, (400,), (1,))
assert_size_stride(primals_4, (20, 400), (400, 1))
assert_size_stride(primals_5, (20,), (1,))
assert_size_stride(primals_6, (20, 400), (400, 1))
assert_size_stride(primals_7, (20,), (1,))
assert_size_stride(primals_8, (400, 20), (20, 1))
assert_size_stride(primals_9, (400,), (1,))
assert_size_stride(primals_10, (784, 400), (400, 1))
assert_size_stride(primals_11, (784,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 400), (400, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (784,
400), (1, 784), 0), out=buf0)
del primals_2
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_relu_0[grid(1600)](buf1, primals_3, 1600, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((4, 20), (20, 1), torch.float32)
extern_kernels.addmm(primals_5, buf1, reinterpret_tensor(primals_4,
(400, 20), (1, 400), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 20), (20, 1), torch.float32)
extern_kernels.addmm(primals_7, buf1, reinterpret_tensor(primals_6,
(400, 20), (1, 400), 0), alpha=1, beta=1, out=buf3)
del primals_7
buf4 = empty_strided_cuda((4, 400), (400, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_8, (20, 400), (1,
20), 0), out=buf4)
buf5 = buf4
del buf4
triton_poi_fused_relu_0[grid(1600)](buf5, primals_9, 1600, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_9
buf6 = empty_strided_cuda((4, 784), (784, 1), torch.float32)
extern_kernels.mm(buf5, reinterpret_tensor(primals_10, (400, 784),
(1, 400), 0), out=buf6)
buf7 = buf6
del buf6
triton_poi_fused_sigmoid_1[grid(3136)](buf7, primals_11, 3136,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_11
return (buf7, buf2, buf3, primals_1, buf1, buf2, buf5, buf7, primals_10,
primals_8, primals_6, primals_4)
class VAENew(nn.Module):
def __init__(self):
super(VAENew, self).__init__()
self.fc1 = nn.Linear(784, 400)
self.fc21 = nn.Linear(400, 20)
self.fc22 = nn.Linear(400, 20)
self.fc3 = nn.Linear(20, 400)
self.fc4 = nn.Linear(400, 784)
def encode(self, x):
h1 = F.relu(self.fc1(x))
return self.fc21(h1), self.fc22(h1)
def reparameterize(self, mu, logvar):
if self.training:
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return eps.mul(std).add_(mu)
else:
return mu
def decode(self, z):
h3 = F.relu(self.fc3(z))
return torch.sigmoid(self.fc4(h3))
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc21.weight
primals_5 = self.fc21.bias
primals_6 = self.fc22.weight
primals_7 = self.fc22.bias
primals_8 = self.fc3.weight
primals_9 = self.fc3.bias
primals_10 = self.fc4.weight
primals_11 = self.fc4.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0], output[1], output[2]
| momohatt/chainer-compiler | VAE | false | 16,111 | ["MIT"] | 116 | 26782cd29a5becf8e2badf268b47d98b3a6aea1d | https://github.com/momohatt/chainer-compiler/tree/26782cd29a5becf8e2badf268b47d98b3a6aea1d |
idct_8x8 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/b4/cb4ar3ryyt4mp4d5ipmgfvm4veyllxljd2ymzyfikn6eexxqe2ep.py
# Topologically Sorted Source Nodes: [image], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# image => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %primals_1), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/y5/cy5rhdemzf6dqbzpnuqnokcny6y6tjsftb3h6y7lac6lexyo22ba.py
# Topologically Sorted Source Nodes: [mul_1, result], Original ATen: [aten.mul, aten.add]
# Source node to ATen node mapping:
# mul_1 => mul_1
# result => add
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, 0.25), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, 128), kwargs = {})
triton_poi_fused_add_mul_1 = async_compile.triton('triton_poi_fused_add_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_1(in_out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp1 = 0.25
tmp2 = tmp0 * tmp1
tmp3 = 128.0
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (8, 8), (8, 1))
assert_size_stride(primals_2, (4, 4, 8, 8), (256, 64, 8, 1))
assert_size_stride(primals_3, (8, 8, 8, 8), (512, 64, 8, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [image], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(primals_2, primals_1, buf0, 1024, grid=grid(1024), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((16, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [tensordot], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf0, (16, 64), (64, 1), 0), reinterpret_tensor(primals_3, (64, 64), (64, 1), 0), out=buf1)
buf2 = reinterpret_tensor(buf1, (4, 4, 8, 8), (256, 64, 8, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [mul_1, result], Original ATen: [aten.mul, aten.add]
triton_poi_fused_add_mul_1.run(buf2, 1024, grid=grid(1024), stream=stream0)
return (buf2, primals_2, reinterpret_tensor(buf0, (64, 16), (1, 64), 0), reinterpret_tensor(primals_3, (64, 64), (1, 64), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((8, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 8, 8), (256, 64, 8, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((8, 8, 8, 8), (512, 64, 8, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import itertools
import torch
import numpy as np
import torch.nn as nn
class idct_8x8(nn.Module):
""" Inverse discrete Cosine Transformation
Input:
dcp(tensor): batch x height x width
Output:
image(tensor): batch x height x width
"""
def __init__(self):
super(idct_8x8, self).__init__()
alpha = np.array([1.0 / np.sqrt(2)] + [1] * 7)
        self.alpha = nn.Parameter(
            torch.from_numpy(np.outer(alpha, alpha)).float())
tensor = np.zeros((8, 8, 8, 8), dtype=np.float32)
for x, y, u, v in itertools.product(range(8), repeat=4):
tensor[x, y, u, v] = np.cos((2 * u + 1) * x * np.pi / 16) * np.cos(
(2 * v + 1) * y * np.pi / 16)
self.tensor = nn.Parameter(torch.from_numpy(tensor).float())
def forward(self, image):
image = image * self.alpha
result = 0.25 * torch.tensordot(image, self.tensor, dims=2) + 128
        result = result.view(image.shape)
return result
def get_inputs():
return [torch.rand([4, 4, 8, 8])]
def get_init_inputs():
return [[], {}]
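# For reference, a loop-level sketch of the same transform (not from the
# repo): with the basis tensor defined above, tensordot(image, tensor, dims=2)
# contracts the two 8x8 block axes, which is the standard JPEG inverse DCT.
def idct_8x8_reference(dcp):
    # dcp: one 8x8 numpy block of DCT coefficients; returns the decoded block.
    alpha = np.array([1.0 / np.sqrt(2)] + [1] * 7)
    out = np.full((8, 8), 128.0)
    for u, v in itertools.product(range(8), repeat=2):
        for x, y in itertools.product(range(8), repeat=2):
            out[u, v] += 0.25 * alpha[x] * alpha[y] * dcp[x, y] * np.cos(
                (2 * u + 1) * x * np.pi / 16) * np.cos(
                (2 * v + 1) * y * np.pi / 16)
    return out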
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import itertools
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused_add_mul_1(in_out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp1 = 0.25
tmp2 = tmp0 * tmp1
tmp3 = 128.0
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (8, 8), (8, 1))
assert_size_stride(primals_2, (4, 4, 8, 8), (256, 64, 8, 1))
assert_size_stride(primals_3, (8, 8, 8, 8), (512, 64, 8, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(1024)](primals_2, primals_1, buf0, 1024,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((16, 64), (64, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 64), (64, 1), 0),
reinterpret_tensor(primals_3, (64, 64), (64, 1), 0), out=buf1)
buf2 = reinterpret_tensor(buf1, (4, 4, 8, 8), (256, 64, 8, 1), 0)
del buf1
triton_poi_fused_add_mul_1[grid(1024)](buf2, 1024, XBLOCK=256,
num_warps=4, num_stages=1)
return buf2, primals_2, reinterpret_tensor(buf0, (64, 16), (1, 64), 0
), reinterpret_tensor(primals_3, (64, 64), (1, 64), 0)
class idct_8x8New(nn.Module):
""" Inverse discrete Cosine Transformation
Input:
dcp(tensor): batch x height x width
Output:
image(tensor): batch x height x width
"""
def __init__(self):
super(idct_8x8New, self).__init__()
alpha = np.array([1.0 / np.sqrt(2)] + [1] * 7)
        self.alpha = nn.Parameter(
            torch.from_numpy(np.outer(alpha, alpha)).float())
tensor = np.zeros((8, 8, 8, 8), dtype=np.float32)
for x, y, u, v in itertools.product(range(8), repeat=4):
tensor[x, y, u, v] = np.cos((2 * u + 1) * x * np.pi / 16) * np.cos(
(2 * v + 1) * y * np.pi / 16)
self.tensor = nn.Parameter(torch.from_numpy(tensor).float())
def forward(self, input_0):
primals_1 = self.alpha
primals_3 = self.tensor
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| mlomnitz/DifferentiableJPEG | idct_8x8 | false | 16,112 | ["MIT"] | 86 | a5767feba955a1bcb78600135a09c36a806f6249 | https://github.com/mlomnitz/DifferentiableJPEG/tree/a5767feba955a1bcb78600135a09c36a806f6249 |
NextSentencePrediction | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/yy/cyya3js6wt64vdji3sfisvrqyfvqxwkwqq5mzg5bqjl2crzjs4t3.py
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# linear => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%select,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/t6/ct6t4t4f6df2rswen66qmskrag4dcnuaoxkebk4pssna63yxl3v3.py
# Topologically Sorted Source Nodes: [linear, log_softmax], Original ATen: [aten.add, aten._log_softmax]
# Source node to ATen node mapping:
# linear => add
# log_softmax => amax, sub
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_3), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%add, [-1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %amax), kwargs = {})
triton_poi_fused__log_softmax_add_1 = async_compile.triton('triton_poi_fused__log_softmax_add_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_add_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_add_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
x1 = (xindex // 2)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2*x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr0 + (1 + (2*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (1))
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp6 = tmp3 + tmp5
tmp10 = tmp7 + tmp9
tmp11 = triton_helpers.maximum(tmp6, tmp10)
tmp12 = tmp2 - tmp11
tl.store(out_ptr0 + (x2), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/jt/cjtlgs566wh4nqwf6wvcsracaovzta35wbm5vllgfuavp5ebkiq4.py
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# log_softmax => exp, log, sub_1, sum_1
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_poi_fused__log_softmax_2 = async_compile.triton('triton_poi_fused__log_softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 2)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (2*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (2*x1)), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp6 = tl_math.log(tmp5)
tmp7 = tmp0 - tmp6
tl.store(out_ptr0 + (x2), tmp7, xmask)
''', device_str='cuda')
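# Taken together, the two kernels above implement a numerically stable
# log-softmax over the 2 logits: the first subtracts the row-wise max from
# the bias-added logits, and the second subtracts log(sum(exp(.))) from the
# shifted values.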
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (2, 4), (4, 1))
assert_size_stride(primals_3, (2, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((16, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 2), (1, 4), 0), out=buf1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear, log_softmax], Original ATen: [aten.add, aten._log_softmax]
triton_poi_fused__log_softmax_add_1.run(buf1, primals_3, buf2, 32, grid=grid(32), stream=stream0)
del primals_3
buf3 = reinterpret_tensor(buf1, (4, 4, 2), (8, 2, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [log_softmax], Original ATen: [aten._log_softmax]
triton_poi_fused__log_softmax_2.run(buf2, buf3, 32, grid=grid(32), stream=stream0)
del buf2
return (buf3, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((2, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class NextSentencePrediction(nn.Module):
"""
    2-class classification model: is_next, is_not_next
"""
def __init__(self, hidden):
"""
:param hidden: BERT model output size
"""
super().__init__()
self.linear = nn.Linear(hidden, 2)
self.softmax = nn.LogSoftmax(dim=-1)
def forward(self, x):
return self.softmax(self.linear(x[:, 0]))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused__log_softmax_add_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 2
x1 = xindex // 2
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + 2 * x1, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr0 + (1 + 2 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + 1)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK])
tmp2 = tmp0 + tmp1
tmp6 = tmp3 + tmp5
tmp10 = tmp7 + tmp9
tmp11 = triton_helpers.maximum(tmp6, tmp10)
tmp12 = tmp2 - tmp11
tl.store(out_ptr0 + x2, tmp12, xmask)
@triton.jit
def triton_poi_fused__log_softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 2
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 2 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 2 * x1), xmask, eviction_policy='evict_last')
tmp2 = tl_math.exp(tmp1)
tmp4 = tl_math.exp(tmp3)
tmp5 = tmp2 + tmp4
tmp6 = tl_math.log(tmp5)
tmp7 = tmp0 - tmp6
tl.store(out_ptr0 + x2, tmp7, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (2, 4), (4, 1))
assert_size_stride(primals_3, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((16, 2), (2, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 2), (1, 4), 0), out=buf1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 2), (8, 2, 1), torch.float32)
triton_poi_fused__log_softmax_add_1[grid(32)](buf1, primals_3, buf2,
32, XBLOCK=32, num_warps=1, num_stages=1)
del primals_3
buf3 = reinterpret_tensor(buf1, (4, 4, 2), (8, 2, 1), 0)
del buf1
triton_poi_fused__log_softmax_2[grid(32)](buf2, buf3, 32, XBLOCK=32,
num_warps=1, num_stages=1)
del buf2
return buf3, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf3
class NextSentencePredictionNew(nn.Module):
"""
    2-class classification model: is_next, is_not_next
"""
def __init__(self, hidden):
"""
:param hidden: BERT model output size
"""
super().__init__()
self.linear = nn.Linear(hidden, 2)
self.softmax = nn.LogSoftmax(dim=-1)
def forward(self, input_0):
primals_2 = self.linear.weight
primals_3 = self.linear.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| mortonjt/BERT-pytorch | NextSentencePrediction | false | 16,113 | ["Apache-2.0"] | 5,013 | d10dc4f9d5a6f2ca74380f62039526eb7277c671 | https://github.com/mortonjt/BERT-pytorch/tree/d10dc4f9d5a6f2ca74380f62039526eb7277c671 |
ClassicalConv6 | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/sj/csj6uus7z5hpvi77pvgp63jx4bne5i65mpzpsuvveo3mzfov6ycm.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x => convolution
# x_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
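# As the graph fragment above suggests, the convolution itself runs as an
# extern kernel; this Triton kernel only fuses the per-channel bias add
# (x1 indexes the 32 channels of a 64x64 feature map) with ReLU, writing in
# place into the convolution's output buffer.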
# kernel path: runs/run_shard_0/inductor_cache/us/cusrll7pxpmm3esyc6vi5iyfdkk7oqmzx4mzxbsk3ry5fu4y7rpi.py
# Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# x_2 => convolution_1
# x_3 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/5m/c5mnbdvoaqjef7kiuovjkacsd2buyyvwevhsfbn6v54mmppt7fpf.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_4 => getitem_1
# Graph fragment:
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_2 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = (xindex // 32)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x2), tmp15, None)
''', device_str='cuda')
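# The int8 values 0-3 stored by the kernel above encode which of the four
# positions in each 2x2 pooling window held the maximum (row-major order),
# i.e. the `indices` half of max_pool2d_with_indices; the pooled values
# themselves (tmp16) are discarded here and recomputed where they are
# consumed by the reduction kernel below.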
# kernel path: runs/run_shard_0/inductor_cache/7k/c7km5kgkby27u4k6katdlgl5htaal5dbuju5n33hzf4dbxtswoyw.py
# Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# mean => mean
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view, [-1]), kwargs = {})
triton_red_fused_mean_3 = async_compile.triton('triton_red_fused_mean_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[2048, 128],
reduction_hint=ReductionHint.OUTER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused_mean_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused_mean_3(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 2048
rnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
_tmp8 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + ((2*(r1 % 32)) + (128*(r1 // 32)) + (512*x0)), rmask, eviction_policy='evict_last', other=0.0)
tmp1 = tl.load(in_ptr0 + (1 + (2*(r1 % 32)) + (128*(r1 // 32)) + (512*x0)), rmask, eviction_policy='evict_last', other=0.0)
tmp3 = tl.load(in_ptr0 + (64 + (2*(r1 % 32)) + (128*(r1 // 32)) + (512*x0)), rmask, eviction_policy='evict_last', other=0.0)
tmp5 = tl.load(in_ptr0 + (65 + (2*(r1 % 32)) + (128*(r1 // 32)) + (512*x0)), rmask, eviction_policy='evict_last', other=0.0)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = _tmp8 + tmp7
_tmp8 = tl.where(rmask, tmp9, _tmp8)
tmp8 = tl.sum(_tmp8, 1)[:, None]
tl.store(out_ptr0 + (x0), tmp8, None)
''', device_str='cuda')
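# Note (added for readability): this reduction recomputes the 2x2 max-pool
# values on the fly from the pre-pool activations and accumulates partial sums
# over 128-element chunks (8 partials per (batch, channel) row); it is the
# first stage of the mean over the 32*32 pooled positions.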
# kernel path: runs/run_shard_0/inductor_cache/rb/crbekklnbr62tyciv7pgsttnzj5vs4jx4t3lxyrty6h3crlxoy5k.py
# Topologically Sorted Source Nodes: [mean, x_6], Original ATen: [aten.mean, aten.squeeze]
# Source node to ATen node mapping:
# mean => mean
# x_6 => squeeze
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view, [-1]), kwargs = {})
# %squeeze : [num_users=2] = call_function[target=torch.ops.aten.squeeze.default](args = (%mean,), kwargs = {})
triton_per_fused_mean_squeeze_4 = async_compile.triton('triton_per_fused_mean_squeeze_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[256, 8],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_squeeze_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_squeeze_4(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 256
rnumel = 8
RBLOCK: tl.constexpr = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (8*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 1024.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
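# Note (added for readability): second stage of the mean: the 8 partial sums
# per (batch, channel) are added and divided by 1024.0 (= 32 * 32 pooled
# positions), fused with the squeeze of the trailing dimension.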
# kernel path: runs/run_shard_0/inductor_cache/zu/czukfjkxdi5wcvpoprukvawk3cywo4tbhqvzkk76a7zuwgrg7b3s.py
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_8 => relu_2
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_7), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_5 = async_compile.triton('triton_poi_fused_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/kw/ckwrps2tqe3aykfpglbriki576qircverxupbkuj4bsydpwuiyls.py
# Topologically Sorted Source Nodes: [output], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# output => amax, exp, log, sub, sub_1, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%addmm_1, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%addmm_1, %amax), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
triton_per_fused__log_softmax_6 = async_compile.triton('triton_per_fused__log_softmax_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_6(in_ptr0, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (10*x0)), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float("-inf"))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + (10*x0)), tmp12, rmask & xmask)
''', device_str='cuda')
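# Note (added for readability): numerically stable log-softmax over each row of
# 10 logits: subtract the row max, exponentiate, sum, then subtract the log of
# that sum (the log-sum-exp trick).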
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (32, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (64, 64), (64, 1))
assert_size_stride(primals_7, (64, ), (1, ))
assert_size_stride(primals_8, (10, 64), (64, 1))
assert_size_stride(primals_9, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 524288, grid=grid(524288), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf3, primals_5, 1048576, grid=grid(1048576), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.int8)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_2.run(buf3, buf4, 262144, grid=grid(262144), stream=stream0)
buf5 = empty_strided_cuda((4, 64, 8), (512, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean], Original ATen: [aten.mean]
triton_red_fused_mean_3.run(buf3, buf5, 2048, 128, grid=grid(2048), stream=stream0)
buf6 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [mean, x_6], Original ATen: [aten.mean, aten.squeeze]
triton_per_fused_mean_squeeze_4.run(buf7, buf5, 256, 8, grid=grid(256), stream=stream0)
del buf5
buf8 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf7, reinterpret_tensor(primals_6, (64, 64), (1, 64), 0), out=buf8)
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.relu]
triton_poi_fused_relu_5.run(buf9, primals_7, 256, grid=grid(256), stream=stream0)
del primals_7
buf10 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, buf9, reinterpret_tensor(primals_8, (64, 10), (1, 64), 0), alpha=1, beta=1, out=buf10)
del primals_9
buf13 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten._log_softmax]
triton_per_fused__log_softmax_6.run(buf10, buf13, 4, 10, grid=grid(4), stream=stream0)
del buf10
return (buf13, primals_1, primals_3, primals_4, buf1, buf3, buf4, buf7, buf9, buf13, primals_8, primals_6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((64, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((10, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn.functional as F
import torch.nn as nn
import torch.nn.utils.prune
import torch.backends.cudnn
import torch.cuda
import torch.nn
import torch.utils.data
class ClassicalConv6(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1, padding=1)
self.conv2 = nn.Conv2d(32, 64, 3, 1, padding=1)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
self.fc1 = nn.Linear(64, 64)
self.fc2 = nn.Linear(64, 10)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, 2)
x = x.view(x.shape[0], x.shape[1], -1)
x = x.mean(-1).squeeze()
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
output = F.log_softmax(x, dim=1)
return output
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
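if __name__ == "__main__":
    # Hypothetical smoke test (assumption; not part of the original entry):
    # one forward pass on the sample inputs defined above. Two caveats visible
    # in the source: the Dropout modules are defined but never used in
    # forward(), and .squeeze() would also drop the batch dimension if the
    # batch size were 1.
    model = ClassicalConv6().eval()
    out = model(*get_inputs())
    print(out.shape)  # expected: torch.Size([4, 10]) of log-probabilities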
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.utils.prune
import torch.backends.cudnn
import torch.cuda
import torch.nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_2(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = xindex // 32
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy
='evict_last')
tmp7 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
    triton_helpers.maximum(tmp12, tmp11)  # max value discarded; only the argmax index is stored
tl.store(out_ptr0 + x2, tmp15, None)
@triton.jit
def triton_red_fused_mean_3(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.
constexpr, RBLOCK: tl.constexpr):
rnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rbase = tl.arange(0, RBLOCK)[None, :]
x0 = xindex
_tmp8 = tl.full([XBLOCK, RBLOCK], 0, tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r1 = rindex
tmp0 = tl.load(in_ptr0 + (2 * (r1 % 32) + 128 * (r1 // 32) + 512 *
x0), rmask, eviction_policy='evict_last', other=0.0)
tmp1 = tl.load(in_ptr0 + (1 + 2 * (r1 % 32) + 128 * (r1 // 32) +
512 * x0), rmask, eviction_policy='evict_last', other=0.0)
tmp3 = tl.load(in_ptr0 + (64 + 2 * (r1 % 32) + 128 * (r1 // 32) +
512 * x0), rmask, eviction_policy='evict_last', other=0.0)
tmp5 = tl.load(in_ptr0 + (65 + 2 * (r1 % 32) + 128 * (r1 // 32) +
512 * x0), rmask, eviction_policy='evict_last', other=0.0)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = _tmp8 + tmp7
_tmp8 = tl.where(rmask, tmp9, _tmp8)
tmp8 = tl.sum(_tmp8, 1)[:, None]
tl.store(out_ptr0 + x0, tmp8, None)
@triton.jit
def triton_per_fused_mean_squeeze_4(in_out_ptr0, in_ptr0, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 256
RBLOCK: tl.constexpr = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 8 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
    tmp5 = 1024.0  # 32 * 32 pooled positions averaged per (batch, channel)
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_per_fused__log_softmax_6(in_ptr0, out_ptr2, xnumel, rnumel,
XBLOCK: tl.constexpr):
xnumel = 4
rnumel = 10
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 10 * x0), rmask & xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(rmask & xmask, tmp1, float('-inf'))
tmp4 = triton_helpers.max2(tmp3, 1)[:, None]
tmp5 = tmp0 - tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.where(rmask & xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl_math.log(tmp10)
tmp12 = tmp5 - tmp11
tl.store(out_ptr2 + (r1 + 10 * x0), tmp12, rmask & xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (32, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_4, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (64, 64), (64, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (10, 64), (64, 1))
assert_size_stride(primals_9, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(524288)](buf1, primals_2,
524288, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(1048576)](buf3, primals_5,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.int8)
triton_poi_fused_max_pool2d_with_indices_2[grid(262144)](buf3, buf4,
262144, XBLOCK=512, num_warps=8, num_stages=1)
buf5 = empty_strided_cuda((4, 64, 8), (512, 8, 1), torch.float32)
triton_red_fused_mean_3[grid(2048)](buf3, buf5, 2048, 128, XBLOCK=
64, RBLOCK=8, num_warps=4, num_stages=1)
buf6 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
buf7 = buf6
del buf6
triton_per_fused_mean_squeeze_4[grid(256)](buf7, buf5, 256, 8,
XBLOCK=32, num_warps=2, num_stages=1)
del buf5
buf8 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
extern_kernels.mm(buf7, reinterpret_tensor(primals_6, (64, 64), (1,
64), 0), out=buf8)
buf9 = buf8
del buf8
triton_poi_fused_relu_5[grid(256)](buf9, primals_7, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del primals_7
buf10 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_9, buf9, reinterpret_tensor(primals_8,
(64, 10), (1, 64), 0), alpha=1, beta=1, out=buf10)
del primals_9
buf13 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
triton_per_fused__log_softmax_6[grid(4)](buf10, buf13, 4, 10,
XBLOCK=1, num_warps=2, num_stages=1)
del buf10
return (buf13, primals_1, primals_3, primals_4, buf1, buf3, buf4, buf7,
buf9, buf13, primals_8, primals_6)
class ClassicalConv6New(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1, padding=1)
self.conv2 = nn.Conv2d(32, 64, 3, 1, padding=1)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
self.fc1 = nn.Linear(64, 64)
self.fc2 = nn.Linear(64, 10)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
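if __name__ == "__main__":
    # Hypothetical usage sketch (assumption; not part of the original entry):
    # call() pins every buffer to cuda:0, so a CUDA device is required.
    model = ClassicalConv6New().cuda()
    out = model(torch.rand([4, 1, 64, 64], device="cuda"))
    print(out.shape)  # expected: torch.Size([4, 10])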
| mit-han-lab/pytorch-quantum | ClassicalConv6 | false | 16,114 | [
"MIT"
] | 98 | 05cf000d689307f6b1fe02d12744ad455685935b | https://github.com/mit-han-lab/pytorch-quantum/tree/05cf000d689307f6b1fe02d12744ad455685935b |
ComplexDropout | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/u5/cu56dhpcth43gy4shrd7mcexf4nfa6qetnnhwe4mno4v6ug76h6j.py
# Topologically Sorted Source Nodes: [dropout], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# dropout => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%arg0_1,), kwargs = {})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [dropout], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [dropout_1], Original ATen: [aten.clone]
triton_poi_fused_clone_0.run(arg1_1, buf1, 256, grid=grid(256), stream=stream0)
del arg1_1
return (buf0, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.utils
class ComplexDropout(nn.Module):
def __init__(self, p=0.5, inplace=False):
super(ComplexDropout, self).__init__()
self.p = p
self.inplace = inplace
self.dropout_r = nn.Dropout(p, inplace)
self.dropout_i = nn.Dropout(p, inplace)
def forward(self, input_r, input_i):
return self.dropout_r(input_r), self.dropout_i(input_i)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
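if __name__ == "__main__":
    # Hypothetical sketch (assumption; not part of the original entry): in eval
    # mode both Dropout modules are the identity, which is why the compiled
    # graph above lowers each of them to a plain copy (clone).
    m = ComplexDropout()
    m.eval()
    inp_r, inp_i = get_inputs()
    out_r, out_i = m(inp_r, inp_i)
    assert torch.equal(out_r, inp_r) and torch.equal(out_i, inp_i)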
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.utils
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tl.store(out_ptr0 + x0, tmp0, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_0[grid(256)](arg1_1, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg1_1
return buf0, buf1
class ComplexDropoutNew(nn.Module):
def __init__(self, p=0.5, inplace=False):
super(ComplexDropoutNew, self).__init__()
self.p = p
self.inplace = inplace
self.dropout_r = nn.Dropout(p, inplace)
self.dropout_i = nn.Dropout(p, inplace)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0], output[1]
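# Note (observation, not part of the original entry): because the traced graph
# is a pure copy, ComplexDropoutNew always behaves like eval-mode dropout; the
# p and inplace attributes are kept only for interface parity with the eager
# module.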
| muqiaoy/dl_signal | ComplexDropout | false | 16,115 | [
"MIT"
] | 54 | 3a30d14982016644bfc96a7d1ca0109b441f17fd | https://github.com/muqiaoy/dl_signal/tree/3a30d14982016644bfc96a7d1ca0109b441f17fd |
SeparableBlock | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/mu/cmu5ttts2hdvlceyfi5clqpgl6qqngyzjwbvur2iymh2lu5srqjt.py
# Topologically Sorted Source Nodes: [kernel_2], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# kernel_2 => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4096
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + (4*y3)), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/vb/cvbebkbwqtzfbsixehaz4axca6thbasspzufvjiuomusl5mbpdpw.py
# Topologically Sorted Source Nodes: [kernel_2], Original ATen: [aten.add]
# Source node to ATen node mapping:
# kernel_2 => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_9, %primals_9), kwargs = {})
triton_poi_fused_add_1 = async_compile.triton('triton_poi_fused_add_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (64, 4), (4, 1))
assert_size_stride(primals_3, (64, ), (1, ))
assert_size_stride(primals_4, (64, 4), (4, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 64), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 64), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((1024, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [kernel], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf1, (1024, 4, 1), (4, 1, 1), 0), reinterpret_tensor(buf0, (1024, 1, 4), (4, 4, 1), 0), out=buf2)
buf3 = empty_strided_cuda((4096, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf2, (4096, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((64, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [kernel_2], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(buf3, primals_7, buf4, 4096, 4, grid=grid(4096, 4), stream=stream0)
del primals_7
buf5 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [kernel_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf4, (4096, 4), (4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf5)
buf6 = reinterpret_tensor(buf5, (64, 4, 4, 4, 4), (256, 64, 16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [kernel_2], Original ATen: [aten.add]
triton_poi_fused_add_1.run(buf6, primals_9, 16384, grid=grid(16384), stream=stream0)
del primals_9
return (reinterpret_tensor(buf6, (64, 4, 4, 4, 4), (256, 1, 4, 64, 16), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(buf2, (4096, 4), (4, 1), 0), reinterpret_tensor(buf4, (4096, 4), (4, 1), 0), primals_8, primals_6, reinterpret_tensor(buf1, (1024, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf0, (1024, 4, 1), (4, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.nn import Module
import torch
from torch.nn import Linear
class SeparableBlock(Module):
def __init__(self, input_size, kernel_channels_in, kernel_channels_out,
kernel_size):
super(SeparableBlock, self).__init__()
self.input_size = input_size
self.kernel_size = kernel_size
self.kernel_channels_in = kernel_channels_in
self.kernel_channels_out = kernel_channels_out
self.make_kernel_in = Linear(input_size, kernel_size * kernel_size *
kernel_channels_in)
self.make_kernel_out = Linear(input_size, kernel_size * kernel_size *
kernel_channels_out)
self.kernel_linear_in = Linear(kernel_channels_in, kernel_channels_in)
self.kernel_linear_out = Linear(kernel_channels_out,
kernel_channels_out)
def forward(self, features):
features = features.view(-1, self.input_size)
kernel_in = self.make_kernel_in(features).view(-1, self.kernel_size,
self.kernel_size, 1, self.kernel_channels_in)
kernel_out = self.make_kernel_out(features).view(-1, self.
kernel_size, self.kernel_size, self.kernel_channels_out, 1)
kernel = torch.matmul(kernel_out, kernel_in)
kernel = self.kernel_linear_in(kernel).permute(0, 1, 2, 4, 3)
kernel = self.kernel_linear_out(kernel)
kernel = kernel.permute(0, 4, 3, 1, 2)
return kernel
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'kernel_channels_in': 4,
'kernel_channels_out': 4, 'kernel_size': 4}]
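# Shape walkthrough for the defaults above (note added for documentation):
# features (4,4,4,4) -> view (64,4); make_kernel_in -> (64,4,4,1,4) and
# make_kernel_out -> (64,4,4,4,1); their matmul is a batch of rank-1 outer
# products, (64,4,4,4,1) @ (64,4,4,1,4) = (64,4,4,4,4); after the two pointwise
# Linears and the permutes, the result is a per-sample conv-style kernel of
# shape (flattened batch, C_out, C_in, k, k) = (64,4,4,4,4).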
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
from torch.nn import Linear
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x2 + 4 * y3), tmp2, xmask)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (64, 4), (4, 1))
assert_size_stride(primals_3, (64,), (1,))
assert_size_stride(primals_4, (64, 4), (4, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4, 4), (4, 1))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(primals_1, (64,
4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 64), (1, 4),
0), alpha=1, beta=1, out=buf0)
del primals_2
del primals_3
buf1 = empty_strided_cuda((64, 64), (64, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_1, (64,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 64), (1, 4),
0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((1024, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf1, (1024, 4, 1), (4, 1, 1),
0), reinterpret_tensor(buf0, (1024, 1, 4), (4, 4, 1), 0), out=buf2)
buf3 = empty_strided_cuda((4096, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf2, (4096, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((64, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(4096, 4)](buf3, primals_7, buf4, 4096,
4, XBLOCK=4, YBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf5 = buf3
del buf3
extern_kernels.mm(reinterpret_tensor(buf4, (4096, 4), (4, 1), 0),
reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf5)
buf6 = reinterpret_tensor(buf5, (64, 4, 4, 4, 4), (256, 64, 16, 4,
1), 0)
del buf5
triton_poi_fused_add_1[grid(16384)](buf6, primals_9, 16384, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_9
return reinterpret_tensor(buf6, (64, 4, 4, 4, 4), (256, 1, 4, 64, 16), 0
), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), reinterpret_tensor(buf2, (4096, 4), (4, 1), 0), reinterpret_tensor(
buf4, (4096, 4), (4, 1), 0), primals_8, primals_6, reinterpret_tensor(
buf1, (1024, 1, 4), (4, 1, 1), 0), reinterpret_tensor(buf0, (1024,
4, 1), (4, 1, 4), 0)
class SeparableBlockNew(Module):
def __init__(self, input_size, kernel_channels_in, kernel_channels_out,
kernel_size):
super(SeparableBlockNew, self).__init__()
self.input_size = input_size
self.kernel_size = kernel_size
self.kernel_channels_in = kernel_channels_in
self.kernel_channels_out = kernel_channels_out
self.make_kernel_in = Linear(input_size, kernel_size * kernel_size *
kernel_channels_in)
self.make_kernel_out = Linear(input_size, kernel_size * kernel_size *
kernel_channels_out)
self.kernel_linear_in = Linear(kernel_channels_in, kernel_channels_in)
self.kernel_linear_out = Linear(kernel_channels_out,
kernel_channels_out)
def forward(self, input_0):
primals_2 = self.make_kernel_in.weight
primals_3 = self.make_kernel_in.bias
primals_4 = self.make_kernel_out.weight
primals_5 = self.make_kernel_out.bias
primals_6 = self.kernel_linear_in.weight
primals_7 = self.kernel_linear_in.bias
primals_8 = self.kernel_linear_out.weight
primals_9 = self.kernel_linear_out.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
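# Note (observation, not part of the original entry): the compiled call()
# realizes the kernel_out @ kernel_in product as one bmm over 64*4*4 = 1024
# rank-1 (4,1) x (1,4) outer products, matching the eager matmul semantics.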
| morzh/hyperstyle | SeparableBlock | false | 16,116 | [
"MIT"
] | 692 | ed87f620143d045f374aa42712a43abd751a90e6 | https://github.com/morzh/hyperstyle/tree/ed87f620143d045f374aa42712a43abd751a90e6 |
ComplexReLU | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/6q/c6q46q7lsepa4jw5qgcgbc5kiud5wm57hubk6vfo4gk47vl2tprk.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%arg0_1,), kwargs = {})
triton_poi_fused_relu_0 = async_compile.triton('triton_poi_fused_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [relu_1], Original ATen: [aten.relu]
triton_poi_fused_relu_0.run(arg1_1, buf1, 256, grid=grid(256), stream=stream0)
del arg1_1
return (buf0, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.utils
class ComplexReLU(nn.Module):
def __init__(self):
super(ComplexReLU, self).__init__()
self.relu_r = nn.ReLU()
self.relu_i = nn.ReLU()
def forward(self, input_r, input_i):
return self.relu_r(input_r), self.relu_i(input_i)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
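# Note (added for documentation): this is the split-complex activation often
# written CReLU(z) = ReLU(Re z) + i * ReLU(Im z); the real and imaginary
# channels are rectified independently rather than jointly.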
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.utils
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_relu_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_relu_0[grid(256)](arg0_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_relu_0[grid(256)](arg1_1, buf1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del arg1_1
return buf0, buf1
class ComplexReLUNew(nn.Module):
def __init__(self):
super(ComplexReLUNew, self).__init__()
self.relu_r = nn.ReLU()
self.relu_i = nn.ReLU()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0], output[1]
| muqiaoy/dl_signal | ComplexReLU | false | 16,117 | [
"MIT"
] | 54 | 3a30d14982016644bfc96a7d1ca0109b441f17fd | https://github.com/muqiaoy/dl_signal/tree/3a30d14982016644bfc96a7d1ca0109b441f17fd |
LabelSmoothing | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/gs/cgsb4il4unvci3l2nd5qtuufuznqp24ykh5ko35prchka5kxxjdf.py
# Topologically Sorted Source Nodes: [probs, add_1, log, mul, sub, mul_1, target1, mul_2, sub_1, sub_2, add_2, log_1, mul_3, add_3, loss, mean], Original ATen: [aten.sigmoid, aten.add, aten.log, aten.mul, aten.rsub, aten.neg, aten.mean]
# Source node to ATen node mapping:
# add_1 => add_1
# add_2 => add_2
# add_3 => add_3
# log => log
# log_1 => log_1
# loss => neg
# mean => mean
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# probs => sigmoid
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# target1 => add
# Graph fragment:
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sigmoid, 1e-15), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, 1.0), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg1_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, 0.0), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%log, %add), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %add), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub_2, 1e-15), kwargs = {})
# %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add_2,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %log_1), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %mul_3), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%add_3,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%neg,), kwargs = {})
triton_per_fused_add_log_mean_mul_neg_rsub_sigmoid_0 = async_compile.triton('triton_per_fused_add_log_mean_mul_neg_rsub_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_log_mean_mul_neg_rsub_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_log_mean_mul_neg_rsub_sigmoid_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp5 = tl.load(in_ptr1 + (r0), None)
tmp1 = tl.sigmoid(tmp0)
tmp2 = 1e-15
tmp3 = tmp1 + tmp2
tmp4 = tl_math.log(tmp3)
tmp6 = 1.0
tmp7 = tmp5 * tmp6
tmp8 = tmp6 - tmp5
tmp9 = 0.0
tmp10 = tmp8 * tmp9
tmp11 = tmp7 + tmp10
tmp12 = tmp4 * tmp11
tmp13 = tmp6 - tmp11
tmp14 = tmp6 - tmp1
tmp15 = tmp14 + tmp2
tmp16 = tl_math.log(tmp15)
tmp17 = tmp13 * tmp16
tmp18 = tmp12 + tmp17
tmp19 = -tmp18
tmp20 = tl.broadcast_to(tmp19, [RBLOCK])
tmp22 = triton_helpers.promote_to_tensor(tl.sum(tmp20, 0))
tmp23 = 256.0
tmp24 = tmp22 / tmp23
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp24, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [probs, add_1, log, mul, sub, mul_1, target1, mul_2, sub_1, sub_2, add_2, log_1, mul_3, add_3, loss, mean], Original ATen: [aten.sigmoid, aten.add, aten.log, aten.mul, aten.rsub, aten.neg, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_add_log_mean_mul_neg_rsub_sigmoid_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class LabelSmoothing(nn.Module):
"""
NLL loss with label smoothing.
"""
def __init__(self, smoothing=0.0, n_cls=2):
"""
Constructor for the LabelSmoothing module.
:param smoothing: label smoothing factor
"""
super(LabelSmoothing, self).__init__()
self.confidence = 1.0 - smoothing + smoothing / n_cls
self.smoothing = smoothing / n_cls
def forward(self, x, target):
        probs = torch.sigmoid(x)
        target1 = self.confidence * target + (1 - target) * self.smoothing
        loss = -(torch.log(probs + 1e-15) * target1 +
            (1 - target1) * torch.log(1 - probs + 1e-15))
        return loss.mean()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
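# Hedged worked example (an assumption, not from the repo): with smoothing=0.1
# and n_cls=2, confidence = 1 - 0.1 + 0.1/2 = 0.95 and the smoothing mass is
# 0.05, so hard targets 1/0 become soft targets 0.95/0.05 before the
# BCE-style loss is taken.
if __name__ == "__main__":
    _crit = LabelSmoothing(smoothing=0.1, n_cls=2)
    _x = torch.zeros(2, 3)  # logits; sigmoid(0) = 0.5
    _t = torch.tensor([[1.0, 0.0, 1.0], [0.0, 1.0, 0.0]])
    print(float(_crit(_x, _t)))  # ~0.6931 = -log(0.5), independent of _t here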
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_log_mean_mul_neg_rsub_sigmoid_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
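    # Single-program persistent reduction: fuses sigmoid, both smoothed
    # BCE log terms, negation, and the mean over all 256 elements.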
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp5 = tl.load(in_ptr1 + r0, None)
tmp1 = tl.sigmoid(tmp0)
tmp2 = 1e-15
tmp3 = tmp1 + tmp2
tmp4 = tl_math.log(tmp3)
tmp6 = 1.0
tmp7 = tmp5 * tmp6
tmp8 = tmp6 - tmp5
tmp9 = 0.0
tmp10 = tmp8 * tmp9
tmp11 = tmp7 + tmp10
tmp12 = tmp4 * tmp11
tmp13 = tmp6 - tmp11
tmp14 = tmp6 - tmp1
tmp15 = tmp14 + tmp2
tmp16 = tl_math.log(tmp15)
tmp17 = tmp13 * tmp16
tmp18 = tmp12 + tmp17
tmp19 = -tmp18
tmp20 = tl.broadcast_to(tmp19, [RBLOCK])
tmp22 = triton_helpers.promote_to_tensor(tl.sum(tmp20, 0))
tmp23 = 256.0
tmp24 = tmp22 / tmp23
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp24, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_log_mean_mul_neg_rsub_sigmoid_0[grid(1)](buf1,
arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class LabelSmoothingNew(nn.Module):
"""
NLL loss with label smoothing.
"""
def __init__(self, smoothing=0.0, n_cls=2):
"""
Constructor for the LabelSmoothing module.
:param smoothing: label smoothing factor
"""
super(LabelSmoothingNew, self).__init__()
self.confidence = 1.0 - smoothing + smoothing / n_cls
self.smoothing = smoothing / n_cls
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
 | mrblasco/kaggle_moa_winner_hungry_for_gold | LabelSmoothing | false | 16,118 | ["Apache-2.0"] | 89 | 00df6d0aa4a48e526cee3e36f6e723a1534bfa08 | https://github.com/mrblasco/kaggle_moa_winner_hungry_for_gold/tree/00df6d0aa4a48e526cee3e36f6e723a1534bfa08 |
ComplexConv1d | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/gi/cgia4tvd5fumnkgtqagflnhcrfhwpsmjd26j5ilpi4jdxru4aeum.py
# Topologically Sorted Source Nodes: [sub, add], Original ATen: [aten.sub, aten.add]
# Source node to ATen node mapping:
# add => add
# sub => sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%squeeze, %squeeze_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%squeeze_2, %squeeze_3), kwargs = {})
triton_poi_fused_add_sub_0 = async_compile.triton('triton_poi_fused_add_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_sub_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_sub_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 2)
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp4 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_out_ptr1 + (x2), xmask)
tmp9 = tl.load(in_ptr3 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 - tmp5
tmp8 = tmp7 + tmp1
tmp10 = tmp9 + tmp4
tmp11 = tmp8 + tmp10
tl.store(in_out_ptr0 + (x2), tmp6, xmask)
tl.store(in_out_ptr1 + (x2), tmp11, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3), (12, 3, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 3), (12, 3, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf0, (1, 4, 2), (8, 2, 1))
# Topologically Sorted Source Nodes: [conv1d_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(reinterpret_tensor(primals_6, (1, 4, 4), (16, 4, 1), 0), primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf1, (1, 4, 2), (8, 2, 1))
# Topologically Sorted Source Nodes: [conv1d_2], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(reinterpret_tensor(primals_6, (1, 4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf3, (1, 4, 2), (8, 2, 1))
# Topologically Sorted Source Nodes: [conv1d_3], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), primals_4, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf4, (1, 4, 2), (8, 2, 1))
buf2 = reinterpret_tensor(buf0, (4, 2), (2, 1), 0); del buf0 # reuse
buf5 = reinterpret_tensor(buf3, (4, 2), (2, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [sub, add], Original ATen: [aten.sub, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_sub_0.run(buf2, buf5, primals_2, buf1, primals_5, buf4, 8, grid=grid(8), stream=stream0)
del buf1
del buf4
del primals_2
del primals_5
return (buf2, buf5, primals_1, primals_4, reinterpret_tensor(primals_3, (1, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_6, (1, 4, 4), (16, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3), (12, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 3), (12, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.utils
class ComplexConv1d(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,
padding=0, dilation=1, groups=1, bias=True):
super(ComplexConv1d, self).__init__()
self.conv_r = nn.Conv1d(in_channels, out_channels, kernel_size,
stride, padding, dilation, groups, bias)
self.conv_i = nn.Conv1d(in_channels, out_channels, kernel_size,
stride, padding, dilation, groups, bias)
def forward(self, input_r, input_i):
        return (self.conv_r(input_r) - self.conv_i(input_i),
            self.conv_r(input_i) + self.conv_i(input_r))
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
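# Hedged note (an assumption, not from the repo): the pair returned by forward
# realizes complex convolution through four real convolutions, following
# (W_r + iW_i) * (x_r + ix_i) = (W_r*x_r - W_i*x_i) + i(W_r*x_i + W_i*x_r).
if __name__ == "__main__":
    _m = ComplexConv1d(4, 4)
    # Recent PyTorch accepts unbatched (C, L) conv inputs; a kernel of 3 on
    # length 4 yields length 2, so both outputs are (4, 2).
    _out_r, _out_i = _m(torch.rand(4, 4), torch.rand(4, 4))
    assert _out_r.shape == _out_i.shape == (4, 2)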
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.utils
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_sub_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1,
in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr):
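    # Complex combine with fused bias: real out = (x_r*W_r + b_r) -
    # (x_i*W_i + b_i); imag out = (x_i*W_r + b_r) + (x_r*W_i + b_i).
    # Results are written in place over two of the four conv outputs.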
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 2
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_out_ptr1 + x2, xmask)
tmp9 = tl.load(in_ptr3 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 - tmp5
tmp8 = tmp7 + tmp1
tmp10 = tmp9 + tmp4
tmp11 = tmp8 + tmp10
tl.store(in_out_ptr0 + x2, tmp6, xmask)
tl.store(in_out_ptr1 + x2, tmp11, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3), (12, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4, 3), (12, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1,
4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(0,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf0, (1, 4, 2), (8, 2, 1))
buf1 = extern_kernels.convolution(reinterpret_tensor(primals_6, (1,
4, 4), (16, 4, 1), 0), primals_4, stride=(1,), padding=(0,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf1, (1, 4, 2), (8, 2, 1))
buf3 = extern_kernels.convolution(reinterpret_tensor(primals_6, (1,
4, 4), (16, 4, 1), 0), primals_1, stride=(1,), padding=(0,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf3, (1, 4, 2), (8, 2, 1))
buf4 = extern_kernels.convolution(reinterpret_tensor(primals_3, (1,
4, 4), (16, 4, 1), 0), primals_4, stride=(1,), padding=(0,),
dilation=(1,), transposed=False, output_padding=(0,), groups=1,
bias=None)
assert_size_stride(buf4, (1, 4, 2), (8, 2, 1))
buf2 = reinterpret_tensor(buf0, (4, 2), (2, 1), 0)
del buf0
buf5 = reinterpret_tensor(buf3, (4, 2), (2, 1), 0)
del buf3
get_raw_stream(0)
triton_poi_fused_add_sub_0[grid(8)](buf2, buf5, primals_2, buf1,
primals_5, buf4, 8, XBLOCK=8, num_warps=1, num_stages=1)
del buf1
del buf4
del primals_2
del primals_5
return buf2, buf5, primals_1, primals_4, reinterpret_tensor(primals_3,
(1, 4, 4), (16, 4, 1), 0), reinterpret_tensor(primals_6, (1, 4, 4),
(16, 4, 1), 0)
class ComplexConv1dNew(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,
padding=0, dilation=1, groups=1, bias=True):
super(ComplexConv1dNew, self).__init__()
self.conv_r = nn.Conv1d(in_channels, out_channels, kernel_size,
stride, padding, dilation, groups, bias)
self.conv_i = nn.Conv1d(in_channels, out_channels, kernel_size,
stride, padding, dilation, groups, bias)
def forward(self, input_0, input_1):
primals_1 = self.conv_r.weight
primals_2 = self.conv_r.bias
primals_4 = self.conv_i.weight
primals_5 = self.conv_i.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0], output[1]
 | muqiaoy/dl_signal | ComplexConv1d | false | 16,119 | ["MIT"] | 54 | 3a30d14982016644bfc96a7d1ca0109b441f17fd | https://github.com/muqiaoy/dl_signal/tree/3a30d14982016644bfc96a7d1ca0109b441f17fd |
ComplexMaxPool1d | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/eg/ceg5r5lesy4yjcsgwggyxi634wo4m2ibfbfrzibrfm7cykiqvg76.py
# Topologically Sorted Source Nodes: [max_pool1d], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# max_pool1d => _low_memory_max_pool2d_with_offsets
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets : [num_users=1] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%unsqueeze, [1, 4], [1, 4], [0, 0], [1, 1], False), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tl.store(out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [max_pool1d], Original ATen: [aten.max_pool2d_with_indices]
stream0 = get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0.run(arg0_1, buf0, 4, grid=grid(4), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [max_pool1d_1], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_0.run(arg1_1, buf1, 4, grid=grid(4), stream=stream0)
del arg1_1
return (reinterpret_tensor(buf0, (4, 1), (1, 1), 0), reinterpret_tensor(buf1, (4, 1), (1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.utils
class ComplexMaxPool1d(nn.Module):
def __init__(self, kernel_size, stride=None, padding=0, dilation=1,
return_indices=False, ceil_mode=False):
super(ComplexMaxPool1d, self).__init__()
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.ceil_mode = ceil_mode
self.return_indices = return_indices
        self.maxpool_r = nn.MaxPool1d(kernel_size=self.kernel_size,
            stride=self.stride, padding=self.padding, dilation=self.dilation,
            ceil_mode=self.ceil_mode, return_indices=self.return_indices)
        self.maxpool_i = nn.MaxPool1d(kernel_size=self.kernel_size,
            stride=self.stride, padding=self.padding, dilation=self.dilation,
            ceil_mode=self.ceil_mode, return_indices=self.return_indices)
def forward(self, input_r, input_i):
return self.maxpool_r(input_r), self.maxpool_i(input_i)
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'kernel_size': 4}]
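# Hedged usage sketch (an assumption, not from the repo): stride defaults to
# kernel_size in nn.MaxPool1d, so a length-4 signal pools to length 1 here.
if __name__ == "__main__":
    _pool = ComplexMaxPool1d(kernel_size=4)
    _r, _i = _pool(torch.rand(4, 4), torch.rand(4, 4))
    assert _r.shape == _i.shape == (4, 1)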
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.utils
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
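    # kernel_size=4 over rows of length 4: each output element is the
    # pairwise maximum of four consecutive inputs.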
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tl.store(out_ptr0 + x0, tmp6, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0[grid(4)](arg0_1, buf0, 4,
XBLOCK=4, num_warps=1, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 1, 1), (1, 1, 1), torch.float32)
triton_poi_fused_max_pool2d_with_indices_0[grid(4)](arg1_1, buf1, 4,
XBLOCK=4, num_warps=1, num_stages=1)
del arg1_1
return reinterpret_tensor(buf0, (4, 1), (1, 1), 0), reinterpret_tensor(buf1
, (4, 1), (1, 1), 0)
class ComplexMaxPool1dNew(nn.Module):
def __init__(self, kernel_size, stride=None, padding=0, dilation=1,
return_indices=False, ceil_mode=False):
super(ComplexMaxPool1dNew, self).__init__()
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.ceil_mode = ceil_mode
self.return_indices = return_indices
        self.maxpool_r = nn.MaxPool1d(kernel_size=self.kernel_size,
            stride=self.stride, padding=self.padding, dilation=self.dilation,
            ceil_mode=self.ceil_mode, return_indices=self.return_indices)
        self.maxpool_i = nn.MaxPool1d(kernel_size=self.kernel_size,
            stride=self.stride, padding=self.padding, dilation=self.dilation,
            ceil_mode=self.ceil_mode, return_indices=self.return_indices)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0], output[1]
 | muqiaoy/dl_signal | ComplexMaxPool1d | false | 16,120 | ["MIT"] | 54 | 3a30d14982016644bfc96a7d1ca0109b441f17fd | https://github.com/muqiaoy/dl_signal/tree/3a30d14982016644bfc96a7d1ca0109b441f17fd |
LinearWithGroupNorm | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ke/ckev4oib3cv2oksvgsnixm2ir66mykxwkb2phwtnkc7jitdkle52.py
# Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.native_group_norm, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_1 => add, add_1, mul_1, rsqrt, var_mean
# out_2 => relu
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_2, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, %unsqueeze_5), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_2), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_per_fused_native_group_norm_relu_threshold_backward_0 = async_compile.triton('triton_per_fused_native_group_norm_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*i1', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_relu_threshold_backward_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_native_group_norm_relu_threshold_backward_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 16.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = tl.full([1, 1], 0, tl.int32)
tmp29 = triton_helpers.maximum(tmp28, tmp27)
tmp30 = 0.0
tmp31 = tmp29 <= tmp30
tl.store(out_ptr2 + (r1 + (16*x0)), tmp29, xmask)
tl.store(out_ptr3 + (r1 + (16*x0)), tmp31, xmask)
tl.store(out_ptr4 + (x0), tmp22, xmask)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf4 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.native_group_norm, aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_per_fused_native_group_norm_relu_threshold_backward_0.run(buf0, primals_3, primals_4, buf1, buf5, buf6, buf4, 16, 16, grid=grid(16), stream=stream0)
del primals_4
return (buf5, primals_3, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), buf0, reinterpret_tensor(buf1, (4, 4), (4, 1), 0), reinterpret_tensor(buf4, (4, 4), (4, 1), 0), buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
from torch import nn
from math import gcd
import torch.cuda
class LinearWithGroupNorm(nn.Module):
"""Linear layer with group normalization activation used in LaneGCN."""
def __init__(self, n_in: 'int', n_out: 'int', num_groups: 'int'=32,
activation: 'bool'=True) ->None:
"""
Initialize layer.
:param n_in: Number of input channels.
:param n_out: Number of output channels.
:param num_groups: Number of groups for GroupNorm.
:param activation: Boolean indicating whether to apply ReLU activation.
"""
super().__init__()
self.linear = nn.Linear(n_in, n_out, bias=False)
self.norm = nn.GroupNorm(gcd(num_groups, n_out), n_out)
self.relu = nn.ReLU(inplace=True)
self.activation = activation
def forward(self, x: 'torch.Tensor') ->torch.Tensor:
"""
Apply linear layer to input tensor.
:param x: Input tensor.
:return: Output of linear layer.
"""
out = self.linear(x)
out = self.norm(out)
if self.activation:
out = self.relu(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'n_in': 4, 'n_out': 4}]
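# Hedged note (an assumption, not from the repo): gcd(num_groups, n_out)
# guarantees the group count divides the channel count, as nn.GroupNorm
# requires; with the defaults here, gcd(32, 4) = 4 groups of one channel.
if __name__ == "__main__":
    _layer = LinearWithGroupNorm(4, 4)
    _y = _layer(torch.rand(4, 4, 4, 4))
    assert _y.shape == (4, 4, 4, 4) and bool((_y >= 0).all())  # post-ReLU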
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
from torch import nn
from math import gcd
import torch.cuda
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_native_group_norm_relu_threshold_backward_0(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel,
rnumel, XBLOCK: tl.constexpr):
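    # One program per (sample, group): reduces mean/variance over the 16
    # elements of a group, normalizes, applies the affine + ReLU, and also
    # stores the <= 0 mask consumed by threshold_backward.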
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 16.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = tl.full([1, 1], 0, tl.int32)
tmp29 = triton_helpers.maximum(tmp28, tmp27)
tmp30 = 0.0
tmp31 = tmp29 <= tmp30
tl.store(out_ptr2 + (r1 + 16 * x0), tmp29, xmask)
tl.store(out_ptr3 + (r1 + 16 * x0), tmp31, xmask)
tl.store(out_ptr4 + x0, tmp22, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf4 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
get_raw_stream(0)
triton_per_fused_native_group_norm_relu_threshold_backward_0[grid(16)](
buf0, primals_3, primals_4, buf1, buf5, buf6, buf4, 16, 16,
XBLOCK=1, num_warps=2, num_stages=1)
del primals_4
return buf5, primals_3, reinterpret_tensor(primals_2, (64, 4), (4, 1), 0
), buf0, reinterpret_tensor(buf1, (4, 4), (4, 1), 0
), reinterpret_tensor(buf4, (4, 4), (4, 1), 0), buf6
class LinearWithGroupNormNew(nn.Module):
"""Linear layer with group normalization activation used in LaneGCN."""
def __init__(self, n_in: 'int', n_out: 'int', num_groups: 'int'=32,
activation: 'bool'=True) ->None:
"""
Initialize layer.
:param n_in: Number of input channels.
:param n_out: Number of output channels.
:param num_groups: Number of groups for GroupNorm.
:param activation: Boolean indicating whether to apply ReLU activation.
"""
super().__init__()
self.linear = nn.Linear(n_in, n_out, bias=False)
self.norm = nn.GroupNorm(gcd(num_groups, n_out), n_out)
self.relu = nn.ReLU(inplace=True)
self.activation = activation
def forward(self, input_0):
primals_1 = self.linear.weight
primals_3 = self.norm.weight
primals_4 = self.norm.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
 | motional/nuplan-devkit | LinearWithGroupNorm | false | 16,121 | ["Apache-2.0"] | 128 | e39029e788b17f47f2fcadb774098ef8fbdd0d67 | https://github.com/motional/nuplan-devkit/tree/e39029e788b17f47f2fcadb774098ef8fbdd0d67 |
CRNNcell | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/d3/cd33wohrdv66tzoju3okhwl7cuvsjcxafk6mcnxyf5ldval5oi7j.py
# Topologically Sorted Source Nodes: [in_to_hid, hid_to_hid, ih_to_ih, add, add_1, hidden], Original ATen: [aten.convolution, aten.add, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# hid_to_hid => convolution_1
# hidden => relu
# ih_to_ih => convolution_2
# in_to_hid => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_6, %primals_4, %primals_5, [1, 1], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_9, %primals_7, %primals_8, [1, 1], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution, %convolution_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %convolution_2), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_add_convolution_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_add_convolution_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*i1', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_relu_threshold_backward_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 25) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x3), xmask)
tmp4 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + (x3), xmask)
tmp8 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp11 = tl.full([1], 0, tl.int32)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp13 = 0.0
tmp14 = tmp12 <= tmp13
tl.store(in_out_ptr0 + (x3), tmp12, xmask)
tl.store(out_ptr0 + (x3), tmp14, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [in_to_hid], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 5, 5), (100, 25, 5, 1))
# Topologically Sorted Source Nodes: [hid_to_hid], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(primals_6, primals_4, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 5, 5), (100, 25, 5, 1))
# Topologically Sorted Source Nodes: [ih_to_ih], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(primals_9, primals_7, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 5, 5), (100, 25, 5, 1))
buf3 = buf0; del buf0 # reuse
buf4 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.bool)
# Topologically Sorted Source Nodes: [in_to_hid, hid_to_hid, ih_to_ih, add, add_1, hidden], Original ATen: [aten.convolution, aten.add, aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_add_convolution_relu_threshold_backward_0.run(buf3, primals_2, buf1, primals_5, buf2, primals_8, buf4, 400, grid=grid(400), stream=stream0)
del buf1
del buf2
del primals_2
del primals_5
del primals_8
return (buf3, primals_1, primals_3, primals_4, primals_6, primals_7, primals_9, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class CRNNcell(nn.Module):
"""
Convolutional RNN cell that evolves over both time and iterations
Parameters
-----------------
input: 4d tensor, shape (batch_size, channel, width, height)
hidden: hidden states in temporal dimension, 4d tensor, shape (batch_size, hidden_size, width, height)
hidden_iteration: hidden states in iteration dimension, 4d tensor, shape (batch_size, hidden_size, width, height)
Returns
-----------------
output: 4d tensor, shape (batch_size, hidden_size, width, height)
"""
def __init__(self, input_size, hidden_size, kernel_size):
super(CRNNcell, self).__init__()
self.kernel_size = kernel_size
        self.i2h = nn.Conv2d(input_size, hidden_size, kernel_size,
            padding=self.kernel_size // 2)
        self.h2h = nn.Conv2d(hidden_size, hidden_size, kernel_size,
            padding=self.kernel_size // 2)
        self.ih2ih = nn.Conv2d(hidden_size, hidden_size, kernel_size,
            padding=self.kernel_size // 2)
self.relu = nn.ReLU(inplace=True)
def forward(self, input, hidden_iteration, hidden):
in_to_hid = self.i2h(input)
hid_to_hid = self.h2h(hidden)
ih_to_ih = self.ih2ih(hidden_iteration)
hidden = self.relu(in_to_hid + hid_to_hid + ih_to_ih)
return hidden
def get_inputs():
    return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]),
        torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'hidden_size': 4, 'kernel_size': 4}]
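# Hedged usage sketch (an assumption, not from the repo): with kernel_size=4
# and padding kernel_size // 2 = 2, a 4x4 feature map grows to 5x5
# (4 + 2*2 - 4 + 1 = 5), matching the (4, 4, 5, 5) buffers above.
if __name__ == "__main__":
    _cell = CRNNcell(input_size=4, hidden_size=4, kernel_size=4)
    _h = _cell(torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4),
        torch.rand(4, 4, 4, 4))
    assert _h.shape == (4, 4, 5, 5)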
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_convolution_relu_threshold_backward_0(in_out_ptr0,
in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
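    # Sums the three conv outputs with their per-channel biases, applies
    # ReLU in place, and stores the <= 0 mask for the backward pass.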
xnumel = 400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 25 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, xmask)
tmp4 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr3 + x3, xmask)
tmp8 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp11 = tl.full([1], 0, tl.int32)
tmp12 = triton_helpers.maximum(tmp11, tmp10)
tmp13 = 0.0
tmp14 = tmp12 <= tmp13
tl.store(in_out_ptr0 + x3, tmp12, xmask)
tl.store(out_ptr0 + x3, tmp14, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_7, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 5, 5), (100, 25, 5, 1))
buf1 = extern_kernels.convolution(primals_6, primals_4, stride=(1,
1), padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 5, 5), (100, 25, 5, 1))
buf2 = extern_kernels.convolution(primals_9, primals_7, stride=(1,
1), padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 5, 5), (100, 25, 5, 1))
buf3 = buf0
del buf0
buf4 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_add_convolution_relu_threshold_backward_0[grid(400)](
buf3, primals_2, buf1, primals_5, buf2, primals_8, buf4, 400,
XBLOCK=256, num_warps=4, num_stages=1)
del buf1
del buf2
del primals_2
del primals_5
del primals_8
return (buf3, primals_1, primals_3, primals_4, primals_6, primals_7,
primals_9, buf4)
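# call returns the activated hidden state (buf3) first; the remaining tensors
# (weights, inputs, and the ReLU mask buf4) are retained for the backward pass.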
class CRNNcellNew(nn.Module):
"""
Convolutional RNN cell that evolves over both time and iterations
Parameters
-----------------
input: 4d tensor, shape (batch_size, channel, width, height)
hidden: hidden states in temporal dimension, 4d tensor, shape (batch_size, hidden_size, width, height)
hidden_iteration: hidden states in iteration dimension, 4d tensor, shape (batch_size, hidden_size, width, height)
Returns
-----------------
output: 4d tensor, shape (batch_size, hidden_size, width, height)
"""
def __init__(self, input_size, hidden_size, kernel_size):
super(CRNNcellNew, self).__init__()
self.kernel_size = kernel_size
self.i2h = nn.Conv2d(input_size, hidden_size, kernel_size, padding=
self.kernel_size // 2)
self.h2h = nn.Conv2d(hidden_size, hidden_size, kernel_size, padding
=self.kernel_size // 2)
self.ih2ih = nn.Conv2d(hidden_size, hidden_size, kernel_size,
padding=self.kernel_size // 2)
self.relu = nn.ReLU(inplace=True)
def forward(self, input_0, input_1, input_2):
        primals_1 = self.i2h.weight
        primals_2 = self.i2h.bias
        primals_4 = self.h2h.weight
        primals_5 = self.h2h.bias
        primals_7 = self.ih2ih.weight
        primals_8 = self.ih2ih.bias
        primals_3 = input_0
        primals_9 = input_1
        primals_6 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
| myyaqubpython/https-github.com-cq615-Deep-MRI-Reconstruction | CRNNcell | false | 16,122 | [
"Apache-2.0"
] | 260 | 4484cff9f1e19ff9874c279c5c5d6cf2a317ddbf | https://github.com/myyaqubpython/https-github.com-cq615-Deep-MRI-Reconstruction/tree/4484cff9f1e19ff9874c279c5c5d6cf2a317ddbf |
ComplexLinear | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/k7/ck7eabcc7dltczcwxevz2ehp36wjm7bizks5htevqjcppd66dcya.py
# Topologically Sorted Source Nodes: [sub, add], Original ATen: [aten.sub, aten.add]
# Source node to ATen node mapping:
# add => add
# sub => sub
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %view_3), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_5, %view_7), kwargs = {})
triton_poi_fused_add_sub_0 = async_compile.triton('triton_poi_fused_add_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_sub_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_sub_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x2), xmask)
tmp4 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_out_ptr1 + (x2), xmask)
tmp9 = tl.load(in_ptr3 + (x2), xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 - tmp5
tmp8 = tmp7 + tmp1
tmp10 = tmp9 + tmp4
tmp11 = tmp8 + tmp10
tl.store(in_out_ptr0 + (x2), tmp6, xmask)
tl.store(in_out_ptr1 + (x2), tmp11, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf3)
del primals_1
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf4)
del primals_4
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [sub, add], Original ATen: [aten.sub, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_sub_0.run(buf2, buf5, primals_2, buf1, primals_5, buf4, 256, grid=grid(256), stream=stream0)
del buf1
del buf4
del primals_2
del primals_5
return (buf2, buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.utils
class ComplexLinear(nn.Module):
def __init__(self, in_features, out_features):
super(ComplexLinear, self).__init__()
self.fc_r = nn.Linear(in_features, out_features)
self.fc_i = nn.Linear(in_features, out_features)
def forward(self, input_r, input_i):
return self.fc_r(input_r) - self.fc_i(input_i), self.fc_r(input_i
) + self.fc_i(input_r)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
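# Minimal usage sketch (illustrative, not part of the original module): the two
# outputs realize the complex product (x_r + i*x_i)(W_r + i*W_i) with four real
# matmuls: real = fc_r(x_r) - fc_i(x_i), imag = fc_r(x_i) + fc_i(x_r).
def _example():
    m = ComplexLinear(in_features=4, out_features=4)
    x_r, x_i = get_inputs()
    out_r, out_i = m(x_r, x_i)
    assert out_r.shape == out_i.shape == (4, 4, 4, 4)
    return out_r, out_i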
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.utils
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_sub_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1,
in_ptr2, in_ptr3, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_out_ptr1 + x2, xmask)
tmp9 = tl.load(in_ptr3 + x2, xmask)
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 - tmp5
tmp8 = tmp7 + tmp1
tmp10 = tmp9 + tmp4
tmp11 = tmp8 + tmp10
tl.store(in_out_ptr0 + x2, tmp6, xmask)
tl.store(in_out_ptr1 + x2, tmp11, xmask)
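# Note (read off the loads above): the kernel fuses the two bias adds with the
# complex-product epilogue over the four matmul buffers, computing in place
#   in_out_ptr0 <- (x_r @ W_r^T + b_r) - (x_i @ W_i^T + b_i)   (real part)
#   in_out_ptr1 <- (x_i @ W_r^T + b_r) + (x_r @ W_i^T + b_i)   (imaginary part)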
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_6, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf3)
del primals_1
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf4)
del primals_4
buf2 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf5 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf3
get_raw_stream(0)
triton_poi_fused_add_sub_0[grid(256)](buf2, buf5, primals_2, buf1,
primals_5, buf4, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf1
del buf4
del primals_2
del primals_5
return buf2, buf5, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_6, (64, 4), (4, 1), 0)
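# call returns the real part (buf2) and the imaginary part (buf5); the two
# reshaped inputs are retained for the backward pass.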
class ComplexLinearNew(nn.Module):
def __init__(self, in_features, out_features):
super(ComplexLinearNew, self).__init__()
self.fc_r = nn.Linear(in_features, out_features)
self.fc_i = nn.Linear(in_features, out_features)
def forward(self, input_0, input_1):
primals_1 = self.fc_r.weight
primals_2 = self.fc_r.bias
primals_4 = self.fc_i.weight
primals_5 = self.fc_i.bias
primals_3 = input_0
primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0], output[1]
| muqiaoy/dl_signal | ComplexLinear | false | 16,123 | [
"MIT"
] | 54 | 3a30d14982016644bfc96a7d1ca0109b441f17fd | https://github.com/muqiaoy/dl_signal/tree/3a30d14982016644bfc96a7d1ca0109b441f17fd |
LovaszLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ws/cwsqdif7rwdifpmkktjmywepqj56sr7xv7wkuazqreqod25pnlsh.py
# Topologically Sorted Source Nodes: [mul, signs, mul_1, errors, sort, gt_sorted, gts, cumsum, sub_3, cumsum_1, mul_8, signs_4, mul_9, errors_4, sort_4, gt_sorted_4, gts_4, cumsum_8, sub_28, cumsum_9], Original ATen: [aten.mul, aten.sub, aten.rsub, aten.sort, aten.index, aten.sum, aten.cumsum]
# Source node to ATen node mapping:
# cumsum => cumsum
# cumsum_1 => cumsum_1
# cumsum_8 => cumsum_8
# cumsum_9 => cumsum_9
# errors => sub_1
# errors_4 => sub_26
# gt_sorted => index
# gt_sorted_4 => index_4
# gts => sum_1
# gts_4 => sum_9
# mul => mul
# mul_1 => mul_1
# mul_8 => mul_12
# mul_9 => mul_13
# signs => sub
# signs_4 => sub_25
# sort => sort
# sort_4 => sort_4
# sub_28 => sub_28
# sub_3 => sub_3
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 2.0), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, 1.0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %sub), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %mul_1), kwargs = {})
# %sort : [num_users=2] = call_function[target=torch.ops.aten.sort.default](args = (%sub_1, 0, True), kwargs = {})
# %index : [num_users=3] = call_function[target=torch.ops.aten.index.Tensor](args = (%view_1, [%getitem_1]), kwargs = {})
# %sum_1 : [num_users=2] = call_function[target=torch.ops.aten.sum.default](args = (%index,), kwargs = {})
# %cumsum : [num_users=1] = call_function[target=torch.ops.aten.cumsum.default](args = (%index, 0), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %index), kwargs = {})
# %cumsum_1 : [num_users=1] = call_function[target=torch.ops.aten.cumsum.default](args = (%sub_3, 0), kwargs = {})
# %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_9, 2.0), kwargs = {})
# %sub_25 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_12, 1.0), kwargs = {})
# %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_8, %sub_25), kwargs = {})
# %sub_26 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %mul_13), kwargs = {})
# %sort_4 : [num_users=2] = call_function[target=torch.ops.aten.sort.default](args = (%sub_26, 0, True), kwargs = {})
# %index_4 : [num_users=3] = call_function[target=torch.ops.aten.index.Tensor](args = (%view_9, [%getitem_9]), kwargs = {})
# %sum_9 : [num_users=2] = call_function[target=torch.ops.aten.sum.default](args = (%index_4,), kwargs = {})
# %cumsum_8 : [num_users=1] = call_function[target=torch.ops.aten.cumsum.default](args = (%index_4, 0), kwargs = {})
# %sub_28 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %index_4), kwargs = {})
# %cumsum_9 : [num_users=1] = call_function[target=torch.ops.aten.cumsum.default](args = (%sub_28, 0), kwargs = {})
triton_per_fused_cumsum_index_mul_rsub_sort_sub_sum_0 = async_compile.triton('triton_per_fused_cumsum_index_mul_rsub_sort_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton.jit
def _triton_helper_fn_add0(arg0_0, arg1_0):
tmp0 = arg0_0 + arg1_0
return tmp0
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: 'i32', 11: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {10: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11), equal_to_1=(10,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_cumsum_index_mul_rsub_sort_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_cumsum_index_mul_rsub_sort_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr2, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8, out_ptr9, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = 2.0
tmp3 = tmp1 * tmp2
tmp4 = 1.0
tmp5 = tmp3 - tmp4
tmp6 = tmp0 * tmp5
tmp7 = tmp4 - tmp6
tmp8 = r0
tmp9 = tmp8.to(tl.int16)
tmp10 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp11 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12, tmp13, = triton_helpers.sort_with_index(tmp10, tmp11, None, 1, stable=False, descending=True)
tmp14 = -tmp0
tmp15 = tmp4 - tmp1
tmp16 = tmp15 * tmp2
tmp17 = tmp16 - tmp4
tmp18 = tmp14 * tmp17
tmp19 = tmp4 - tmp18
tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK])
tmp21, tmp22, = triton_helpers.sort_with_index(tmp20, tmp11, None, 1, stable=False, descending=True)
tmp23 = tmp13.to(tl.int64)
tmp24 = tl.full([XBLOCK, RBLOCK], 64, tl.int32)
tmp25 = tmp23 + tmp24
tmp26 = tmp23 < 0
tmp27 = tl.where(tmp26, tmp25, tmp23)
tl.device_assert((0 <= tmp27) & (tmp27 < 64), "index out of bounds: 0 <= tmp27 < 64")
tmp29 = tl.load(in_ptr1 + (tmp27), None, eviction_policy='evict_last')
tmp30 = tl.broadcast_to(tmp29, [XBLOCK, RBLOCK])
tmp32 = tl.sum(tmp30, 1)[:, None]
tmp33 = tmp29.to(tl.float32)
tmp34 = tl.broadcast_to(tmp33, [XBLOCK, RBLOCK])
tmp35, = tl.associative_scan((tmp34,), 1, _triton_helper_fn_add0)
tmp36 = tmp4 - tmp29
tmp37 = tmp36.to(tl.float32)
tmp38 = tl.broadcast_to(tmp37, [XBLOCK, RBLOCK])
tmp39, = tl.associative_scan((tmp38,), 1, _triton_helper_fn_add0)
tmp40 = tmp22.to(tl.int64)
tmp41 = tmp40 + tmp24
tmp42 = tmp40 < 0
tmp43 = tl.where(tmp42, tmp41, tmp40)
tl.device_assert((0 <= tmp43) & (tmp43 < 64), "index out of bounds: 0 <= tmp43 < 64")
tmp45 = tl.load(in_ptr1 + (tmp43 % 64), None, eviction_policy='evict_last')
tmp46 = tmp4 - tmp45
tmp47 = tl.broadcast_to(tmp46, [XBLOCK, RBLOCK])
tmp49 = tl.sum(tmp47, 1)[:, None]
tmp50 = tmp46.to(tl.float32)
tmp51 = tl.broadcast_to(tmp50, [XBLOCK, RBLOCK])
tmp52, = tl.associative_scan((tmp51,), 1, _triton_helper_fn_add0)
tmp53 = tmp4 - tmp46
tmp54 = tmp53.to(tl.float32)
tmp55 = tl.broadcast_to(tmp54, [XBLOCK, RBLOCK])
tmp56, = tl.associative_scan((tmp55,), 1, _triton_helper_fn_add0)
tl.store(out_ptr0 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp12, None)
tl.store(out_ptr2 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp21, None)
tl.store(out_ptr5 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp35, None)
tl.store(out_ptr6 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp39, None)
tl.store(out_ptr8 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp52, None)
tl.store(out_ptr9 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp56, None)
tl.store(out_ptr4 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp32, None)
tl.store(out_ptr7 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp49, None)
''', device_str='cuda')
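# Reference sketch of the math the kernel above fuses (an illustrative
# assumption mirroring the usual lovasz_grad helper, not compiler output).
# The kernel runs this pipeline twice per 64-element slice: once on
# (logits, labels) and once on (-logits, 1 - labels); the division and the
# adjacent differencing happen later, in the final reduction kernel.
def _lovasz_pieces_reference(logits, labels):
    signs = 2.0 * labels - 1.0
    errors = 1.0 - logits * signs
    errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
    gt_sorted = labels[perm]
    gts = gt_sorted.sum()
    intersection = gts - gt_sorted.cumsum(0)   # matches %sub_2 = sum - cumsum
    union = gts + (1.0 - gt_sorted).cumsum(0)  # matches %add = sum + cumsum_1
    jaccard = 1.0 - intersection / union
    return errors_sorted, jaccard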
# kernel path: runs/run_shard_0/inductor_cache/lc/clcn56zgwt73tdf5fnjfwzklq2tqnrcvlg5nsq42wlgja3wndd2g.py
# Topologically Sorted Source Nodes: [mul_2, signs_1, mul_3, errors_1, sort_1, gt_sorted_1, gts_1, cumsum_2, sub_9, cumsum_3, mul_10, signs_5, mul_11, errors_5, sort_5, gt_sorted_5, gts_5, cumsum_10, sub_34, cumsum_11], Original ATen: [aten.mul, aten.sub, aten.rsub, aten.sort, aten.index, aten.sum, aten.cumsum]
# Source node to ATen node mapping:
# cumsum_10 => cumsum_10
# cumsum_11 => cumsum_11
# cumsum_2 => cumsum_2
# cumsum_3 => cumsum_3
# errors_1 => sub_7
# errors_5 => sub_32
# gt_sorted_1 => index_1
# gt_sorted_5 => index_5
# gts_1 => sum_3
# gts_5 => sum_11
# mul_10 => mul_15
# mul_11 => mul_16
# mul_2 => mul_3
# mul_3 => mul_4
# signs_1 => sub_6
# signs_5 => sub_31
# sort_1 => sort_1
# sort_5 => sort_5
# sub_34 => sub_34
# sub_9 => sub_9
# Graph fragment:
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, 2.0), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_3, 1.0), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, %sub_6), kwargs = {})
# %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %mul_4), kwargs = {})
# %sort_1 : [num_users=2] = call_function[target=torch.ops.aten.sort.default](args = (%sub_7, 0, True), kwargs = {})
# %index_1 : [num_users=3] = call_function[target=torch.ops.aten.index.Tensor](args = (%view_3, [%getitem_3]), kwargs = {})
# %sum_3 : [num_users=2] = call_function[target=torch.ops.aten.sum.default](args = (%index_1,), kwargs = {})
# %cumsum_2 : [num_users=1] = call_function[target=torch.ops.aten.cumsum.default](args = (%index_1, 0), kwargs = {})
# %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %index_1), kwargs = {})
# %cumsum_3 : [num_users=1] = call_function[target=torch.ops.aten.cumsum.default](args = (%sub_9, 0), kwargs = {})
# %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_11, 2.0), kwargs = {})
# %sub_31 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_15, 1.0), kwargs = {})
# %mul_16 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_10, %sub_31), kwargs = {})
# %sub_32 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %mul_16), kwargs = {})
# %sort_5 : [num_users=2] = call_function[target=torch.ops.aten.sort.default](args = (%sub_32, 0, True), kwargs = {})
# %index_5 : [num_users=3] = call_function[target=torch.ops.aten.index.Tensor](args = (%view_11, [%getitem_11]), kwargs = {})
# %sum_11 : [num_users=2] = call_function[target=torch.ops.aten.sum.default](args = (%index_5,), kwargs = {})
# %cumsum_10 : [num_users=1] = call_function[target=torch.ops.aten.cumsum.default](args = (%index_5, 0), kwargs = {})
# %sub_34 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %index_5), kwargs = {})
# %cumsum_11 : [num_users=1] = call_function[target=torch.ops.aten.cumsum.default](args = (%sub_34, 0), kwargs = {})
triton_per_fused_cumsum_index_mul_rsub_sort_sub_sum_1 = async_compile.triton('triton_per_fused_cumsum_index_mul_rsub_sort_sub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton.jit
def _triton_helper_fn_add0(arg0_0, arg1_0):
tmp0 = arg0_0 + arg1_0
return tmp0
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: 'i32', 11: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {10: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11), equal_to_1=(10,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_cumsum_index_mul_rsub_sort_sub_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_cumsum_index_mul_rsub_sort_sub_sum_1(in_ptr0, in_ptr1, out_ptr0, out_ptr2, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8, out_ptr9, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (64 + r0), None)
tmp1 = tl.load(in_ptr1 + (64 + r0), None)
tmp2 = 2.0
tmp3 = tmp1 * tmp2
tmp4 = 1.0
tmp5 = tmp3 - tmp4
tmp6 = tmp0 * tmp5
tmp7 = tmp4 - tmp6
tmp8 = r0
tmp9 = tmp8.to(tl.int16)
tmp10 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp11 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12, tmp13, = triton_helpers.sort_with_index(tmp10, tmp11, None, 1, stable=False, descending=True)
tmp14 = -tmp0
tmp15 = tmp4 - tmp1
tmp16 = tmp15 * tmp2
tmp17 = tmp16 - tmp4
tmp18 = tmp14 * tmp17
tmp19 = tmp4 - tmp18
tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK])
tmp21, tmp22, = triton_helpers.sort_with_index(tmp20, tmp11, None, 1, stable=False, descending=True)
tmp23 = tmp13.to(tl.int64)
tmp24 = tl.full([XBLOCK, RBLOCK], 64, tl.int32)
tmp25 = tmp23 + tmp24
tmp26 = tmp23 < 0
tmp27 = tl.where(tmp26, tmp25, tmp23)
tl.device_assert((0 <= tmp27) & (tmp27 < 64), "index out of bounds: 0 <= tmp27 < 64")
tmp29 = tl.load(in_ptr1 + (64 + tmp27), None, eviction_policy='evict_last')
tmp30 = tl.broadcast_to(tmp29, [XBLOCK, RBLOCK])
tmp32 = tl.sum(tmp30, 1)[:, None]
tmp33 = tmp29.to(tl.float32)
tmp34 = tl.broadcast_to(tmp33, [XBLOCK, RBLOCK])
tmp35, = tl.associative_scan((tmp34,), 1, _triton_helper_fn_add0)
tmp36 = tmp4 - tmp29
tmp37 = tmp36.to(tl.float32)
tmp38 = tl.broadcast_to(tmp37, [XBLOCK, RBLOCK])
tmp39, = tl.associative_scan((tmp38,), 1, _triton_helper_fn_add0)
tmp40 = tmp22.to(tl.int64)
tmp41 = tmp40 + tmp24
tmp42 = tmp40 < 0
tmp43 = tl.where(tmp42, tmp41, tmp40)
tl.device_assert((0 <= tmp43) & (tmp43 < 64), "index out of bounds: 0 <= tmp43 < 64")
tmp45 = tl.load(in_ptr1 + (64 + (tmp43 % 64)), None, eviction_policy='evict_last')
tmp46 = tmp4 - tmp45
tmp47 = tl.broadcast_to(tmp46, [XBLOCK, RBLOCK])
tmp49 = tl.sum(tmp47, 1)[:, None]
tmp50 = tmp46.to(tl.float32)
tmp51 = tl.broadcast_to(tmp50, [XBLOCK, RBLOCK])
tmp52, = tl.associative_scan((tmp51,), 1, _triton_helper_fn_add0)
tmp53 = tmp4 - tmp46
tmp54 = tmp53.to(tl.float32)
tmp55 = tl.broadcast_to(tmp54, [XBLOCK, RBLOCK])
tmp56, = tl.associative_scan((tmp55,), 1, _triton_helper_fn_add0)
tl.store(out_ptr0 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp12, None)
tl.store(out_ptr2 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp21, None)
tl.store(out_ptr5 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp35, None)
tl.store(out_ptr6 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp39, None)
tl.store(out_ptr8 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp52, None)
tl.store(out_ptr9 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp56, None)
tl.store(out_ptr4 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp32, None)
tl.store(out_ptr7 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp49, None)
''', device_str='cuda')
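# The kernel above and the two that follow are copies of the first sort/cumsum
# kernel, applied at flat offsets 64, 128, and 192 (one 64-element slice each
# of the flattened 4x64 input).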
# kernel path: runs/run_shard_0/inductor_cache/t2/ct23zra5pezdee6b4weaxv6fwfewt6xglzkuggpognx4qcxjyggp.py
# Topologically Sorted Source Nodes: [mul_4, signs_2, mul_5, errors_2, sort_2, gt_sorted_2, gts_2, cumsum_4, sub_15, cumsum_5, mul_12, signs_6, mul_13, errors_6, sort_6, gt_sorted_6, gts_6, cumsum_12, sub_40, cumsum_13], Original ATen: [aten.mul, aten.sub, aten.rsub, aten.sort, aten.index, aten.sum, aten.cumsum]
# Source node to ATen node mapping:
# cumsum_12 => cumsum_12
# cumsum_13 => cumsum_13
# cumsum_4 => cumsum_4
# cumsum_5 => cumsum_5
# errors_2 => sub_13
# errors_6 => sub_38
# gt_sorted_2 => index_2
# gt_sorted_6 => index_6
# gts_2 => sum_5
# gts_6 => sum_13
# mul_12 => mul_18
# mul_13 => mul_19
# mul_4 => mul_6
# mul_5 => mul_7
# signs_2 => sub_12
# signs_6 => sub_37
# sort_2 => sort_2
# sort_6 => sort_6
# sub_15 => sub_15
# sub_40 => sub_40
# Graph fragment:
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_5, 2.0), kwargs = {})
# %sub_12 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_6, 1.0), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_4, %sub_12), kwargs = {})
# %sub_13 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %mul_7), kwargs = {})
# %sort_2 : [num_users=2] = call_function[target=torch.ops.aten.sort.default](args = (%sub_13, 0, True), kwargs = {})
# %index_2 : [num_users=3] = call_function[target=torch.ops.aten.index.Tensor](args = (%view_5, [%getitem_5]), kwargs = {})
# %sum_5 : [num_users=2] = call_function[target=torch.ops.aten.sum.default](args = (%index_2,), kwargs = {})
# %cumsum_4 : [num_users=1] = call_function[target=torch.ops.aten.cumsum.default](args = (%index_2, 0), kwargs = {})
# %sub_15 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %index_2), kwargs = {})
# %cumsum_5 : [num_users=1] = call_function[target=torch.ops.aten.cumsum.default](args = (%sub_15, 0), kwargs = {})
# %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_13, 2.0), kwargs = {})
# %sub_37 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_18, 1.0), kwargs = {})
# %mul_19 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_12, %sub_37), kwargs = {})
# %sub_38 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %mul_19), kwargs = {})
# %sort_6 : [num_users=2] = call_function[target=torch.ops.aten.sort.default](args = (%sub_38, 0, True), kwargs = {})
# %index_6 : [num_users=3] = call_function[target=torch.ops.aten.index.Tensor](args = (%view_13, [%getitem_13]), kwargs = {})
# %sum_13 : [num_users=2] = call_function[target=torch.ops.aten.sum.default](args = (%index_6,), kwargs = {})
# %cumsum_12 : [num_users=1] = call_function[target=torch.ops.aten.cumsum.default](args = (%index_6, 0), kwargs = {})
# %sub_40 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %index_6), kwargs = {})
# %cumsum_13 : [num_users=1] = call_function[target=torch.ops.aten.cumsum.default](args = (%sub_40, 0), kwargs = {})
triton_per_fused_cumsum_index_mul_rsub_sort_sub_sum_2 = async_compile.triton('triton_per_fused_cumsum_index_mul_rsub_sort_sub_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton.jit
def _triton_helper_fn_add0(arg0_0, arg1_0):
tmp0 = arg0_0 + arg1_0
return tmp0
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: 'i32', 11: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {10: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11), equal_to_1=(10,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_cumsum_index_mul_rsub_sort_sub_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_cumsum_index_mul_rsub_sort_sub_sum_2(in_ptr0, in_ptr1, out_ptr0, out_ptr2, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8, out_ptr9, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (128 + r0), None)
tmp1 = tl.load(in_ptr1 + (128 + r0), None)
tmp2 = 2.0
tmp3 = tmp1 * tmp2
tmp4 = 1.0
tmp5 = tmp3 - tmp4
tmp6 = tmp0 * tmp5
tmp7 = tmp4 - tmp6
tmp8 = r0
tmp9 = tmp8.to(tl.int16)
tmp10 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp11 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12, tmp13, = triton_helpers.sort_with_index(tmp10, tmp11, None, 1, stable=False, descending=True)
tmp14 = -tmp0
tmp15 = tmp4 - tmp1
tmp16 = tmp15 * tmp2
tmp17 = tmp16 - tmp4
tmp18 = tmp14 * tmp17
tmp19 = tmp4 - tmp18
tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK])
tmp21, tmp22, = triton_helpers.sort_with_index(tmp20, tmp11, None, 1, stable=False, descending=True)
tmp23 = tmp13.to(tl.int64)
tmp24 = tl.full([XBLOCK, RBLOCK], 64, tl.int32)
tmp25 = tmp23 + tmp24
tmp26 = tmp23 < 0
tmp27 = tl.where(tmp26, tmp25, tmp23)
tl.device_assert((0 <= tmp27) & (tmp27 < 64), "index out of bounds: 0 <= tmp27 < 64")
tmp29 = tl.load(in_ptr1 + (128 + tmp27), None, eviction_policy='evict_last')
tmp30 = tl.broadcast_to(tmp29, [XBLOCK, RBLOCK])
tmp32 = tl.sum(tmp30, 1)[:, None]
tmp33 = tmp29.to(tl.float32)
tmp34 = tl.broadcast_to(tmp33, [XBLOCK, RBLOCK])
tmp35, = tl.associative_scan((tmp34,), 1, _triton_helper_fn_add0)
tmp36 = tmp4 - tmp29
tmp37 = tmp36.to(tl.float32)
tmp38 = tl.broadcast_to(tmp37, [XBLOCK, RBLOCK])
tmp39, = tl.associative_scan((tmp38,), 1, _triton_helper_fn_add0)
tmp40 = tmp22.to(tl.int64)
tmp41 = tmp40 + tmp24
tmp42 = tmp40 < 0
tmp43 = tl.where(tmp42, tmp41, tmp40)
tl.device_assert((0 <= tmp43) & (tmp43 < 64), "index out of bounds: 0 <= tmp43 < 64")
tmp45 = tl.load(in_ptr1 + (128 + (tmp43 % 64)), None, eviction_policy='evict_last')
tmp46 = tmp4 - tmp45
tmp47 = tl.broadcast_to(tmp46, [XBLOCK, RBLOCK])
tmp49 = tl.sum(tmp47, 1)[:, None]
tmp50 = tmp46.to(tl.float32)
tmp51 = tl.broadcast_to(tmp50, [XBLOCK, RBLOCK])
tmp52, = tl.associative_scan((tmp51,), 1, _triton_helper_fn_add0)
tmp53 = tmp4 - tmp46
tmp54 = tmp53.to(tl.float32)
tmp55 = tl.broadcast_to(tmp54, [XBLOCK, RBLOCK])
tmp56, = tl.associative_scan((tmp55,), 1, _triton_helper_fn_add0)
tl.store(out_ptr0 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp12, None)
tl.store(out_ptr2 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp21, None)
tl.store(out_ptr5 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp35, None)
tl.store(out_ptr6 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp39, None)
tl.store(out_ptr8 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp52, None)
tl.store(out_ptr9 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp56, None)
tl.store(out_ptr4 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp32, None)
tl.store(out_ptr7 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp49, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/br/cbrvczo3w2tkflhjlh4bke7azn3nmmotymmklw5zour5srtko47t.py
# Topologically Sorted Source Nodes: [mul_6, signs_3, mul_7, errors_3, sort_3, gt_sorted_3, gts_3, cumsum_6, sub_21, cumsum_7, mul_14, signs_7, mul_15, errors_7, sort_7, gt_sorted_7, gts_7, cumsum_14, sub_46, cumsum_15], Original ATen: [aten.mul, aten.sub, aten.rsub, aten.sort, aten.index, aten.sum, aten.cumsum]
# Source node to ATen node mapping:
# cumsum_14 => cumsum_14
# cumsum_15 => cumsum_15
# cumsum_6 => cumsum_6
# cumsum_7 => cumsum_7
# errors_3 => sub_19
# errors_7 => sub_44
# gt_sorted_3 => index_3
# gt_sorted_7 => index_7
# gts_3 => sum_7
# gts_7 => sum_15
# mul_14 => mul_21
# mul_15 => mul_22
# mul_6 => mul_9
# mul_7 => mul_10
# signs_3 => sub_18
# signs_7 => sub_43
# sort_3 => sort_3
# sort_7 => sort_7
# sub_21 => sub_21
# sub_46 => sub_46
# Graph fragment:
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_7, 2.0), kwargs = {})
# %sub_18 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_9, 1.0), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_6, %sub_18), kwargs = {})
# %sub_19 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %mul_10), kwargs = {})
# %sort_3 : [num_users=2] = call_function[target=torch.ops.aten.sort.default](args = (%sub_19, 0, True), kwargs = {})
# %index_3 : [num_users=3] = call_function[target=torch.ops.aten.index.Tensor](args = (%view_7, [%getitem_7]), kwargs = {})
# %sum_7 : [num_users=2] = call_function[target=torch.ops.aten.sum.default](args = (%index_3,), kwargs = {})
# %cumsum_6 : [num_users=1] = call_function[target=torch.ops.aten.cumsum.default](args = (%index_3, 0), kwargs = {})
# %sub_21 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %index_3), kwargs = {})
# %cumsum_7 : [num_users=1] = call_function[target=torch.ops.aten.cumsum.default](args = (%sub_21, 0), kwargs = {})
# %mul_21 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_15, 2.0), kwargs = {})
# %sub_43 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_21, 1.0), kwargs = {})
# %mul_22 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_14, %sub_43), kwargs = {})
# %sub_44 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %mul_22), kwargs = {})
# %sort_7 : [num_users=2] = call_function[target=torch.ops.aten.sort.default](args = (%sub_44, 0, True), kwargs = {})
# %index_7 : [num_users=3] = call_function[target=torch.ops.aten.index.Tensor](args = (%view_15, [%getitem_15]), kwargs = {})
# %sum_15 : [num_users=2] = call_function[target=torch.ops.aten.sum.default](args = (%index_7,), kwargs = {})
# %cumsum_14 : [num_users=1] = call_function[target=torch.ops.aten.cumsum.default](args = (%index_7, 0), kwargs = {})
# %sub_46 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %index_7), kwargs = {})
# %cumsum_15 : [num_users=1] = call_function[target=torch.ops.aten.cumsum.default](args = (%sub_46, 0), kwargs = {})
triton_per_fused_cumsum_index_mul_rsub_sort_sub_sum_3 = async_compile.triton('triton_per_fused_cumsum_index_mul_rsub_sort_sub_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton.jit
def _triton_helper_fn_add0(arg0_0, arg1_0):
tmp0 = arg0_0 + arg1_0
return tmp0
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: 'i32', 11: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {10: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11), equal_to_1=(10,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_cumsum_index_mul_rsub_sort_sub_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_cumsum_index_mul_rsub_sort_sub_sum_3(in_ptr0, in_ptr1, out_ptr0, out_ptr2, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8, out_ptr9, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (192 + r0), None)
tmp1 = tl.load(in_ptr1 + (192 + r0), None)
tmp2 = 2.0
tmp3 = tmp1 * tmp2
tmp4 = 1.0
tmp5 = tmp3 - tmp4
tmp6 = tmp0 * tmp5
tmp7 = tmp4 - tmp6
tmp8 = r0
tmp9 = tmp8.to(tl.int16)
tmp10 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp11 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12, tmp13, = triton_helpers.sort_with_index(tmp10, tmp11, None, 1, stable=False, descending=True)
tmp14 = -tmp0
tmp15 = tmp4 - tmp1
tmp16 = tmp15 * tmp2
tmp17 = tmp16 - tmp4
tmp18 = tmp14 * tmp17
tmp19 = tmp4 - tmp18
tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK])
tmp21, tmp22, = triton_helpers.sort_with_index(tmp20, tmp11, None, 1, stable=False, descending=True)
tmp23 = tmp13.to(tl.int64)
tmp24 = tl.full([XBLOCK, RBLOCK], 64, tl.int32)
tmp25 = tmp23 + tmp24
tmp26 = tmp23 < 0
tmp27 = tl.where(tmp26, tmp25, tmp23)
tl.device_assert((0 <= tmp27) & (tmp27 < 64), "index out of bounds: 0 <= tmp27 < 64")
tmp29 = tl.load(in_ptr1 + (192 + tmp27), None, eviction_policy='evict_last')
tmp30 = tl.broadcast_to(tmp29, [XBLOCK, RBLOCK])
tmp32 = tl.sum(tmp30, 1)[:, None]
tmp33 = tmp29.to(tl.float32)
tmp34 = tl.broadcast_to(tmp33, [XBLOCK, RBLOCK])
tmp35, = tl.associative_scan((tmp34,), 1, _triton_helper_fn_add0)
tmp36 = tmp4 - tmp29
tmp37 = tmp36.to(tl.float32)
tmp38 = tl.broadcast_to(tmp37, [XBLOCK, RBLOCK])
tmp39, = tl.associative_scan((tmp38,), 1, _triton_helper_fn_add0)
tmp40 = tmp22.to(tl.int64)
tmp41 = tmp40 + tmp24
tmp42 = tmp40 < 0
tmp43 = tl.where(tmp42, tmp41, tmp40)
tl.device_assert((0 <= tmp43) & (tmp43 < 64), "index out of bounds: 0 <= tmp43 < 64")
tmp45 = tl.load(in_ptr1 + (192 + (tmp43 % 64)), None, eviction_policy='evict_last')
tmp46 = tmp4 - tmp45
tmp47 = tl.broadcast_to(tmp46, [XBLOCK, RBLOCK])
tmp49 = tl.sum(tmp47, 1)[:, None]
tmp50 = tmp46.to(tl.float32)
tmp51 = tl.broadcast_to(tmp50, [XBLOCK, RBLOCK])
tmp52, = tl.associative_scan((tmp51,), 1, _triton_helper_fn_add0)
tmp53 = tmp4 - tmp46
tmp54 = tmp53.to(tl.float32)
tmp55 = tl.broadcast_to(tmp54, [XBLOCK, RBLOCK])
tmp56, = tl.associative_scan((tmp55,), 1, _triton_helper_fn_add0)
tl.store(out_ptr0 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp12, None)
tl.store(out_ptr2 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp21, None)
tl.store(out_ptr5 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp35, None)
tl.store(out_ptr6 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp39, None)
tl.store(out_ptr8 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp52, None)
tl.store(out_ptr9 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp56, None)
tl.store(out_ptr4 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp32, None)
tl.store(out_ptr7 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp49, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/6f/c6fkvux56jadxa2rz3tpawg4kuz2lnmamfxu5jg3lvxj3m4zht6w.py
# Topologically Sorted Source Nodes: [intersection, union, truediv, jaccard, intersection_1, union_1, truediv_1, jaccard_1, intersection_2, union_2, truediv_2, jaccard_2, intersection_3, union_3, truediv_3, jaccard_3, intersection_4, union_4, truediv_5, jaccard_4, intersection_5, union_5, truediv_6, jaccard_5, intersection_6, union_6, truediv_7, jaccard_6, intersection_7, union_7, truediv_8, jaccard_7, relu, sub_5, loss, relu_1, sub_11, loss_1, acc, relu_2, sub_17, loss_2, acc_1, relu_3, sub_23, loss_3, acc_2, loss_4, relu_4, sub_30, loss_5, relu_5, sub_36, loss_6, acc_3, relu_6, sub_42, loss_7, acc_4, relu_7, sub_48, loss_8, acc_5, loss_9, add_8, truediv_10], Original ATen: [aten.sub, aten.add, aten.div, aten.rsub, aten.relu, aten.dot]
# Source node to ATen node mapping:
# acc => add_4
# acc_1 => add_5
# acc_2 => add_6
# acc_3 => add_11
# acc_4 => add_12
# acc_5 => add_13
# add_8 => add_14
# intersection => sub_2
# intersection_1 => sub_8
# intersection_2 => sub_14
# intersection_3 => sub_20
# intersection_4 => sub_27
# intersection_5 => sub_33
# intersection_6 => sub_39
# intersection_7 => sub_45
# jaccard => sub_4
# jaccard_1 => sub_10
# jaccard_2 => sub_16
# jaccard_3 => sub_22
# jaccard_4 => sub_29
# jaccard_5 => sub_35
# jaccard_6 => sub_41
# jaccard_7 => sub_47
# loss => mul_2, sum_2
# loss_1 => mul_5, sum_4
# loss_2 => mul_8, sum_6
# loss_3 => mul_11, sum_8
# loss_4 => div_4
# loss_5 => mul_14, sum_10
# loss_6 => mul_17, sum_12
# loss_7 => mul_20, sum_14
# loss_8 => mul_23, sum_16
# loss_9 => div_9
# relu => relu
# relu_1 => relu_1
# relu_2 => relu_2
# relu_3 => relu_3
# relu_4 => relu_4
# relu_5 => relu_5
# relu_6 => relu_6
# relu_7 => relu_7
# sub_11 => sub_11
# sub_17 => sub_17
# sub_23 => sub_23
# sub_30 => sub_30
# sub_36 => sub_36
# sub_42 => sub_42
# sub_48 => sub_48
# sub_5 => sub_5
# truediv => div
# truediv_1 => div_1
# truediv_10 => div_10
# truediv_2 => div_2
# truediv_3 => div_3
# truediv_5 => div_5
# truediv_6 => div_6
# truediv_7 => div_7
# truediv_8 => div_8
# union => add
# union_1 => add_1
# union_2 => add_2
# union_3 => add_3
# union_4 => add_7
# union_5 => add_8
# union_6 => add_9
# union_7 => add_10
# Graph fragment:
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_1, %cumsum), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %cumsum_1), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_2, %add), kwargs = {})
# %sub_4 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %div), kwargs = {})
# %sub_8 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_3, %cumsum_2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_3, %cumsum_3), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_8, %add_1), kwargs = {})
# %sub_10 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %div_1), kwargs = {})
# %sub_14 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_5, %cumsum_4), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_5, %cumsum_5), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_14, %add_2), kwargs = {})
# %sub_16 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %div_2), kwargs = {})
# %sub_20 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_7, %cumsum_6), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_7, %cumsum_7), kwargs = {})
# %div_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_20, %add_3), kwargs = {})
# %sub_22 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %div_3), kwargs = {})
# %sub_27 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_9, %cumsum_8), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_9, %cumsum_9), kwargs = {})
# %div_5 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_27, %add_7), kwargs = {})
# %sub_29 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %div_5), kwargs = {})
# %sub_33 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_11, %cumsum_10), kwargs = {})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_11, %cumsum_11), kwargs = {})
# %div_6 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_33, %add_8), kwargs = {})
# %sub_35 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %div_6), kwargs = {})
# %sub_39 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_13, %cumsum_12), kwargs = {})
# %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_13, %cumsum_13), kwargs = {})
# %div_7 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_39, %add_9), kwargs = {})
# %sub_41 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %div_7), kwargs = {})
# %sub_45 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_15, %cumsum_14), kwargs = {})
# %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_15, %cumsum_15), kwargs = {})
# %div_8 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_45, %add_10), kwargs = {})
# %sub_47 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %div_8), kwargs = {})
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%getitem,), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_1, %slice_2), kwargs = {})
# %slice_scatter_default : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%sub_4, %sub_5, 0, 1, 64), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%relu, %slice_scatter_default), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_2,), kwargs = {})
# %relu_1 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%getitem_2,), kwargs = {})
# %sub_11 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_5, %slice_6), kwargs = {})
# %slice_scatter_default_1 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%sub_10, %sub_11, 0, 1, 64), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%relu_1, %slice_scatter_default_1), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_5,), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_4), kwargs = {})
# %relu_2 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%getitem_4,), kwargs = {})
# %sub_17 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_9, %slice_10), kwargs = {})
# %slice_scatter_default_2 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%sub_16, %sub_17, 0, 1, 64), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%relu_2, %slice_scatter_default_2), kwargs = {})
# %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_8,), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %sum_6), kwargs = {})
# %relu_3 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%getitem_6,), kwargs = {})
# %sub_23 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_13, %slice_14), kwargs = {})
# %slice_scatter_default_3 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%sub_22, %sub_23, 0, 1, 64), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%relu_3, %slice_scatter_default_3), kwargs = {})
# %sum_8 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_11,), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, %sum_8), kwargs = {})
# %div_4 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_6, 4), kwargs = {})
# %relu_4 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%getitem_8,), kwargs = {})
# %sub_30 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_17, %slice_18), kwargs = {})
# %slice_scatter_default_4 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%sub_29, %sub_30, 0, 1, 64), kwargs = {})
# %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%relu_4, %slice_scatter_default_4), kwargs = {})
# %sum_10 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_14,), kwargs = {})
# %relu_5 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%getitem_10,), kwargs = {})
# %sub_36 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_21, %slice_22), kwargs = {})
# %slice_scatter_default_5 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%sub_35, %sub_36, 0, 1, 64), kwargs = {})
# %mul_17 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%relu_5, %slice_scatter_default_5), kwargs = {})
# %sum_12 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_17,), kwargs = {})
# %add_11 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_10, %sum_12), kwargs = {})
# %relu_6 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%getitem_12,), kwargs = {})
# %sub_42 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_25, %slice_26), kwargs = {})
# %slice_scatter_default_6 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%sub_41, %sub_42, 0, 1, 64), kwargs = {})
# %mul_20 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%relu_6, %slice_scatter_default_6), kwargs = {})
# %sum_14 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_20,), kwargs = {})
# %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_11, %sum_14), kwargs = {})
# %relu_7 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%getitem_14,), kwargs = {})
# %sub_48 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_29, %slice_30), kwargs = {})
# %slice_scatter_default_7 : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%sub_47, %sub_48, 0, 1, 64), kwargs = {})
# %mul_23 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%relu_7, %slice_scatter_default_7), kwargs = {})
# %sum_16 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_23,), kwargs = {})
# %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_12, %sum_16), kwargs = {})
# %div_9 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_13, 4), kwargs = {})
# %add_14 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div_4, %div_9), kwargs = {})
# %div_10 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_14, 2), kwargs = {})
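# Editorial reading (hedged) of the graph above: the sub_*/add_*/div_*/rsub
# chains compute 1 - intersection/union per sorted position, the
# slice_scatter_default_* nodes realize the jaccard[1:p] -= jaccard[0:-1]
# differencing of lovasz_grad, each relu_* is F.relu(errors_sorted), and each
# mul_*/sum_* pair is one torch.dot(relu(errors), grad); div_4 and div_9
# average the four per-image losses of the original and sign-flipped inputs,
# and div_10 halves their sum, i.e. symmetric_lovasz below.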
triton_per_fused_add_div_dot_relu_rsub_sub_4 = async_compile.triton('triton_per_fused_add_div_dot_relu_rsub_sub_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: '*fp32', 15: '*fp32', 16: '*fp32', 17: '*fp32', 18: '*fp32', 19: '*fp32', 20: '*fp32', 21: '*fp32', 22: '*fp32', 23: '*fp32', 24: '*fp32', 25: '*fp32', 26: '*fp32', 27: '*fp32', 28: '*fp32', 29: '*fp32', 30: '*fp32', 31: '*fp32', 32: 'i32', 33: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {32: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 33), equal_to_1=(32,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_dot_relu_rsub_sub_4', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1', 'in_out_ptr2', 'in_out_ptr3', 'in_out_ptr4', 'in_out_ptr5', 'in_out_ptr6', 'in_out_ptr7'], 'no_x_dim': False, 'num_load': 64, 'num_reduction': 8, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_dot_relu_rsub_sub_4(in_out_ptr0, in_out_ptr1, in_out_ptr2, in_out_ptr3, in_out_ptr4, in_out_ptr5, in_out_ptr6, in_out_ptr7, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17, in_ptr18, in_ptr19, in_ptr20, in_ptr21, in_ptr22, in_ptr23, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp6 = tl.load(in_out_ptr0 + (0))
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp24 = tl.load(in_ptr1 + (r0), None)
tmp26 = tl.load(in_ptr2 + (r0), None)
tmp35 = tl.load(in_ptr3 + (r0), None)
tmp37 = tl.load(in_out_ptr1 + (0))
tmp38 = tl.broadcast_to(tmp37, [XBLOCK, RBLOCK])
tmp54 = tl.load(in_ptr4 + (r0), None)
tmp56 = tl.load(in_ptr5 + (r0), None)
tmp65 = tl.load(in_ptr6 + (r0), None)
tmp67 = tl.load(in_out_ptr2 + (0))
tmp68 = tl.broadcast_to(tmp67, [XBLOCK, RBLOCK])
tmp84 = tl.load(in_ptr7 + (r0), None)
tmp86 = tl.load(in_ptr8 + (r0), None)
tmp95 = tl.load(in_ptr9 + (r0), None)
tmp97 = tl.load(in_out_ptr3 + (0))
tmp98 = tl.broadcast_to(tmp97, [XBLOCK, RBLOCK])
tmp114 = tl.load(in_ptr10 + (r0), None)
tmp116 = tl.load(in_ptr11 + (r0), None)
tmp125 = tl.load(in_ptr12 + (r0), None)
tmp127 = tl.load(in_out_ptr4 + (0))
tmp128 = tl.broadcast_to(tmp127, [XBLOCK, RBLOCK])
tmp144 = tl.load(in_ptr13 + (r0), None)
tmp146 = tl.load(in_ptr14 + (r0), None)
tmp155 = tl.load(in_ptr15 + (r0), None)
tmp157 = tl.load(in_out_ptr5 + (0))
tmp158 = tl.broadcast_to(tmp157, [XBLOCK, RBLOCK])
tmp174 = tl.load(in_ptr16 + (r0), None)
tmp176 = tl.load(in_ptr17 + (r0), None)
tmp185 = tl.load(in_ptr18 + (r0), None)
tmp187 = tl.load(in_out_ptr6 + (0))
tmp188 = tl.broadcast_to(tmp187, [XBLOCK, RBLOCK])
tmp204 = tl.load(in_ptr19 + (r0), None)
tmp206 = tl.load(in_ptr20 + (r0), None)
tmp215 = tl.load(in_ptr21 + (r0), None)
tmp217 = tl.load(in_out_ptr7 + (0))
tmp218 = tl.broadcast_to(tmp217, [XBLOCK, RBLOCK])
tmp234 = tl.load(in_ptr22 + (r0), None)
tmp236 = tl.load(in_ptr23 + (r0), None)
tmp1 = tl.full([1, 1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = r0
tmp4 = tl.full([1, 1], 1, tl.int64)
tmp5 = tmp3 >= tmp4
tmp8 = tl.load(in_ptr1 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp5, other=0.0)
tmp9 = tmp7 - tmp8
tmp10 = tl.load(in_ptr2 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp5, other=0.0)
tmp11 = tmp7 + tmp10
tmp12 = tmp9 / tmp11
tmp13 = 1.0
tmp14 = tmp13 - tmp12
tmp15 = tl.load(in_ptr1 + (tl.broadcast_to((-1) + r0, [XBLOCK, RBLOCK])), tmp5, other=0.0)
tmp16 = tmp7 - tmp15
tmp17 = tl.load(in_ptr2 + (tl.broadcast_to((-1) + r0, [XBLOCK, RBLOCK])), tmp5, other=0.0)
tmp18 = tmp7 + tmp17
tmp19 = tmp16 / tmp18
tmp20 = tmp13 - tmp19
tmp21 = tmp14 - tmp20
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp5, tmp21, tmp22)
tmp25 = tmp7 - tmp24
tmp27 = tmp7 + tmp26
tmp28 = tmp25 / tmp27
tmp29 = tmp13 - tmp28
tmp30 = tl.where(tmp5, tmp23, tmp29)
tmp31 = tmp2 * tmp30
tmp32 = tl.broadcast_to(tmp31, [XBLOCK, RBLOCK])
tmp34 = tl.sum(tmp32, 1)[:, None]
tmp36 = triton_helpers.maximum(tmp1, tmp35)
tmp39 = tl.load(in_ptr4 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp5, other=0.0)
tmp40 = tmp38 - tmp39
tmp41 = tl.load(in_ptr5 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp5, other=0.0)
tmp42 = tmp38 + tmp41
tmp43 = tmp40 / tmp42
tmp44 = tmp13 - tmp43
tmp45 = tl.load(in_ptr4 + (tl.broadcast_to((-1) + r0, [XBLOCK, RBLOCK])), tmp5, other=0.0)
tmp46 = tmp38 - tmp45
tmp47 = tl.load(in_ptr5 + (tl.broadcast_to((-1) + r0, [XBLOCK, RBLOCK])), tmp5, other=0.0)
tmp48 = tmp38 + tmp47
tmp49 = tmp46 / tmp48
tmp50 = tmp13 - tmp49
tmp51 = tmp44 - tmp50
tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype)
tmp53 = tl.where(tmp5, tmp51, tmp52)
tmp55 = tmp38 - tmp54
tmp57 = tmp38 + tmp56
tmp58 = tmp55 / tmp57
tmp59 = tmp13 - tmp58
tmp60 = tl.where(tmp5, tmp53, tmp59)
tmp61 = tmp36 * tmp60
tmp62 = tl.broadcast_to(tmp61, [XBLOCK, RBLOCK])
tmp64 = tl.sum(tmp62, 1)[:, None]
tmp66 = triton_helpers.maximum(tmp1, tmp65)
tmp69 = tl.load(in_ptr7 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp5, other=0.0)
tmp70 = tmp68 - tmp69
tmp71 = tl.load(in_ptr8 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp5, other=0.0)
tmp72 = tmp68 + tmp71
tmp73 = tmp70 / tmp72
tmp74 = tmp13 - tmp73
tmp75 = tl.load(in_ptr7 + (tl.broadcast_to((-1) + r0, [XBLOCK, RBLOCK])), tmp5, other=0.0)
tmp76 = tmp68 - tmp75
tmp77 = tl.load(in_ptr8 + (tl.broadcast_to((-1) + r0, [XBLOCK, RBLOCK])), tmp5, other=0.0)
tmp78 = tmp68 + tmp77
tmp79 = tmp76 / tmp78
tmp80 = tmp13 - tmp79
tmp81 = tmp74 - tmp80
tmp82 = tl.full(tmp81.shape, 0.0, tmp81.dtype)
tmp83 = tl.where(tmp5, tmp81, tmp82)
tmp85 = tmp68 - tmp84
tmp87 = tmp68 + tmp86
tmp88 = tmp85 / tmp87
tmp89 = tmp13 - tmp88
tmp90 = tl.where(tmp5, tmp83, tmp89)
tmp91 = tmp66 * tmp90
tmp92 = tl.broadcast_to(tmp91, [XBLOCK, RBLOCK])
tmp94 = tl.sum(tmp92, 1)[:, None]
tmp96 = triton_helpers.maximum(tmp1, tmp95)
tmp99 = tl.load(in_ptr10 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp5, other=0.0)
tmp100 = tmp98 - tmp99
tmp101 = tl.load(in_ptr11 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp5, other=0.0)
tmp102 = tmp98 + tmp101
tmp103 = tmp100 / tmp102
tmp104 = tmp13 - tmp103
tmp105 = tl.load(in_ptr10 + (tl.broadcast_to((-1) + r0, [XBLOCK, RBLOCK])), tmp5, other=0.0)
tmp106 = tmp98 - tmp105
tmp107 = tl.load(in_ptr11 + (tl.broadcast_to((-1) + r0, [XBLOCK, RBLOCK])), tmp5, other=0.0)
tmp108 = tmp98 + tmp107
tmp109 = tmp106 / tmp108
tmp110 = tmp13 - tmp109
tmp111 = tmp104 - tmp110
tmp112 = tl.full(tmp111.shape, 0.0, tmp111.dtype)
tmp113 = tl.where(tmp5, tmp111, tmp112)
tmp115 = tmp98 - tmp114
tmp117 = tmp98 + tmp116
tmp118 = tmp115 / tmp117
tmp119 = tmp13 - tmp118
tmp120 = tl.where(tmp5, tmp113, tmp119)
tmp121 = tmp96 * tmp120
tmp122 = tl.broadcast_to(tmp121, [XBLOCK, RBLOCK])
tmp124 = tl.sum(tmp122, 1)[:, None]
tmp126 = triton_helpers.maximum(tmp1, tmp125)
tmp129 = tl.load(in_ptr13 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp5, other=0.0)
tmp130 = tmp128 - tmp129
tmp131 = tl.load(in_ptr14 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp5, other=0.0)
tmp132 = tmp128 + tmp131
tmp133 = tmp130 / tmp132
tmp134 = tmp13 - tmp133
tmp135 = tl.load(in_ptr13 + (tl.broadcast_to((-1) + r0, [XBLOCK, RBLOCK])), tmp5, other=0.0)
tmp136 = tmp128 - tmp135
tmp137 = tl.load(in_ptr14 + (tl.broadcast_to((-1) + r0, [XBLOCK, RBLOCK])), tmp5, other=0.0)
tmp138 = tmp128 + tmp137
tmp139 = tmp136 / tmp138
tmp140 = tmp13 - tmp139
tmp141 = tmp134 - tmp140
tmp142 = tl.full(tmp141.shape, 0.0, tmp141.dtype)
tmp143 = tl.where(tmp5, tmp141, tmp142)
tmp145 = tmp128 - tmp144
tmp147 = tmp128 + tmp146
tmp148 = tmp145 / tmp147
tmp149 = tmp13 - tmp148
tmp150 = tl.where(tmp5, tmp143, tmp149)
tmp151 = tmp126 * tmp150
tmp152 = tl.broadcast_to(tmp151, [XBLOCK, RBLOCK])
tmp154 = tl.sum(tmp152, 1)[:, None]
tmp156 = triton_helpers.maximum(tmp1, tmp155)
tmp159 = tl.load(in_ptr16 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp5, other=0.0)
tmp160 = tmp158 - tmp159
tmp161 = tl.load(in_ptr17 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp5, other=0.0)
tmp162 = tmp158 + tmp161
tmp163 = tmp160 / tmp162
tmp164 = tmp13 - tmp163
tmp165 = tl.load(in_ptr16 + (tl.broadcast_to((-1) + r0, [XBLOCK, RBLOCK])), tmp5, other=0.0)
tmp166 = tmp158 - tmp165
tmp167 = tl.load(in_ptr17 + (tl.broadcast_to((-1) + r0, [XBLOCK, RBLOCK])), tmp5, other=0.0)
tmp168 = tmp158 + tmp167
tmp169 = tmp166 / tmp168
tmp170 = tmp13 - tmp169
tmp171 = tmp164 - tmp170
tmp172 = tl.full(tmp171.shape, 0.0, tmp171.dtype)
tmp173 = tl.where(tmp5, tmp171, tmp172)
tmp175 = tmp158 - tmp174
tmp177 = tmp158 + tmp176
tmp178 = tmp175 / tmp177
tmp179 = tmp13 - tmp178
tmp180 = tl.where(tmp5, tmp173, tmp179)
tmp181 = tmp156 * tmp180
tmp182 = tl.broadcast_to(tmp181, [XBLOCK, RBLOCK])
tmp184 = tl.sum(tmp182, 1)[:, None]
tmp186 = triton_helpers.maximum(tmp1, tmp185)
tmp189 = tl.load(in_ptr19 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp5, other=0.0)
tmp190 = tmp188 - tmp189
tmp191 = tl.load(in_ptr20 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp5, other=0.0)
tmp192 = tmp188 + tmp191
tmp193 = tmp190 / tmp192
tmp194 = tmp13 - tmp193
tmp195 = tl.load(in_ptr19 + (tl.broadcast_to((-1) + r0, [XBLOCK, RBLOCK])), tmp5, other=0.0)
tmp196 = tmp188 - tmp195
tmp197 = tl.load(in_ptr20 + (tl.broadcast_to((-1) + r0, [XBLOCK, RBLOCK])), tmp5, other=0.0)
tmp198 = tmp188 + tmp197
tmp199 = tmp196 / tmp198
tmp200 = tmp13 - tmp199
tmp201 = tmp194 - tmp200
tmp202 = tl.full(tmp201.shape, 0.0, tmp201.dtype)
tmp203 = tl.where(tmp5, tmp201, tmp202)
tmp205 = tmp188 - tmp204
tmp207 = tmp188 + tmp206
tmp208 = tmp205 / tmp207
tmp209 = tmp13 - tmp208
tmp210 = tl.where(tmp5, tmp203, tmp209)
tmp211 = tmp186 * tmp210
tmp212 = tl.broadcast_to(tmp211, [XBLOCK, RBLOCK])
tmp214 = tl.sum(tmp212, 1)[:, None]
tmp216 = triton_helpers.maximum(tmp1, tmp215)
tmp219 = tl.load(in_ptr22 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp5, other=0.0)
tmp220 = tmp218 - tmp219
tmp221 = tl.load(in_ptr23 + (tl.broadcast_to(r0, [XBLOCK, RBLOCK])), tmp5, other=0.0)
tmp222 = tmp218 + tmp221
tmp223 = tmp220 / tmp222
tmp224 = tmp13 - tmp223
tmp225 = tl.load(in_ptr22 + (tl.broadcast_to((-1) + r0, [XBLOCK, RBLOCK])), tmp5, other=0.0)
tmp226 = tmp218 - tmp225
tmp227 = tl.load(in_ptr23 + (tl.broadcast_to((-1) + r0, [XBLOCK, RBLOCK])), tmp5, other=0.0)
tmp228 = tmp218 + tmp227
tmp229 = tmp226 / tmp228
tmp230 = tmp13 - tmp229
tmp231 = tmp224 - tmp230
tmp232 = tl.full(tmp231.shape, 0.0, tmp231.dtype)
tmp233 = tl.where(tmp5, tmp231, tmp232)
tmp235 = tmp218 - tmp234
tmp237 = tmp218 + tmp236
tmp238 = tmp235 / tmp237
tmp239 = tmp13 - tmp238
tmp240 = tl.where(tmp5, tmp233, tmp239)
tmp241 = tmp216 * tmp240
tmp242 = tl.broadcast_to(tmp241, [XBLOCK, RBLOCK])
tmp244 = tl.sum(tmp242, 1)[:, None]
tmp245 = tmp34 + tmp64
tmp246 = tmp245 + tmp94
tmp247 = tmp246 + tmp124
tmp248 = 0.25
tmp249 = tmp247 * tmp248
tmp250 = tmp154 + tmp184
tmp251 = tmp250 + tmp214
tmp252 = tmp251 + tmp244
tmp253 = tmp252 * tmp248
tmp254 = tmp249 + tmp253
tmp255 = 0.5
tmp256 = tmp254 * tmp255
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp256, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, ), (1, ), torch.float32)
buf20 = empty_strided_cuda((64, ), (1, ), torch.float32)
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = empty_strided_cuda((64, ), (1, ), torch.float32)
buf4 = empty_strided_cuda((64, ), (1, ), torch.float32)
buf22 = empty_strided_cuda((), (), torch.float32)
buf23 = empty_strided_cuda((64, ), (1, ), torch.float32)
buf24 = empty_strided_cuda((64, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [mul, signs, mul_1, errors, sort, gt_sorted, gts, cumsum, sub_3, cumsum_1, mul_8, signs_4, mul_9, errors_4, sort_4, gt_sorted_4, gts_4, cumsum_8, sub_28, cumsum_9], Original ATen: [aten.mul, aten.sub, aten.rsub, aten.sort, aten.index, aten.sum, aten.cumsum]
stream0 = get_raw_stream(0)
triton_per_fused_cumsum_index_mul_rsub_sort_sub_sum_0.run(arg0_1, arg1_1, buf0, buf20, buf2, buf3, buf4, buf22, buf23, buf24, 1, 64, grid=grid(1), stream=stream0)
buf5 = empty_strided_cuda((64, ), (1, ), torch.float32)
buf25 = empty_strided_cuda((64, ), (1, ), torch.float32)
buf7 = empty_strided_cuda((), (), torch.float32)
buf8 = empty_strided_cuda((64, ), (1, ), torch.float32)
buf9 = empty_strided_cuda((64, ), (1, ), torch.float32)
buf27 = empty_strided_cuda((), (), torch.float32)
buf28 = empty_strided_cuda((64, ), (1, ), torch.float32)
buf29 = empty_strided_cuda((64, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [mul_2, signs_1, mul_3, errors_1, sort_1, gt_sorted_1, gts_1, cumsum_2, sub_9, cumsum_3, mul_10, signs_5, mul_11, errors_5, sort_5, gt_sorted_5, gts_5, cumsum_10, sub_34, cumsum_11], Original ATen: [aten.mul, aten.sub, aten.rsub, aten.sort, aten.index, aten.sum, aten.cumsum]
triton_per_fused_cumsum_index_mul_rsub_sort_sub_sum_1.run(arg0_1, arg1_1, buf5, buf25, buf7, buf8, buf9, buf27, buf28, buf29, 1, 64, grid=grid(1), stream=stream0)
buf10 = empty_strided_cuda((64, ), (1, ), torch.float32)
buf30 = empty_strided_cuda((64, ), (1, ), torch.float32)
buf12 = empty_strided_cuda((), (), torch.float32)
buf13 = empty_strided_cuda((64, ), (1, ), torch.float32)
buf14 = empty_strided_cuda((64, ), (1, ), torch.float32)
buf32 = empty_strided_cuda((), (), torch.float32)
buf33 = empty_strided_cuda((64, ), (1, ), torch.float32)
buf34 = empty_strided_cuda((64, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [mul_4, signs_2, mul_5, errors_2, sort_2, gt_sorted_2, gts_2, cumsum_4, sub_15, cumsum_5, mul_12, signs_6, mul_13, errors_6, sort_6, gt_sorted_6, gts_6, cumsum_12, sub_40, cumsum_13], Original ATen: [aten.mul, aten.sub, aten.rsub, aten.sort, aten.index, aten.sum, aten.cumsum]
triton_per_fused_cumsum_index_mul_rsub_sort_sub_sum_2.run(arg0_1, arg1_1, buf10, buf30, buf12, buf13, buf14, buf32, buf33, buf34, 1, 64, grid=grid(1), stream=stream0)
buf15 = empty_strided_cuda((64, ), (1, ), torch.float32)
buf35 = empty_strided_cuda((64, ), (1, ), torch.float32)
buf17 = empty_strided_cuda((), (), torch.float32)
buf18 = empty_strided_cuda((64, ), (1, ), torch.float32)
buf19 = empty_strided_cuda((64, ), (1, ), torch.float32)
buf37 = empty_strided_cuda((), (), torch.float32)
buf38 = empty_strided_cuda((64, ), (1, ), torch.float32)
buf39 = empty_strided_cuda((64, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [mul_6, signs_3, mul_7, errors_3, sort_3, gt_sorted_3, gts_3, cumsum_6, sub_21, cumsum_7, mul_14, signs_7, mul_15, errors_7, sort_7, gt_sorted_7, gts_7, cumsum_14, sub_46, cumsum_15], Original ATen: [aten.mul, aten.sub, aten.rsub, aten.sort, aten.index, aten.sum, aten.cumsum]
triton_per_fused_cumsum_index_mul_rsub_sort_sub_sum_3.run(arg0_1, arg1_1, buf15, buf35, buf17, buf18, buf19, buf37, buf38, buf39, 1, 64, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
buf40 = buf2; del buf2 # reuse
buf41 = buf7; del buf7 # reuse
buf42 = buf12; del buf12 # reuse
buf43 = buf17; del buf17 # reuse
buf44 = buf22; del buf22 # reuse
buf45 = buf27; del buf27 # reuse
buf46 = buf32; del buf32 # reuse
buf47 = buf37; del buf37 # reuse
buf48 = buf40; del buf40 # reuse
# Topologically Sorted Source Nodes: [intersection, union, truediv, jaccard, intersection_1, union_1, truediv_1, jaccard_1, intersection_2, union_2, truediv_2, jaccard_2, intersection_3, union_3, truediv_3, jaccard_3, intersection_4, union_4, truediv_5, jaccard_4, intersection_5, union_5, truediv_6, jaccard_5, intersection_6, union_6, truediv_7, jaccard_6, intersection_7, union_7, truediv_8, jaccard_7, relu, sub_5, loss, relu_1, sub_11, loss_1, acc, relu_2, sub_17, loss_2, acc_1, relu_3, sub_23, loss_3, acc_2, loss_4, relu_4, sub_30, loss_5, relu_5, sub_36, loss_6, acc_3, relu_6, sub_42, loss_7, acc_4, relu_7, sub_48, loss_8, acc_5, loss_9, add_8, truediv_10], Original ATen: [aten.sub, aten.add, aten.div, aten.rsub, aten.relu, aten.dot]
triton_per_fused_add_div_dot_relu_rsub_sub_4.run(buf48, buf41, buf42, buf43, buf44, buf45, buf46, buf47, buf0, buf3, buf4, buf5, buf8, buf9, buf10, buf13, buf14, buf15, buf18, buf19, buf20, buf23, buf24, buf25, buf28, buf29, buf30, buf33, buf34, buf35, buf38, buf39, 1, 64, grid=grid(1), stream=stream0)
del buf0
del buf10
del buf13
del buf14
del buf15
del buf18
del buf19
del buf20
del buf23
del buf24
del buf25
del buf28
del buf29
del buf3
del buf30
del buf33
del buf34
del buf35
del buf38
del buf39
del buf4
del buf41
del buf42
del buf43
del buf44
del buf45
del buf46
del buf47
del buf5
del buf8
del buf9
return (buf48, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import numpy as np
from torch import nn
import torch.nn.functional as F
from itertools import filterfalse
def jaccard(outputs, targets, per_image=False, non_empty=False, min_pixels=5):
batch_size = outputs.size()[0]
eps = 0.001
if not per_image:
batch_size = 1
dice_target = targets.contiguous().view(batch_size, -1).float()
dice_output = outputs.contiguous().view(batch_size, -1)
target_sum = torch.sum(dice_target, dim=1)
intersection = torch.sum(dice_output * dice_target, dim=1)
    losses = 1.0 - (intersection + eps) / (
        torch.sum(dice_output + dice_target, dim=1) - intersection + eps)
if non_empty:
assert per_image
non_empty_images = 0
sum_loss = 0
for i in range(batch_size):
if target_sum[i] > min_pixels:
sum_loss += losses[i]
non_empty_images += 1
if non_empty_images == 0:
return 0
else:
return sum_loss / non_empty_images
return losses.mean()
def flatten_binary_scores(scores, labels, ignore=None):
"""
    Flattens predictions in the batch (binary case).
    Removes labels equal to 'ignore'.
"""
scores = scores.view(-1)
labels = labels.view(-1)
if ignore is None:
return scores, labels
valid = labels != ignore
vscores = scores[valid]
vlabels = labels[valid]
return vscores, vlabels
def lovasz_grad(gt_sorted):
"""
    Computes the gradient of the Lovasz extension w.r.t. sorted errors.
    See Alg. 1 in the paper.
"""
p = len(gt_sorted)
gts = gt_sorted.sum()
intersection = gts.float() - gt_sorted.float().cumsum(0)
union = gts.float() + (1 - gt_sorted).float().cumsum(0)
jaccard = 1.0 - intersection / union
if p > 1:
jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
return jaccard
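# Hedged worked example (editorial, not from the source repo): the gradient
# telescopes, so its entries sum to the final cumulative Jaccard error, which
# is 1.0 here:
#
#   gt = torch.tensor([1.0, 1.0, 0.0, 1.0])
#   lovasz_grad(gt)  # tensor([0.3333, 0.3333, 0.0833, 0.2500]); sums to 1.0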
def lovasz_hinge_flat(logits, labels):
if len(labels) == 0:
return logits.sum() * 0.0
signs = 2.0 * labels.float() - 1.0
errors = 1.0 - logits * signs
errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
perm = perm.data
gt_sorted = labels[perm]
grad = lovasz_grad(gt_sorted)
loss = torch.dot(F.relu(errors_sorted), grad)
return loss
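# Hedged sanity check (editorial): when every logit matches its label with
# margin >= 1, all sorted hinge errors are <= 0, F.relu zeroes them, and the
# dot product (the loss) vanishes:
#
#   logits = torch.tensor([4.0, -4.0])
#   labels = torch.tensor([1.0, 0.0])
#   lovasz_hinge_flat(logits, labels)  # tensor(0.)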
def mean(l, ignore_nan=False, empty=0):
"""
nanmean compatible with generators.
"""
ll = iter(l)
if ignore_nan:
ll = filterfalse(np.isnan, ll)
try:
n = 1
acc = next(ll)
except StopIteration:
if empty == 'raise':
raise ValueError('Empty mean')
return empty
for n, v in enumerate(ll, 2):
acc += v
if n == 1:
return acc
return acc / n
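# Hedged usage sketch (editorial): `mean` consumes its input lazily, which is
# what lets lovasz_hinge below pass it a per-image generator expression:
#
#   mean((x for x in [1.0, float('nan'), 3.0]), ignore_nan=True)  # -> 2.0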
def lovasz_hinge(logits, labels, per_image=True, ignore=None):
if per_image:
        loss = mean(
            lovasz_hinge_flat(*flatten_binary_scores(
                log.unsqueeze(0), lab.unsqueeze(0), ignore))
            for log, lab in zip(logits, labels))
    else:
        loss = lovasz_hinge_flat(
            *flatten_binary_scores(logits, labels, ignore))
return loss
def symmetric_lovasz(outputs, targets):
    return (lovasz_hinge(outputs, targets) +
            lovasz_hinge(-outputs, 1 - targets)) / 2
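# Editorial cross-reference (hedged): in the fused reduction kernel above this
# symmetrization appears as tmp249 = (sum of four per-image dot products) *
# 0.25, tmp253 = the same for the (-outputs, 1 - targets) branch, and
# tmp256 = (tmp249 + tmp253) * 0.5.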
class LovaszLoss(nn.Module):
def __init__(self, ignore_index=255, per_image=True):
super().__init__()
self.ignore_index = ignore_index
self.per_image = per_image
def forward(self, outputs, targets):
outputs = outputs.contiguous()
targets = targets.contiguous()
return symmetric_lovasz(outputs, targets)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
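# Hedged usage sketch (editorial): with the dataset helpers above, the module
# is exercised as
#   model = LovaszLoss(*get_init_inputs()[0], **get_init_inputs()[1])
#   loss = model(*get_inputs())  # scalar tensor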
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import numpy as np
from torch import nn
import torch.nn.functional as F
from itertools import filterfalse
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def _triton_helper_fn_add0(arg0_0, arg1_0):
tmp0 = arg0_0 + arg1_0
return tmp0
@triton.jit
def triton_per_fused_cumsum_index_mul_rsub_sort_sub_sum_0(in_ptr0, in_ptr1,
out_ptr0, out_ptr2, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8,
out_ptr9, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = 2.0
tmp3 = tmp1 * tmp2
tmp4 = 1.0
tmp5 = tmp3 - tmp4
tmp6 = tmp0 * tmp5
tmp7 = tmp4 - tmp6
tmp8 = r0
tmp9 = tmp8.to(tl.int16)
tmp10 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp11 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12, tmp13 = triton_helpers.sort_with_index(tmp10, tmp11, None, 1,
stable=False, descending=True)
tmp14 = -tmp0
tmp15 = tmp4 - tmp1
tmp16 = tmp15 * tmp2
tmp17 = tmp16 - tmp4
tmp18 = tmp14 * tmp17
tmp19 = tmp4 - tmp18
tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK])
tmp21, tmp22 = triton_helpers.sort_with_index(tmp20, tmp11, None, 1,
stable=False, descending=True)
tmp23 = tmp13.to(tl.int64)
tmp24 = tl.full([XBLOCK, RBLOCK], 64, tl.int32)
tmp25 = tmp23 + tmp24
tmp26 = tmp23 < 0
tmp27 = tl.where(tmp26, tmp25, tmp23)
tl.device_assert((0 <= tmp27) & (tmp27 < 64),
'index out of bounds: 0 <= tmp27 < 64')
tmp29 = tl.load(in_ptr1 + tmp27, None, eviction_policy='evict_last')
tmp30 = tl.broadcast_to(tmp29, [XBLOCK, RBLOCK])
tmp32 = tl.sum(tmp30, 1)[:, None]
tmp33 = tmp29.to(tl.float32)
tmp34 = tl.broadcast_to(tmp33, [XBLOCK, RBLOCK])
tmp35, = tl.associative_scan((tmp34,), 1, _triton_helper_fn_add0)
tmp36 = tmp4 - tmp29
tmp37 = tmp36.to(tl.float32)
tmp38 = tl.broadcast_to(tmp37, [XBLOCK, RBLOCK])
tmp39, = tl.associative_scan((tmp38,), 1, _triton_helper_fn_add0)
tmp40 = tmp22.to(tl.int64)
tmp41 = tmp40 + tmp24
tmp42 = tmp40 < 0
tmp43 = tl.where(tmp42, tmp41, tmp40)
tl.device_assert((0 <= tmp43) & (tmp43 < 64),
'index out of bounds: 0 <= tmp43 < 64')
tmp45 = tl.load(in_ptr1 + tmp43 % 64, None, eviction_policy='evict_last')
tmp46 = tmp4 - tmp45
tmp47 = tl.broadcast_to(tmp46, [XBLOCK, RBLOCK])
tmp49 = tl.sum(tmp47, 1)[:, None]
tmp50 = tmp46.to(tl.float32)
tmp51 = tl.broadcast_to(tmp50, [XBLOCK, RBLOCK])
tmp52, = tl.associative_scan((tmp51,), 1, _triton_helper_fn_add0)
tmp53 = tmp4 - tmp46
tmp54 = tmp53.to(tl.float32)
tmp55 = tl.broadcast_to(tmp54, [XBLOCK, RBLOCK])
tmp56, = tl.associative_scan((tmp55,), 1, _triton_helper_fn_add0)
tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp12, None)
tl.store(out_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp21, None)
tl.store(out_ptr5 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp35, None)
tl.store(out_ptr6 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp39, None)
tl.store(out_ptr8 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp52, None)
tl.store(out_ptr9 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp56, None)
tl.store(out_ptr4 + tl.full([XBLOCK, 1], 0, tl.int32), tmp32, None)
tl.store(out_ptr7 + tl.full([XBLOCK, 1], 0, tl.int32), tmp49, None)
@triton.jit
def _triton_helper_fn_add0(arg0_0, arg1_0):
tmp0 = arg0_0 + arg1_0
return tmp0
@triton.jit
def triton_per_fused_cumsum_index_mul_rsub_sort_sub_sum_1(in_ptr0, in_ptr1,
out_ptr0, out_ptr2, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8,
out_ptr9, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (64 + r0), None)
tmp1 = tl.load(in_ptr1 + (64 + r0), None)
tmp2 = 2.0
tmp3 = tmp1 * tmp2
tmp4 = 1.0
tmp5 = tmp3 - tmp4
tmp6 = tmp0 * tmp5
tmp7 = tmp4 - tmp6
tmp8 = r0
tmp9 = tmp8.to(tl.int16)
tmp10 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp11 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12, tmp13 = triton_helpers.sort_with_index(tmp10, tmp11, None, 1,
stable=False, descending=True)
tmp14 = -tmp0
tmp15 = tmp4 - tmp1
tmp16 = tmp15 * tmp2
tmp17 = tmp16 - tmp4
tmp18 = tmp14 * tmp17
tmp19 = tmp4 - tmp18
tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK])
tmp21, tmp22 = triton_helpers.sort_with_index(tmp20, tmp11, None, 1,
stable=False, descending=True)
tmp23 = tmp13.to(tl.int64)
tmp24 = tl.full([XBLOCK, RBLOCK], 64, tl.int32)
tmp25 = tmp23 + tmp24
tmp26 = tmp23 < 0
tmp27 = tl.where(tmp26, tmp25, tmp23)
tl.device_assert((0 <= tmp27) & (tmp27 < 64),
'index out of bounds: 0 <= tmp27 < 64')
tmp29 = tl.load(in_ptr1 + (64 + tmp27), None, eviction_policy='evict_last')
tmp30 = tl.broadcast_to(tmp29, [XBLOCK, RBLOCK])
tmp32 = tl.sum(tmp30, 1)[:, None]
tmp33 = tmp29.to(tl.float32)
tmp34 = tl.broadcast_to(tmp33, [XBLOCK, RBLOCK])
tmp35, = tl.associative_scan((tmp34,), 1, _triton_helper_fn_add0)
tmp36 = tmp4 - tmp29
tmp37 = tmp36.to(tl.float32)
tmp38 = tl.broadcast_to(tmp37, [XBLOCK, RBLOCK])
tmp39, = tl.associative_scan((tmp38,), 1, _triton_helper_fn_add0)
tmp40 = tmp22.to(tl.int64)
tmp41 = tmp40 + tmp24
tmp42 = tmp40 < 0
tmp43 = tl.where(tmp42, tmp41, tmp40)
tl.device_assert((0 <= tmp43) & (tmp43 < 64),
'index out of bounds: 0 <= tmp43 < 64')
tmp45 = tl.load(in_ptr1 + (64 + tmp43 % 64), None, eviction_policy=
'evict_last')
tmp46 = tmp4 - tmp45
tmp47 = tl.broadcast_to(tmp46, [XBLOCK, RBLOCK])
tmp49 = tl.sum(tmp47, 1)[:, None]
tmp50 = tmp46.to(tl.float32)
tmp51 = tl.broadcast_to(tmp50, [XBLOCK, RBLOCK])
tmp52, = tl.associative_scan((tmp51,), 1, _triton_helper_fn_add0)
tmp53 = tmp4 - tmp46
tmp54 = tmp53.to(tl.float32)
tmp55 = tl.broadcast_to(tmp54, [XBLOCK, RBLOCK])
tmp56, = tl.associative_scan((tmp55,), 1, _triton_helper_fn_add0)
tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp12, None)
tl.store(out_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp21, None)
tl.store(out_ptr5 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp35, None)
tl.store(out_ptr6 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp39, None)
tl.store(out_ptr8 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp52, None)
tl.store(out_ptr9 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp56, None)
tl.store(out_ptr4 + tl.full([XBLOCK, 1], 0, tl.int32), tmp32, None)
tl.store(out_ptr7 + tl.full([XBLOCK, 1], 0, tl.int32), tmp49, None)
@triton.jit
def _triton_helper_fn_add0(arg0_0, arg1_0):
tmp0 = arg0_0 + arg1_0
return tmp0
@triton.jit
def triton_per_fused_cumsum_index_mul_rsub_sort_sub_sum_2(in_ptr0, in_ptr1,
out_ptr0, out_ptr2, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8,
out_ptr9, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (128 + r0), None)
tmp1 = tl.load(in_ptr1 + (128 + r0), None)
tmp2 = 2.0
tmp3 = tmp1 * tmp2
tmp4 = 1.0
tmp5 = tmp3 - tmp4
tmp6 = tmp0 * tmp5
tmp7 = tmp4 - tmp6
tmp8 = r0
tmp9 = tmp8.to(tl.int16)
tmp10 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp11 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12, tmp13 = triton_helpers.sort_with_index(tmp10, tmp11, None, 1,
stable=False, descending=True)
tmp14 = -tmp0
tmp15 = tmp4 - tmp1
tmp16 = tmp15 * tmp2
tmp17 = tmp16 - tmp4
tmp18 = tmp14 * tmp17
tmp19 = tmp4 - tmp18
tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK])
tmp21, tmp22 = triton_helpers.sort_with_index(tmp20, tmp11, None, 1,
stable=False, descending=True)
tmp23 = tmp13.to(tl.int64)
tmp24 = tl.full([XBLOCK, RBLOCK], 64, tl.int32)
tmp25 = tmp23 + tmp24
tmp26 = tmp23 < 0
tmp27 = tl.where(tmp26, tmp25, tmp23)
tl.device_assert((0 <= tmp27) & (tmp27 < 64),
'index out of bounds: 0 <= tmp27 < 64')
tmp29 = tl.load(in_ptr1 + (128 + tmp27), None, eviction_policy='evict_last'
)
tmp30 = tl.broadcast_to(tmp29, [XBLOCK, RBLOCK])
tmp32 = tl.sum(tmp30, 1)[:, None]
tmp33 = tmp29.to(tl.float32)
tmp34 = tl.broadcast_to(tmp33, [XBLOCK, RBLOCK])
tmp35, = tl.associative_scan((tmp34,), 1, _triton_helper_fn_add0)
tmp36 = tmp4 - tmp29
tmp37 = tmp36.to(tl.float32)
tmp38 = tl.broadcast_to(tmp37, [XBLOCK, RBLOCK])
tmp39, = tl.associative_scan((tmp38,), 1, _triton_helper_fn_add0)
tmp40 = tmp22.to(tl.int64)
tmp41 = tmp40 + tmp24
tmp42 = tmp40 < 0
tmp43 = tl.where(tmp42, tmp41, tmp40)
tl.device_assert((0 <= tmp43) & (tmp43 < 64),
'index out of bounds: 0 <= tmp43 < 64')
tmp45 = tl.load(in_ptr1 + (128 + tmp43 % 64), None, eviction_policy=
'evict_last')
tmp46 = tmp4 - tmp45
tmp47 = tl.broadcast_to(tmp46, [XBLOCK, RBLOCK])
tmp49 = tl.sum(tmp47, 1)[:, None]
tmp50 = tmp46.to(tl.float32)
tmp51 = tl.broadcast_to(tmp50, [XBLOCK, RBLOCK])
tmp52, = tl.associative_scan((tmp51,), 1, _triton_helper_fn_add0)
tmp53 = tmp4 - tmp46
tmp54 = tmp53.to(tl.float32)
tmp55 = tl.broadcast_to(tmp54, [XBLOCK, RBLOCK])
tmp56, = tl.associative_scan((tmp55,), 1, _triton_helper_fn_add0)
tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp12, None)
tl.store(out_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp21, None)
tl.store(out_ptr5 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp35, None)
tl.store(out_ptr6 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp39, None)
tl.store(out_ptr8 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp52, None)
tl.store(out_ptr9 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp56, None)
tl.store(out_ptr4 + tl.full([XBLOCK, 1], 0, tl.int32), tmp32, None)
tl.store(out_ptr7 + tl.full([XBLOCK, 1], 0, tl.int32), tmp49, None)
@triton.jit
def _triton_helper_fn_add0(arg0_0, arg1_0):
tmp0 = arg0_0 + arg1_0
return tmp0
@triton.jit
def triton_per_fused_cumsum_index_mul_rsub_sort_sub_sum_3(in_ptr0, in_ptr1,
out_ptr0, out_ptr2, out_ptr4, out_ptr5, out_ptr6, out_ptr7, out_ptr8,
out_ptr9, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (192 + r0), None)
tmp1 = tl.load(in_ptr1 + (192 + r0), None)
tmp2 = 2.0
tmp3 = tmp1 * tmp2
tmp4 = 1.0
tmp5 = tmp3 - tmp4
tmp6 = tmp0 * tmp5
tmp7 = tmp4 - tmp6
tmp8 = r0
tmp9 = tmp8.to(tl.int16)
tmp10 = tl.broadcast_to(tmp7, [XBLOCK, RBLOCK])
tmp11 = tl.broadcast_to(tmp9, [XBLOCK, RBLOCK])
tmp12, tmp13 = triton_helpers.sort_with_index(tmp10, tmp11, None, 1,
stable=False, descending=True)
tmp14 = -tmp0
tmp15 = tmp4 - tmp1
tmp16 = tmp15 * tmp2
tmp17 = tmp16 - tmp4
tmp18 = tmp14 * tmp17
tmp19 = tmp4 - tmp18
tmp20 = tl.broadcast_to(tmp19, [XBLOCK, RBLOCK])
tmp21, tmp22 = triton_helpers.sort_with_index(tmp20, tmp11, None, 1,
stable=False, descending=True)
tmp23 = tmp13.to(tl.int64)
tmp24 = tl.full([XBLOCK, RBLOCK], 64, tl.int32)
tmp25 = tmp23 + tmp24
tmp26 = tmp23 < 0
tmp27 = tl.where(tmp26, tmp25, tmp23)
tl.device_assert((0 <= tmp27) & (tmp27 < 64),
'index out of bounds: 0 <= tmp27 < 64')
tmp29 = tl.load(in_ptr1 + (192 + tmp27), None, eviction_policy='evict_last'
)
tmp30 = tl.broadcast_to(tmp29, [XBLOCK, RBLOCK])
tmp32 = tl.sum(tmp30, 1)[:, None]
tmp33 = tmp29.to(tl.float32)
tmp34 = tl.broadcast_to(tmp33, [XBLOCK, RBLOCK])
tmp35, = tl.associative_scan((tmp34,), 1, _triton_helper_fn_add0)
tmp36 = tmp4 - tmp29
tmp37 = tmp36.to(tl.float32)
tmp38 = tl.broadcast_to(tmp37, [XBLOCK, RBLOCK])
tmp39, = tl.associative_scan((tmp38,), 1, _triton_helper_fn_add0)
tmp40 = tmp22.to(tl.int64)
tmp41 = tmp40 + tmp24
tmp42 = tmp40 < 0
tmp43 = tl.where(tmp42, tmp41, tmp40)
tl.device_assert((0 <= tmp43) & (tmp43 < 64),
'index out of bounds: 0 <= tmp43 < 64')
tmp45 = tl.load(in_ptr1 + (192 + tmp43 % 64), None, eviction_policy=
'evict_last')
tmp46 = tmp4 - tmp45
tmp47 = tl.broadcast_to(tmp46, [XBLOCK, RBLOCK])
tmp49 = tl.sum(tmp47, 1)[:, None]
tmp50 = tmp46.to(tl.float32)
tmp51 = tl.broadcast_to(tmp50, [XBLOCK, RBLOCK])
tmp52, = tl.associative_scan((tmp51,), 1, _triton_helper_fn_add0)
tmp53 = tmp4 - tmp46
tmp54 = tmp53.to(tl.float32)
tmp55 = tl.broadcast_to(tmp54, [XBLOCK, RBLOCK])
tmp56, = tl.associative_scan((tmp55,), 1, _triton_helper_fn_add0)
tl.store(out_ptr0 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp12, None)
tl.store(out_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp21, None)
tl.store(out_ptr5 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp35, None)
tl.store(out_ptr6 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp39, None)
tl.store(out_ptr8 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp52, None)
tl.store(out_ptr9 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp56, None)
tl.store(out_ptr4 + tl.full([XBLOCK, 1], 0, tl.int32), tmp32, None)
tl.store(out_ptr7 + tl.full([XBLOCK, 1], 0, tl.int32), tmp49, None)
@triton.jit
def triton_per_fused_add_div_dot_relu_rsub_sub_4(in_out_ptr0, in_out_ptr1,
in_out_ptr2, in_out_ptr3, in_out_ptr4, in_out_ptr5, in_out_ptr6,
in_out_ptr7, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5,
in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12,
in_ptr13, in_ptr14, in_ptr15, in_ptr16, in_ptr17, in_ptr18, in_ptr19,
in_ptr20, in_ptr21, in_ptr22, in_ptr23, xnumel, rnumel, XBLOCK: tl.
constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp6 = tl.load(in_out_ptr0 + 0)
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp24 = tl.load(in_ptr1 + r0, None)
tmp26 = tl.load(in_ptr2 + r0, None)
tmp35 = tl.load(in_ptr3 + r0, None)
tmp37 = tl.load(in_out_ptr1 + 0)
tmp38 = tl.broadcast_to(tmp37, [XBLOCK, RBLOCK])
tmp54 = tl.load(in_ptr4 + r0, None)
tmp56 = tl.load(in_ptr5 + r0, None)
tmp65 = tl.load(in_ptr6 + r0, None)
tmp67 = tl.load(in_out_ptr2 + 0)
tmp68 = tl.broadcast_to(tmp67, [XBLOCK, RBLOCK])
tmp84 = tl.load(in_ptr7 + r0, None)
tmp86 = tl.load(in_ptr8 + r0, None)
tmp95 = tl.load(in_ptr9 + r0, None)
tmp97 = tl.load(in_out_ptr3 + 0)
tmp98 = tl.broadcast_to(tmp97, [XBLOCK, RBLOCK])
tmp114 = tl.load(in_ptr10 + r0, None)
tmp116 = tl.load(in_ptr11 + r0, None)
tmp125 = tl.load(in_ptr12 + r0, None)
tmp127 = tl.load(in_out_ptr4 + 0)
tmp128 = tl.broadcast_to(tmp127, [XBLOCK, RBLOCK])
tmp144 = tl.load(in_ptr13 + r0, None)
tmp146 = tl.load(in_ptr14 + r0, None)
tmp155 = tl.load(in_ptr15 + r0, None)
tmp157 = tl.load(in_out_ptr5 + 0)
tmp158 = tl.broadcast_to(tmp157, [XBLOCK, RBLOCK])
tmp174 = tl.load(in_ptr16 + r0, None)
tmp176 = tl.load(in_ptr17 + r0, None)
tmp185 = tl.load(in_ptr18 + r0, None)
tmp187 = tl.load(in_out_ptr6 + 0)
tmp188 = tl.broadcast_to(tmp187, [XBLOCK, RBLOCK])
tmp204 = tl.load(in_ptr19 + r0, None)
tmp206 = tl.load(in_ptr20 + r0, None)
tmp215 = tl.load(in_ptr21 + r0, None)
tmp217 = tl.load(in_out_ptr7 + 0)
tmp218 = tl.broadcast_to(tmp217, [XBLOCK, RBLOCK])
tmp234 = tl.load(in_ptr22 + r0, None)
tmp236 = tl.load(in_ptr23 + r0, None)
tmp1 = tl.full([1, 1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = r0
tmp4 = tl.full([1, 1], 1, tl.int64)
tmp5 = tmp3 >= tmp4
tmp8 = tl.load(in_ptr1 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp5,
other=0.0)
tmp9 = tmp7 - tmp8
tmp10 = tl.load(in_ptr2 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp5,
other=0.0)
tmp11 = tmp7 + tmp10
tmp12 = tmp9 / tmp11
tmp13 = 1.0
tmp14 = tmp13 - tmp12
tmp15 = tl.load(in_ptr1 + tl.broadcast_to(-1 + r0, [XBLOCK, RBLOCK]),
tmp5, other=0.0)
tmp16 = tmp7 - tmp15
tmp17 = tl.load(in_ptr2 + tl.broadcast_to(-1 + r0, [XBLOCK, RBLOCK]),
tmp5, other=0.0)
tmp18 = tmp7 + tmp17
tmp19 = tmp16 / tmp18
tmp20 = tmp13 - tmp19
tmp21 = tmp14 - tmp20
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp5, tmp21, tmp22)
tmp25 = tmp7 - tmp24
tmp27 = tmp7 + tmp26
tmp28 = tmp25 / tmp27
tmp29 = tmp13 - tmp28
tmp30 = tl.where(tmp5, tmp23, tmp29)
tmp31 = tmp2 * tmp30
tmp32 = tl.broadcast_to(tmp31, [XBLOCK, RBLOCK])
tmp34 = tl.sum(tmp32, 1)[:, None]
tmp36 = triton_helpers.maximum(tmp1, tmp35)
tmp39 = tl.load(in_ptr4 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp5,
other=0.0)
tmp40 = tmp38 - tmp39
tmp41 = tl.load(in_ptr5 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp5,
other=0.0)
tmp42 = tmp38 + tmp41
tmp43 = tmp40 / tmp42
tmp44 = tmp13 - tmp43
tmp45 = tl.load(in_ptr4 + tl.broadcast_to(-1 + r0, [XBLOCK, RBLOCK]),
tmp5, other=0.0)
tmp46 = tmp38 - tmp45
tmp47 = tl.load(in_ptr5 + tl.broadcast_to(-1 + r0, [XBLOCK, RBLOCK]),
tmp5, other=0.0)
tmp48 = tmp38 + tmp47
tmp49 = tmp46 / tmp48
tmp50 = tmp13 - tmp49
tmp51 = tmp44 - tmp50
tmp52 = tl.full(tmp51.shape, 0.0, tmp51.dtype)
tmp53 = tl.where(tmp5, tmp51, tmp52)
tmp55 = tmp38 - tmp54
tmp57 = tmp38 + tmp56
tmp58 = tmp55 / tmp57
tmp59 = tmp13 - tmp58
tmp60 = tl.where(tmp5, tmp53, tmp59)
tmp61 = tmp36 * tmp60
tmp62 = tl.broadcast_to(tmp61, [XBLOCK, RBLOCK])
tmp64 = tl.sum(tmp62, 1)[:, None]
tmp66 = triton_helpers.maximum(tmp1, tmp65)
tmp69 = tl.load(in_ptr7 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp5,
other=0.0)
tmp70 = tmp68 - tmp69
tmp71 = tl.load(in_ptr8 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp5,
other=0.0)
tmp72 = tmp68 + tmp71
tmp73 = tmp70 / tmp72
tmp74 = tmp13 - tmp73
tmp75 = tl.load(in_ptr7 + tl.broadcast_to(-1 + r0, [XBLOCK, RBLOCK]),
tmp5, other=0.0)
tmp76 = tmp68 - tmp75
tmp77 = tl.load(in_ptr8 + tl.broadcast_to(-1 + r0, [XBLOCK, RBLOCK]),
tmp5, other=0.0)
tmp78 = tmp68 + tmp77
tmp79 = tmp76 / tmp78
tmp80 = tmp13 - tmp79
tmp81 = tmp74 - tmp80
tmp82 = tl.full(tmp81.shape, 0.0, tmp81.dtype)
tmp83 = tl.where(tmp5, tmp81, tmp82)
tmp85 = tmp68 - tmp84
tmp87 = tmp68 + tmp86
tmp88 = tmp85 / tmp87
tmp89 = tmp13 - tmp88
tmp90 = tl.where(tmp5, tmp83, tmp89)
tmp91 = tmp66 * tmp90
tmp92 = tl.broadcast_to(tmp91, [XBLOCK, RBLOCK])
tmp94 = tl.sum(tmp92, 1)[:, None]
tmp96 = triton_helpers.maximum(tmp1, tmp95)
tmp99 = tl.load(in_ptr10 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp5,
other=0.0)
tmp100 = tmp98 - tmp99
tmp101 = tl.load(in_ptr11 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp5,
other=0.0)
tmp102 = tmp98 + tmp101
tmp103 = tmp100 / tmp102
tmp104 = tmp13 - tmp103
tmp105 = tl.load(in_ptr10 + tl.broadcast_to(-1 + r0, [XBLOCK, RBLOCK]),
tmp5, other=0.0)
tmp106 = tmp98 - tmp105
tmp107 = tl.load(in_ptr11 + tl.broadcast_to(-1 + r0, [XBLOCK, RBLOCK]),
tmp5, other=0.0)
tmp108 = tmp98 + tmp107
tmp109 = tmp106 / tmp108
tmp110 = tmp13 - tmp109
tmp111 = tmp104 - tmp110
tmp112 = tl.full(tmp111.shape, 0.0, tmp111.dtype)
tmp113 = tl.where(tmp5, tmp111, tmp112)
tmp115 = tmp98 - tmp114
tmp117 = tmp98 + tmp116
tmp118 = tmp115 / tmp117
tmp119 = tmp13 - tmp118
tmp120 = tl.where(tmp5, tmp113, tmp119)
tmp121 = tmp96 * tmp120
tmp122 = tl.broadcast_to(tmp121, [XBLOCK, RBLOCK])
tmp124 = tl.sum(tmp122, 1)[:, None]
tmp126 = triton_helpers.maximum(tmp1, tmp125)
tmp129 = tl.load(in_ptr13 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp5,
other=0.0)
tmp130 = tmp128 - tmp129
tmp131 = tl.load(in_ptr14 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp5,
other=0.0)
tmp132 = tmp128 + tmp131
tmp133 = tmp130 / tmp132
tmp134 = tmp13 - tmp133
tmp135 = tl.load(in_ptr13 + tl.broadcast_to(-1 + r0, [XBLOCK, RBLOCK]),
tmp5, other=0.0)
tmp136 = tmp128 - tmp135
tmp137 = tl.load(in_ptr14 + tl.broadcast_to(-1 + r0, [XBLOCK, RBLOCK]),
tmp5, other=0.0)
tmp138 = tmp128 + tmp137
tmp139 = tmp136 / tmp138
tmp140 = tmp13 - tmp139
tmp141 = tmp134 - tmp140
tmp142 = tl.full(tmp141.shape, 0.0, tmp141.dtype)
tmp143 = tl.where(tmp5, tmp141, tmp142)
tmp145 = tmp128 - tmp144
tmp147 = tmp128 + tmp146
tmp148 = tmp145 / tmp147
tmp149 = tmp13 - tmp148
tmp150 = tl.where(tmp5, tmp143, tmp149)
tmp151 = tmp126 * tmp150
tmp152 = tl.broadcast_to(tmp151, [XBLOCK, RBLOCK])
tmp154 = tl.sum(tmp152, 1)[:, None]
tmp156 = triton_helpers.maximum(tmp1, tmp155)
tmp159 = tl.load(in_ptr16 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp5,
other=0.0)
tmp160 = tmp158 - tmp159
tmp161 = tl.load(in_ptr17 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp5,
other=0.0)
tmp162 = tmp158 + tmp161
tmp163 = tmp160 / tmp162
tmp164 = tmp13 - tmp163
tmp165 = tl.load(in_ptr16 + tl.broadcast_to(-1 + r0, [XBLOCK, RBLOCK]),
tmp5, other=0.0)
tmp166 = tmp158 - tmp165
tmp167 = tl.load(in_ptr17 + tl.broadcast_to(-1 + r0, [XBLOCK, RBLOCK]),
tmp5, other=0.0)
tmp168 = tmp158 + tmp167
tmp169 = tmp166 / tmp168
tmp170 = tmp13 - tmp169
tmp171 = tmp164 - tmp170
tmp172 = tl.full(tmp171.shape, 0.0, tmp171.dtype)
tmp173 = tl.where(tmp5, tmp171, tmp172)
tmp175 = tmp158 - tmp174
tmp177 = tmp158 + tmp176
tmp178 = tmp175 / tmp177
tmp179 = tmp13 - tmp178
tmp180 = tl.where(tmp5, tmp173, tmp179)
tmp181 = tmp156 * tmp180
tmp182 = tl.broadcast_to(tmp181, [XBLOCK, RBLOCK])
tmp184 = tl.sum(tmp182, 1)[:, None]
tmp186 = triton_helpers.maximum(tmp1, tmp185)
tmp189 = tl.load(in_ptr19 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp5,
other=0.0)
tmp190 = tmp188 - tmp189
tmp191 = tl.load(in_ptr20 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp5,
other=0.0)
tmp192 = tmp188 + tmp191
tmp193 = tmp190 / tmp192
tmp194 = tmp13 - tmp193
tmp195 = tl.load(in_ptr19 + tl.broadcast_to(-1 + r0, [XBLOCK, RBLOCK]),
tmp5, other=0.0)
tmp196 = tmp188 - tmp195
tmp197 = tl.load(in_ptr20 + tl.broadcast_to(-1 + r0, [XBLOCK, RBLOCK]),
tmp5, other=0.0)
tmp198 = tmp188 + tmp197
tmp199 = tmp196 / tmp198
tmp200 = tmp13 - tmp199
tmp201 = tmp194 - tmp200
tmp202 = tl.full(tmp201.shape, 0.0, tmp201.dtype)
tmp203 = tl.where(tmp5, tmp201, tmp202)
tmp205 = tmp188 - tmp204
tmp207 = tmp188 + tmp206
tmp208 = tmp205 / tmp207
tmp209 = tmp13 - tmp208
tmp210 = tl.where(tmp5, tmp203, tmp209)
tmp211 = tmp186 * tmp210
tmp212 = tl.broadcast_to(tmp211, [XBLOCK, RBLOCK])
tmp214 = tl.sum(tmp212, 1)[:, None]
tmp216 = triton_helpers.maximum(tmp1, tmp215)
tmp219 = tl.load(in_ptr22 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp5,
other=0.0)
tmp220 = tmp218 - tmp219
tmp221 = tl.load(in_ptr23 + tl.broadcast_to(r0, [XBLOCK, RBLOCK]), tmp5,
other=0.0)
tmp222 = tmp218 + tmp221
tmp223 = tmp220 / tmp222
tmp224 = tmp13 - tmp223
tmp225 = tl.load(in_ptr22 + tl.broadcast_to(-1 + r0, [XBLOCK, RBLOCK]),
tmp5, other=0.0)
tmp226 = tmp218 - tmp225
tmp227 = tl.load(in_ptr23 + tl.broadcast_to(-1 + r0, [XBLOCK, RBLOCK]),
tmp5, other=0.0)
tmp228 = tmp218 + tmp227
tmp229 = tmp226 / tmp228
tmp230 = tmp13 - tmp229
tmp231 = tmp224 - tmp230
tmp232 = tl.full(tmp231.shape, 0.0, tmp231.dtype)
tmp233 = tl.where(tmp5, tmp231, tmp232)
tmp235 = tmp218 - tmp234
tmp237 = tmp218 + tmp236
tmp238 = tmp235 / tmp237
tmp239 = tmp13 - tmp238
tmp240 = tl.where(tmp5, tmp233, tmp239)
tmp241 = tmp216 * tmp240
tmp242 = tl.broadcast_to(tmp241, [XBLOCK, RBLOCK])
tmp244 = tl.sum(tmp242, 1)[:, None]
tmp245 = tmp34 + tmp64
tmp246 = tmp245 + tmp94
tmp247 = tmp246 + tmp124
tmp248 = 0.25
tmp249 = tmp247 * tmp248
tmp250 = tmp154 + tmp184
tmp251 = tmp250 + tmp214
tmp252 = tmp251 + tmp244
tmp253 = tmp252 * tmp248
tmp254 = tmp249 + tmp253
tmp255 = 0.5
tmp256 = tmp254 * tmp255
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp256, None)
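# Note (editorial): each in_out_ptr initially holds a per-image ground-truth
# sum produced by the four sort/cumsum kernels above; only in_out_ptr0 is
# overwritten with the final scalar loss, while in_out_ptr1..in_out_ptr7 are
# only read here.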
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64,), (1,), torch.float32)
buf20 = empty_strided_cuda((64,), (1,), torch.float32)
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = empty_strided_cuda((64,), (1,), torch.float32)
buf4 = empty_strided_cuda((64,), (1,), torch.float32)
buf22 = empty_strided_cuda((), (), torch.float32)
buf23 = empty_strided_cuda((64,), (1,), torch.float32)
buf24 = empty_strided_cuda((64,), (1,), torch.float32)
get_raw_stream(0)
triton_per_fused_cumsum_index_mul_rsub_sort_sub_sum_0[grid(1)](arg0_1,
arg1_1, buf0, buf20, buf2, buf3, buf4, buf22, buf23, buf24, 1,
64, XBLOCK=1, num_warps=2, num_stages=1)
buf5 = empty_strided_cuda((64,), (1,), torch.float32)
buf25 = empty_strided_cuda((64,), (1,), torch.float32)
buf7 = empty_strided_cuda((), (), torch.float32)
buf8 = empty_strided_cuda((64,), (1,), torch.float32)
buf9 = empty_strided_cuda((64,), (1,), torch.float32)
buf27 = empty_strided_cuda((), (), torch.float32)
buf28 = empty_strided_cuda((64,), (1,), torch.float32)
buf29 = empty_strided_cuda((64,), (1,), torch.float32)
triton_per_fused_cumsum_index_mul_rsub_sort_sub_sum_1[grid(1)](arg0_1,
arg1_1, buf5, buf25, buf7, buf8, buf9, buf27, buf28, buf29, 1,
64, XBLOCK=1, num_warps=2, num_stages=1)
buf10 = empty_strided_cuda((64,), (1,), torch.float32)
buf30 = empty_strided_cuda((64,), (1,), torch.float32)
buf12 = empty_strided_cuda((), (), torch.float32)
buf13 = empty_strided_cuda((64,), (1,), torch.float32)
buf14 = empty_strided_cuda((64,), (1,), torch.float32)
buf32 = empty_strided_cuda((), (), torch.float32)
buf33 = empty_strided_cuda((64,), (1,), torch.float32)
buf34 = empty_strided_cuda((64,), (1,), torch.float32)
triton_per_fused_cumsum_index_mul_rsub_sort_sub_sum_2[grid(1)](arg0_1,
arg1_1, buf10, buf30, buf12, buf13, buf14, buf32, buf33, buf34,
1, 64, XBLOCK=1, num_warps=2, num_stages=1)
buf15 = empty_strided_cuda((64,), (1,), torch.float32)
buf35 = empty_strided_cuda((64,), (1,), torch.float32)
buf17 = empty_strided_cuda((), (), torch.float32)
buf18 = empty_strided_cuda((64,), (1,), torch.float32)
buf19 = empty_strided_cuda((64,), (1,), torch.float32)
buf37 = empty_strided_cuda((), (), torch.float32)
buf38 = empty_strided_cuda((64,), (1,), torch.float32)
buf39 = empty_strided_cuda((64,), (1,), torch.float32)
triton_per_fused_cumsum_index_mul_rsub_sort_sub_sum_3[grid(1)](arg0_1,
arg1_1, buf15, buf35, buf17, buf18, buf19, buf37, buf38, buf39,
1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
buf40 = buf2
del buf2
buf41 = buf7
del buf7
buf42 = buf12
del buf12
buf43 = buf17
del buf17
buf44 = buf22
del buf22
buf45 = buf27
del buf27
buf46 = buf32
del buf32
buf47 = buf37
del buf37
buf48 = buf40
del buf40
triton_per_fused_add_div_dot_relu_rsub_sub_4[grid(1)](buf48, buf41,
buf42, buf43, buf44, buf45, buf46, buf47, buf0, buf3, buf4,
buf5, buf8, buf9, buf10, buf13, buf14, buf15, buf18, buf19,
buf20, buf23, buf24, buf25, buf28, buf29, buf30, buf33, buf34,
buf35, buf38, buf39, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf10
del buf13
del buf14
del buf15
del buf18
del buf19
del buf20
del buf23
del buf24
del buf25
del buf28
del buf29
del buf3
del buf30
del buf33
del buf34
del buf35
del buf38
del buf39
del buf4
del buf41
del buf42
del buf43
del buf44
del buf45
del buf46
del buf47
del buf5
del buf8
del buf9
return buf48,
def jaccard(outputs, targets, per_image=False, non_empty=False, min_pixels=5):
batch_size = outputs.size()[0]
eps = 0.001
if not per_image:
batch_size = 1
dice_target = targets.contiguous().view(batch_size, -1).float()
dice_output = outputs.contiguous().view(batch_size, -1)
target_sum = torch.sum(dice_target, dim=1)
intersection = torch.sum(dice_output * dice_target, dim=1)
    losses = 1.0 - (intersection + eps) / (
        torch.sum(dice_output + dice_target, dim=1) - intersection + eps)
if non_empty:
assert per_image
non_empty_images = 0
sum_loss = 0
for i in range(batch_size):
if target_sum[i] > min_pixels:
sum_loss += losses[i]
non_empty_images += 1
if non_empty_images == 0:
return 0
else:
return sum_loss / non_empty_images
return losses.mean()
def flatten_binary_scores(scores, labels, ignore=None):
"""
    Flattens predictions in the batch (binary case).
    Removes labels equal to 'ignore'.
"""
scores = scores.view(-1)
labels = labels.view(-1)
if ignore is None:
return scores, labels
valid = labels != ignore
vscores = scores[valid]
vlabels = labels[valid]
return vscores, vlabels
def lovasz_grad(gt_sorted):
"""
    Computes the gradient of the Lovasz extension w.r.t. sorted errors.
    See Alg. 1 in the paper.
"""
p = len(gt_sorted)
gts = gt_sorted.sum()
intersection = gts.float() - gt_sorted.float().cumsum(0)
union = gts.float() + (1 - gt_sorted).float().cumsum(0)
jaccard = 1.0 - intersection / union
if p > 1:
jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
return jaccard
def lovasz_hinge_flat(logits, labels):
if len(labels) == 0:
return logits.sum() * 0.0
signs = 2.0 * labels.float() - 1.0
errors = 1.0 - logits * signs
errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
perm = perm.data
gt_sorted = labels[perm]
grad = lovasz_grad(gt_sorted)
loss = torch.dot(F.relu(errors_sorted), grad)
return loss
def mean(l, ignore_nan=False, empty=0):
"""
nanmean compatible with generators.
"""
ll = iter(l)
if ignore_nan:
ll = filterfalse(np.isnan, ll)
try:
n = 1
acc = next(ll)
except StopIteration:
if empty == 'raise':
raise ValueError('Empty mean')
return empty
for n, v in enumerate(ll, 2):
acc += v
if n == 1:
return acc
return acc / n
def lovasz_hinge(logits, labels, per_image=True, ignore=None):
if per_image:
        loss = mean(
            lovasz_hinge_flat(*flatten_binary_scores(
                log.unsqueeze(0), lab.unsqueeze(0), ignore))
            for log, lab in zip(logits, labels))
    else:
        loss = lovasz_hinge_flat(
            *flatten_binary_scores(logits, labels, ignore))
return loss
def symmetric_lovasz(outputs, targets):
    return (lovasz_hinge(outputs, targets) +
            lovasz_hinge(-outputs, 1 - targets)) / 2
class LovaszLossNew(nn.Module):
def __init__(self, ignore_index=255, per_image=True):
super().__init__()
self.ignore_index = ignore_index
self.per_image = per_image
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
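# Note (editorial, hedged): LovaszLossNew is the compiled counterpart of
# LovaszLoss above; forward dispatches straight into call([...]) and returns
# the scalar loss held in buf48.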
| kevinkwshin/kaggle-pneumothorax | LovaszLoss | false | 16,124 | [
"MIT"
] | 74 | 24b91a9425097023f0cc7781a9380cb247babe22 | https://github.com/kevinkwshin/kaggle-pneumothorax/tree/24b91a9425097023f0cc7781a9380cb247babe22 |
RZTXDecoderLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/qw/cqw7yoyglmtjad3kirznl5odetqfs3k6pjtnfdbzklyhsdvuvgft.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# multi_head_attention_forward => mul
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_3, 1.0), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/hz/chzi3aam26mikdhljz5x7jlqazm7kpktzeptsf36thgfhsg7ub6a.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# multi_head_attention_forward => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/em/cem6qbxwbiqnjqybzk5arf2obt5uggy4qs7otwwpovvnrhvdc6h4.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# multi_head_attention_forward => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/rh/crhjfwyl6xoj5ylcsbbh6lp2vlegits2zkdej3b3wb2q4ddfnejv.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# multi_head_attention_forward => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask)
tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/kd/ckd4b7j2g3icd6ulyhdolihj5jgm7mem27ldyigskshas64ai6mj.py
# Topologically Sorted Source Nodes: [mul, tgt], Original ATen: [aten.mul, aten.add]
# Source node to ATen node mapping:
# mul => mul_1
# tgt => add
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze, %primals_6), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %mul_1), kwargs = {})
triton_poi_fused_add_mul_4 = async_compile.triton('triton_poi_fused_add_mul_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp2 = tl.load(in_ptr2 + (0))
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp4 = tmp1 * tmp3
tmp5 = tmp0 + tmp4
tl.store(out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/kk/ckkiaccdbgsbbstnsbihpkxei6ifjugcmotfs26mi2mu3yp7q5nh.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_13), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_5 = async_compile.triton('triton_poi_fused_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 2048
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (12, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (1, ), (1, ))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (12, 4), (4, 1))
assert_size_stride(primals_9, (12, ), (1, ))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4, ), (1, ))
assert_size_stride(primals_12, (2048, 4), (4, 1))
assert_size_stride(primals_13, (2048, ), (1, ))
assert_size_stride(primals_14, (4, 2048), (2048, 1))
assert_size_stride(primals_15, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(primals_3, (4, ), (1, ), 4), primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 16), alpha=1, beta=1, out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(primals_3, (4, ), (1, ), 8), primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 32), alpha=1, beta=1, out=buf2)
del primals_2
buf3 = reinterpret_tensor(buf0, (4, 4, 1), (1, 4, 16), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(buf3, primals_3, 16, grid=grid(16), stream=stream0)
del primals_3
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
extern_kernels.bmm(buf3, reinterpret_tensor(buf1, (4, 1, 4), (1, 1, 4), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf4, buf5, 64, grid=grid(64), stream=stream0)
buf6 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf5, buf6, 64, grid=grid(64), stream=stream0)
buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (4, 4, 1), (1, 4, 1), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone]
triton_poi_fused_clone_3.run(buf7, buf8, 4, 4, grid=grid(4, 4), stream=stream0)
buf9 = reinterpret_tensor(buf7, (4, 4), (4, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf8, (4, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf9)
del primals_5
buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, tgt], Original ATen: [aten.mul, aten.add]
triton_poi_fused_add_mul_4.run(primals_1, buf9, primals_6, buf10, 16, grid=grid(16), stream=stream0)
buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf10, reinterpret_tensor(primals_8, (4, 4), (1, 4), 0), out=buf11)
buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward_1], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(primals_9, (4, ), (1, ), 4), primals_7, reinterpret_tensor(primals_8, (4, 4), (1, 4), 16), alpha=1, beta=1, out=buf12)
buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward_1], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(primals_9, (4, ), (1, ), 8), primals_7, reinterpret_tensor(primals_8, (4, 4), (1, 4), 32), alpha=1, beta=1, out=buf13)
buf14 = reinterpret_tensor(buf11, (4, 4, 1), (1, 4, 16), 0); del buf11 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward_1], Original ATen: [aten.mul]
triton_poi_fused_mul_0.run(buf14, primals_9, 16, grid=grid(16), stream=stream0)
del primals_9
buf15 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward_1], Original ATen: [aten.bmm]
extern_kernels.bmm(buf14, reinterpret_tensor(buf12, (4, 1, 4), (1, 1, 4), 0), out=buf15)
buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf15, buf16, 64, grid=grid(64), stream=stream0)
buf17 = buf15; del buf15 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf16, buf17, 64, grid=grid(64), stream=stream0)
del buf16
buf18 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward_1], Original ATen: [aten.bmm]
extern_kernels.bmm(buf17, reinterpret_tensor(buf13, (4, 4, 1), (1, 4, 1), 0), out=buf18)
buf19 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward_1], Original ATen: [aten.clone]
triton_poi_fused_clone_3.run(buf18, buf19, 4, 4, grid=grid(4, 4), stream=stream0)
buf20 = reinterpret_tensor(buf18, (4, 4), (4, 1), 0); del buf18 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, reinterpret_tensor(buf19, (4, 4), (4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf20)
del primals_11
buf21 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul_1, tgt_1], Original ATen: [aten.mul, aten.add]
triton_poi_fused_add_mul_4.run(buf10, buf20, primals_6, buf21, 16, grid=grid(16), stream=stream0)
buf22 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf21, reinterpret_tensor(primals_12, (4, 2048), (1, 4), 0), out=buf22)
buf23 = buf22; del buf22 # reuse
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu]
triton_poi_fused_relu_5.run(buf23, primals_13, 8192, grid=grid(8192), stream=stream0)
del primals_13
buf24 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [tgt2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_15, buf23, reinterpret_tensor(primals_14, (2048, 4), (1, 2048), 0), alpha=1, beta=1, out=buf24)
del primals_15
buf25 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul_2, tgt_2], Original ATen: [aten.mul, aten.add]
triton_poi_fused_add_mul_4.run(buf21, buf24, primals_6, buf25, 16, grid=grid(16), stream=stream0)
return (buf25, primals_6, primals_1, buf6, reinterpret_tensor(buf8, (4, 4), (4, 1), 0), buf9, buf10, primals_7, buf17, reinterpret_tensor(buf19, (4, 4), (4, 1), 0), buf20, buf21, buf23, buf24, primals_14, primals_12, primals_10, reinterpret_tensor(buf13, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf14, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf12, (4, 4, 1), (1, 4, 1), 0), reinterpret_tensor(primals_8, (4, 4), (4, 1), 0), primals_4, reinterpret_tensor(buf2, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf3, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf1, (4, 4, 1), (1, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((2048, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((4, 2048), (2048, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class RZTXDecoderLayer(nn.Module):
"""RZTXDecoderLayer is made up of self-attn and feedforward network with
residual weights for faster convergece.
This encoder layer is based on the paper "ReZero is All You Need:
Fast Convergence at Large Depth".
Thomas Bachlechner∗, Bodhisattwa Prasad Majumder∗, Huanru Henry Mao∗,
Garrison W. Cottrell, Julian McAuley. 2020.
Args:
d_model: the number of expected features in the input (required).
nhead: the number of heads in the multiheadattention models (required).
dim_feedforward: the dimension of the feedforward network model (default=2048).
dropout: the dropout value (default=0.1).
activation: the activation function of intermediate layer, relu or gelu (default=relu).
use_res_init: Use residual initialization
Examples::
>>> decoder_layer = RZTXDecoderLayer(d_model=512, nhead=8)
>>> src = torch.rand(10, 32, 512)
>>> out = decoder_layer(src)
"""
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
activation='relu'):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout
=dropout)
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
self.resweight = nn.Parameter(torch.Tensor([0]))
if activation == 'relu':
self.activation = F.relu
elif activation == 'gelu':
self.activation = F.gelu
def forward(self, tgt, memory, tgt_mask=None, memory_mask=None,
tgt_key_padding_mask=None, memory_key_padding_mask=None):
"""Pass the inputs (and mask) through the decoder layer.
Args:
tgt: the sequence to the decoder layer (required).
        memory: the sequence from the last layer of the encoder (required).
tgt_mask: the mask for the tgt sequence (optional).
memory_mask: the mask for the memory sequence (optional).
tgt_key_padding_mask: the mask for the tgt keys per batch (optional).
memory_key_padding_mask: the mask for the memory keys per batch (optional).
Shape:
        see the docs in the PyTorch Transformer class.
"""
tgt2 = self.self_attn(tgt, tgt, tgt, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout1(tgt2) * self.resweight
tgt2 = self.multihead_attn(tgt, memory, memory, attn_mask=
memory_mask, key_padding_mask=memory_key_padding_mask)[0]
tgt = tgt + self.dropout2(tgt2) * self.resweight
if hasattr(self, 'activation'):
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(
tgt))))
else:
tgt2 = self.linear2(self.dropout(F.relu(self.linear1(tgt))))
tgt = tgt + self.dropout3(tgt2) * self.resweight
return tgt
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'nhead': 4}]
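# Hedged usage sketch (illustration, not part of the original module). The
# ReZero update in forward() is `x = x + resweight * Dropout(sublayer(x))`,
# with resweight initialized to 0 so every layer starts as the identity map:
#     layer = RZTXDecoderLayer(d_model=512, nhead=8)
#     tgt = torch.rand(20, 32, 512)    # (seq_len, batch, d_model)
#     memory = torch.rand(10, 32, 512)
#     out = layer(tgt, memory)         # same shape as tgt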
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
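# Note (added for clarity, not in the generated file): triton_poi_fused__softmax_1
# and triton_poi_fused__softmax_2 together implement a numerically stable
# row-wise softmax over rows of length 4: the first computes exp(x - max(x))
# per row, the second divides by the row sum, i.e. the equivalent of
# torch.softmax(x.view(-1, 4), dim=-1).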
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_mul_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp4 = tmp1 * tmp3
tmp5 = tmp0 + tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
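# Note (added for clarity): this kernel fuses the ReZero residual update
# out = in0 + resweight * in1, where in_ptr2 holds the scalar resweight.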
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 2048
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
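# Note (added for clarity): this kernel fuses the linear1 bias add with ReLU,
# computing x = max(0, x + bias) in place on the (4, 2048) matmul output; the
# bare tl.full(...) expression above is unused residue of the automated
# cleanup of the generated kernel.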
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (12,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (1,), (1,))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (12, 4), (4, 1))
assert_size_stride(primals_9, (12,), (1,))
assert_size_stride(primals_10, (4, 4), (4, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (2048, 4), (4, 1))
assert_size_stride(primals_13, (2048,), (1,))
assert_size_stride(primals_14, (4, 2048), (2048, 1))
assert_size_stride(primals_15, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4),
(1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_3, (4,), (1,), 4),
primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 16),
alpha=1, beta=1, out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_3, (4,), (1,), 8),
primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 32),
alpha=1, beta=1, out=buf2)
del primals_2
buf3 = reinterpret_tensor(buf0, (4, 4, 1), (1, 4, 16), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_mul_0[grid(16)](buf3, primals_3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_3
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf3, reinterpret_tensor(buf1, (4, 1, 4), (1, 1,
4), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(64)](buf4, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf6 = buf4
del buf4
triton_poi_fused__softmax_2[grid(64)](buf5, buf6, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (4, 4, 1), (1, 4,
1), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
triton_poi_fused_clone_3[grid(4, 4)](buf7, buf8, 4, 4, XBLOCK=4,
YBLOCK=4, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf7, (4, 4), (4, 1), 0)
del buf7
extern_kernels.addmm(primals_5, reinterpret_tensor(buf8, (4, 4), (4,
1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha
=1, beta=1, out=buf9)
del primals_5
buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_mul_4[grid(16)](primals_1, buf9, primals_6,
buf10, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf10, reinterpret_tensor(primals_8, (4, 4), (1,
4), 0), out=buf11)
buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_9, (4,), (1,), 4),
primals_7, reinterpret_tensor(primals_8, (4, 4), (1, 4), 16),
alpha=1, beta=1, out=buf12)
buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_9, (4,), (1,), 8),
primals_7, reinterpret_tensor(primals_8, (4, 4), (1, 4), 32),
alpha=1, beta=1, out=buf13)
buf14 = reinterpret_tensor(buf11, (4, 4, 1), (1, 4, 16), 0)
del buf11
triton_poi_fused_mul_0[grid(16)](buf14, primals_9, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_9
buf15 = buf5
del buf5
extern_kernels.bmm(buf14, reinterpret_tensor(buf12, (4, 1, 4), (1,
1, 4), 0), out=buf15)
buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(64)](buf15, buf16, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf17 = buf15
del buf15
triton_poi_fused__softmax_2[grid(64)](buf16, buf17, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf16
buf18 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf17, reinterpret_tensor(buf13, (4, 4, 1), (1,
4, 1), 0), out=buf18)
buf19 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
triton_poi_fused_clone_3[grid(4, 4)](buf18, buf19, 4, 4, XBLOCK=4,
YBLOCK=4, num_warps=1, num_stages=1)
buf20 = reinterpret_tensor(buf18, (4, 4), (4, 1), 0)
del buf18
extern_kernels.addmm(primals_11, reinterpret_tensor(buf19, (4, 4),
(4, 1), 0), reinterpret_tensor(primals_10, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf20)
del primals_11
buf21 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_mul_4[grid(16)](buf10, buf20, primals_6, buf21,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf22 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32)
extern_kernels.mm(buf21, reinterpret_tensor(primals_12, (4, 2048),
(1, 4), 0), out=buf22)
buf23 = buf22
del buf22
triton_poi_fused_relu_5[grid(8192)](buf23, primals_13, 8192, XBLOCK
=128, num_warps=4, num_stages=1)
del primals_13
buf24 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_15, buf23, reinterpret_tensor(
primals_14, (2048, 4), (1, 2048), 0), alpha=1, beta=1, out=buf24)
del primals_15
buf25 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_mul_4[grid(16)](buf21, buf24, primals_6, buf25,
16, XBLOCK=16, num_warps=1, num_stages=1)
return (buf25, primals_6, primals_1, buf6, reinterpret_tensor(buf8, (4,
4), (4, 1), 0), buf9, buf10, primals_7, buf17, reinterpret_tensor(
buf19, (4, 4), (4, 1), 0), buf20, buf21, buf23, buf24, primals_14,
primals_12, primals_10, reinterpret_tensor(buf13, (4, 1, 4), (1, 1,
4), 0), reinterpret_tensor(buf14, (4, 1, 4), (1, 1, 4), 0),
reinterpret_tensor(buf12, (4, 4, 1), (1, 4, 1), 0),
reinterpret_tensor(primals_8, (4, 4), (4, 1), 0), primals_4,
reinterpret_tensor(buf2, (4, 1, 4), (1, 1, 4), 0),
reinterpret_tensor(buf3, (4, 1, 4), (1, 1, 4), 0),
reinterpret_tensor(buf1, (4, 4, 1), (1, 4, 1), 0))
class RZTXDecoderLayerNew(nn.Module):
"""RZTXDecoderLayer is made up of self-attn and feedforward network with
residual weights for faster convergece.
This encoder layer is based on the paper "ReZero is All You Need:
Fast Convergence at Large Depth".
Thomas Bachlechner∗, Bodhisattwa Prasad Majumder∗, Huanru Henry Mao∗,
Garrison W. Cottrell, Julian McAuley. 2020.
Args:
d_model: the number of expected features in the input (required).
nhead: the number of heads in the multiheadattention models (required).
dim_feedforward: the dimension of the feedforward network model (default=2048).
dropout: the dropout value (default=0.1).
activation: the activation function of intermediate layer, relu or gelu (default=relu).
use_res_init: Use residual initialization
Examples::
>>> decoder_layer = RZTXDecoderLayer(d_model=512, nhead=8)
>>> src = torch.rand(10, 32, 512)
>>> out = decoder_layer(src)
"""
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
activation='relu'):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout
=dropout)
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
self.resweight = nn.Parameter(torch.Tensor([0]))
if activation == 'relu':
self.activation = F.relu
elif activation == 'gelu':
self.activation = F.gelu
def forward(self, input_0, input_1):
primals_6 = self.resweight
primals_2 = self.self_attn.in_proj_weight
primals_3 = self.self_attn.in_proj_bias
primals_1 = self.self_attn.out_proj.weight
primals_5 = self.self_attn.out_proj.bias
primals_8 = self.multihead_attn.in_proj_weight
primals_9 = self.multihead_attn.in_proj_bias
primals_4 = self.multihead_attn.out_proj.weight
primals_11 = self.multihead_attn.out_proj.bias
primals_12 = self.linear1.weight
primals_13 = self.linear1.bias
primals_14 = self.linear2.weight
primals_15 = self.linear2.bias
primals_7 = input_0
primals_10 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15])
return output[0]
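# Hedged usage sketch (illustration only, assuming a CUDA device): the compiled
# wrapper expects the same shapes as get_inputs() in the reference module above.
#     layer = RZTXDecoderLayerNew(d_model=4, nhead=4).cuda()
#     tgt = torch.rand(4, 4, device='cuda')
#     memory = torch.rand(4, 4, device='cuda')
#     out = layer(tgt, memory)   # (4, 4)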
| mpariente/rezero | RZTXDecoderLayer | false | 16,125 | [
"MIT"
] | 376 | 6bcf1df00bc9a3560b093a2bbe12dade92f86eba | https://github.com/mpariente/rezero/tree/6bcf1df00bc9a3560b093a2bbe12dade92f86eba |
LearnableClsToken | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/3n/c3nwvncjabbikbwlvgx3biwyuvbsyjkt4tnmuaklmv6mbbz5evjy.py
# Topologically Sorted Source Nodes: [features], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# features => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%repeat, %primals_1], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 5
x0 = xindex % 4
x2 = (xindex // 20)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 5, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x0 + (4*((-1) + x1)) + (16*x2)), tmp6 & xmask, other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/5u/c5uvtkyaah5223bsskz53et736xlvhgkzmz52s6uh22bz7o4r2qd.py
# Topologically Sorted Source Nodes: [mask], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# mask => cat_1
# Graph fragment:
# %cat_1 : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%convert_element_type, %primals_4], 1), kwargs = {})
triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 20
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = (xindex // 5)
x2 = xindex
tmp5 = tl.load(in_ptr0 + (0))
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp7 = 0.0
tmp8 = tmp6 * tmp7
tmp9 = (tmp8 != 0)
tmp10 = tmp9.to(tl.float32)
tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype)
tmp12 = tl.where(tmp4, tmp10, tmp11)
tmp13 = tmp0 >= tmp3
tmp14 = tl.full([1], 5, tl.int64)
tmp15 = tmp0 < tmp14
tmp16 = tl.load(in_ptr1 + ((4*x1) + ((-1) + x0)), tmp13 & xmask, eviction_policy='evict_last', other=0.0)
tmp17 = tl.where(tmp4, tmp12, tmp16)
tl.store(out_ptr0 + (x2), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/5l/c5lliaqgyrspzg2jxxl7iajvjmkw3bieuocq5nmvkaqji3ud3ztb.py
# Topologically Sorted Source Nodes: [lengths], Original ATen: [aten.add]
# Source node to ATen node mapping:
# lengths => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_5, 1), kwargs = {})
triton_poi_fused_add_2 = async_compile.triton('triton_poi_fused_add_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 1.0
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (1, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 5, 4), (20, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [features], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_2, primals_1, buf0, 80, grid=grid(80), stream=stream0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 5), (5, 1), torch.float32)
# Topologically Sorted Source Nodes: [mask], Original ATen: [aten.cat]
triton_poi_fused_cat_1.run(primals_3, primals_4, buf1, 20, grid=grid(20), stream=stream0)
del primals_3
del primals_4
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [lengths], Original ATen: [aten.add]
triton_poi_fused_add_2.run(primals_5, buf2, 256, grid=grid(256), stream=stream0)
del primals_5
return (buf0, buf1, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch as th
from torch import nn
class LearnableClsToken(nn.Module):
"""
Layer that adds learnable CLS tokens to sequence input.
"""
def __init__(self, d_model: 'int'):
super().__init__()
cls_token = th.zeros(d_model)
self.cls_param = nn.Parameter(cls_token, requires_grad=True)
self.fixed_ones = nn.Parameter(th.ones(1), requires_grad=False)
def forward(self, features, mask, lengths):
"""
CLS Token forward.
"""
batch, _seq_len, _d_model = features.shape
features = th.cat([self.cls_param.unsqueeze(0).unsqueeze(0).repeat(
batch, 1, 1), features], dim=1)
assert th.all(features[0, 0, :] == self.cls_param)
zeros = (self.fixed_ones.unsqueeze(0).repeat(batch, 1) * 0).bool()
mask = th.cat([zeros, mask], dim=1)
lengths = lengths + 1
return features, mask, lengths
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4]), torch.rand([4, 4, 4, 4])
]
def get_init_inputs():
return [[], {'d_model': 4}]
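# Hedged usage sketch (illustration only): the layer prepends one learnable CLS
# vector per batch element, extends the padding mask with False at the new
# position, and increments lengths by 1.
#     layer = LearnableClsToken(d_model=4)
#     feats = torch.rand(4, 4, 4)              # (batch, seq_len, d_model)
#     mask = torch.zeros(4, 4, dtype=torch.bool)
#     lengths = torch.full((4,), 4.0)
#     feats, mask, lengths = layer(feats, mask, lengths)
#     # feats: (4, 5, 4), mask: (4, 5), lengths: all 5.0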
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch as th
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 80
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 5
x0 = xindex % 4
x2 = xindex // 20
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 5, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 4 * (-1 + x1) + 16 * x2), tmp6 & xmask,
other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x3, tmp10, xmask)
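# Note (added for clarity): this kernel materializes
# th.cat([cls_token.expand(batch, 1, d_model), features], dim=1) into the
# (4, 5, 4) output, reading the CLS parameter for sequence index 0 and the
# shifted input features otherwise.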
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 20
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = xindex // 5
x2 = xindex
tmp5 = tl.load(in_ptr0 + 0)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK])
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp7 = 0.0
tmp8 = tmp6 * tmp7
tmp9 = tmp8 != 0
tmp10 = tmp9.to(tl.float32)
tmp11 = tl.full(tmp10.shape, 0.0, tmp10.dtype)
tmp12 = tl.where(tmp4, tmp10, tmp11)
tmp13 = tmp0 >= tmp3
tl.full([1], 5, tl.int64)
tmp16 = tl.load(in_ptr1 + (4 * x1 + (-1 + x0)), tmp13 & xmask,
eviction_policy='evict_last', other=0.0)
tmp17 = tl.where(tmp4, tmp12, tmp16)
tl.store(out_ptr0 + x2, tmp17, xmask)
@triton.jit
def triton_poi_fused_add_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 + tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (1,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 5, 4), (20, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(80)](primals_2, primals_1, buf0, 80,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 5), (5, 1), torch.float32)
triton_poi_fused_cat_1[grid(20)](primals_3, primals_4, buf1, 20,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_3
del primals_4
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_2[grid(256)](primals_5, buf2, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_5
return buf0, buf1, buf2
class LearnableClsTokenNew(nn.Module):
"""
Layer that adds learnable CLS tokens to sequence input.
"""
def __init__(self, d_model: 'int'):
super().__init__()
cls_token = th.zeros(d_model)
self.cls_param = nn.Parameter(cls_token, requires_grad=True)
self.fixed_ones = nn.Parameter(th.ones(1), requires_grad=False)
def forward(self, input_0, input_1, input_2):
primals_2 = self.cls_param
primals_3 = self.fixed_ones
primals_1 = input_0
primals_4 = input_1
primals_5 = input_2
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0], output[1], output[2]
| mzolfaghari/coot-videotext | LearnableClsToken | false | 16,126 | [
"Apache-2.0"
] | 213 | ee09c56c2600f56581167773d7f7dc5d036cc5e6 | https://github.com/mzolfaghari/coot-videotext/tree/ee09c56c2600f56581167773d7f7dc5d036cc5e6 |
RZTXEncoderLayer | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/qw/cqw7yoyglmtjad3kirznl5odetqfs3k6pjtnfdbzklyhsdvuvgft.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# multi_head_attention_forward => mul
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_3, 1.0), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/hz/chzi3aam26mikdhljz5x7jlqazm7kpktzeptsf36thgfhsg7ub6a.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# multi_head_attention_forward => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%bmm, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/em/cem6qbxwbiqnjqybzk5arf2obt5uggy4qs7otwwpovvnrhvdc6h4.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# multi_head_attention_forward => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
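# The two kernels above split the numerically stable softmax
# softmax(x) = exp(x - max(x)) / sum(exp(x - max(x))) into a
# max-subtract-exp pass and a sum-divide pass. A reference check
# (illustrative helper; runs on any device):
def _softmax_reference_check():
    x = torch.randn(4, 4, 4)
    y = torch.exp(x - x.amax(dim=-1, keepdim=True))
    y = y / y.sum(dim=-1, keepdim=True)
    assert torch.allclose(y, torch.softmax(x, dim=-1), atol=1e-6)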
# kernel path: runs/run_shard_0/inductor_cache/rh/crhjfwyl6xoj5ylcsbbh6lp2vlegits2zkdej3b3wb2q4ddfnejv.py
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# multi_head_attention_forward => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_7,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask)
tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/kd/ckd4b7j2g3icd6ulyhdolihj5jgm7mem27ldyigskshas64ai6mj.py
# Topologically Sorted Source Nodes: [src2, src], Original ATen: [aten.mul, aten.add]
# Source node to ATen node mapping:
# src => add
# src2 => mul_1
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze, %primals_6), kwargs = {})
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %mul_1), kwargs = {})
triton_poi_fused_add_mul_4 = async_compile.triton('triton_poi_fused_add_mul_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp2 = tl.load(in_ptr2 + (0))
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp4 = tmp1 * tmp3
tmp5 = tmp0 + tmp4
tl.store(out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/kk/ckkiaccdbgsbbstnsbihpkxei6ifjugcmotfs26mi2mu3yp7q5nh.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_8), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_5 = async_compile.triton('triton_poi_fused_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 2048
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (12, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (1, ), (1, ))
assert_size_stride(primals_7, (2048, 4), (4, 1))
assert_size_stride(primals_8, (2048, ), (1, ))
assert_size_stride(primals_9, (4, 2048), (2048, 1))
assert_size_stride(primals_10, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(primals_3, (4, ), (1, ), 4), primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 16), alpha=1, beta=1, out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(primals_3, (4, ), (1, ), 8), primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 32), alpha=1, beta=1, out=buf2)
del primals_2
buf3 = reinterpret_tensor(buf0, (4, 4, 1), (1, 4, 16), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(buf3, primals_3, 16, grid=grid(16), stream=stream0)
del primals_3
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
extern_kernels.bmm(buf3, reinterpret_tensor(buf1, (4, 1, 4), (1, 1, 4), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf4, buf5, 64, grid=grid(64), stream=stream0)
buf6 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf5, buf6, 64, grid=grid(64), stream=stream0)
del buf5
buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.bmm]
extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (4, 4, 1), (1, 4, 1), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.clone]
triton_poi_fused_clone_3.run(buf7, buf8, 4, 4, grid=grid(4, 4), stream=stream0)
buf9 = reinterpret_tensor(buf7, (4, 4), (4, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [multi_head_attention_forward], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf8, (4, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf9)
del primals_5
buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [src2, src], Original ATen: [aten.mul, aten.add]
triton_poi_fused_add_mul_4.run(primals_1, buf9, primals_6, buf10, 16, grid=grid(16), stream=stream0)
buf11 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf10, reinterpret_tensor(primals_7, (4, 2048), (1, 4), 0), out=buf11)
buf12 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu]
triton_poi_fused_relu_5.run(buf12, primals_8, 8192, grid=grid(8192), stream=stream0)
del primals_8
buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [src2_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_10, buf12, reinterpret_tensor(primals_9, (2048, 4), (1, 2048), 0), alpha=1, beta=1, out=buf13)
del primals_10
buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [src2_2, src_1], Original ATen: [aten.mul, aten.add]
triton_poi_fused_add_mul_4.run(buf10, buf13, primals_6, buf14, 16, grid=grid(16), stream=stream0)
return (buf14, primals_6, primals_1, buf6, reinterpret_tensor(buf8, (4, 4), (4, 1), 0), buf9, buf10, buf12, buf13, primals_9, primals_7, primals_4, reinterpret_tensor(buf2, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf3, (4, 1, 4), (1, 1, 4), 0), reinterpret_tensor(buf1, (4, 4, 1), (1, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((2048, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((2048, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 2048), (2048, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.nn import Module
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.module import Module
from torch.nn.modules.activation import MultiheadAttention
from torch.nn.modules.dropout import Dropout
from torch.nn.modules.linear import Linear
class RZTXEncoderLayer(Module):
"""RZTXEncoderLayer is made up of self-attn and feedforward network with
residual weights for faster convergece.
This encoder layer is based on the paper "ReZero is All You Need:
Fast Convergence at Large Depth".
Thomas Bachlechner∗, Bodhisattwa Prasad Majumder∗, Huanru Henry Mao∗,
Garrison W. Cottrell, Julian McAuley. 2020.
Args:
d_model: the number of expected features in the input (required).
nhead: the number of heads in the multiheadattention models (required).
dim_feedforward: the dimension of the feedforward network model (default=2048).
dropout: the dropout value (default=0.1).
activation: the activation function of intermediate layer, relu or gelu (default=relu).
use_res_init: Use residual initialization
Examples::
>>> encoder_layer = RZTXEncoderLayer(d_model=512, nhead=8)
>>> src = torch.rand(10, 32, 512)
>>> out = encoder_layer(src)
"""
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
activation='relu'):
super().__init__()
self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
self.linear1 = Linear(d_model, dim_feedforward)
self.dropout = Dropout(dropout)
self.linear2 = Linear(dim_feedforward, d_model)
self.dropout1 = Dropout(dropout)
self.dropout2 = Dropout(dropout)
self.resweight = nn.Parameter(torch.Tensor([0]))
if activation == 'relu':
self.activation = F.relu
elif activation == 'gelu':
self.activation = F.gelu
def __setstate__(self, state):
if 'activation' not in state:
state['activation'] = F.relu
super().__setstate__(state)
def forward(self, src, src_mask=None, src_key_padding_mask=None):
"""Pass the input through the encoder layer.
Args:
src: the sequence to the encoder layer (required).
src_mask: the mask for the src sequence (optional).
src_key_padding_mask: the mask for the src keys per batch (optional).
Shape:
            see the docs in the PyTorch Transformer class.
"""
src2 = src
src2 = self.self_attn(src2, src2, src2, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)
src2 = src2[0]
src2 = src2 * self.resweight
src = src + self.dropout1(src2)
src2 = src
src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))
src2 = src2 * self.resweight
src = src + self.dropout2(src2)
return src
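# ReZero replaces layer normalization with a single learned scalar: each
# residual branch computes src + resweight * sublayer(src), and resweight
# is initialized to 0, so the layer starts as the identity. A hedged
# sketch (the helper name is illustrative; eval() disables dropout):
def _example_rztx_identity_at_init():
    layer = RZTXEncoderLayer(d_model=8, nhead=2).eval()
    src = torch.rand(10, 3, 8)
    out = layer(src)
    assert torch.allclose(out, src)  # resweight == 0 at initialization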
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'d_model': 4, 'nhead': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch.nn import Module
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.module import Module
from torch.nn.modules.activation import MultiheadAttention
from torch.nn.modules.dropout import Dropout
from torch.nn.modules.linear import Linear
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_mul_4(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp2 = tl.load(in_ptr2 + 0)
tmp3 = tl.broadcast_to(tmp2, [XBLOCK])
tmp4 = tmp1 * tmp3
tmp5 = tmp0 + tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 2048
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (12, 4), (4, 1))
assert_size_stride(primals_3, (12,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (1,), (1,))
assert_size_stride(primals_7, (2048, 4), (4, 1))
assert_size_stride(primals_8, (2048,), (1,))
assert_size_stride(primals_9, (4, 2048), (2048, 1))
assert_size_stride(primals_10, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, reinterpret_tensor(primals_2, (4, 4),
(1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_3, (4,), (1,), 4),
primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 16),
alpha=1, beta=1, out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_3, (4,), (1,), 8),
primals_1, reinterpret_tensor(primals_2, (4, 4), (1, 4), 32),
alpha=1, beta=1, out=buf2)
del primals_2
buf3 = reinterpret_tensor(buf0, (4, 4, 1), (1, 4, 16), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_mul_0[grid(16)](buf3, primals_3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_3
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf3, reinterpret_tensor(buf1, (4, 1, 4), (1, 1,
4), 0), out=buf4)
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(64)](buf4, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf6 = buf4
del buf4
triton_poi_fused__softmax_2[grid(64)](buf5, buf6, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf5
buf7 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf6, reinterpret_tensor(buf2, (4, 4, 1), (1, 4,
1), 0), out=buf7)
buf8 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
triton_poi_fused_clone_3[grid(4, 4)](buf7, buf8, 4, 4, XBLOCK=4,
YBLOCK=4, num_warps=1, num_stages=1)
buf9 = reinterpret_tensor(buf7, (4, 4), (4, 1), 0)
del buf7
extern_kernels.addmm(primals_5, reinterpret_tensor(buf8, (4, 4), (4,
1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha
=1, beta=1, out=buf9)
del primals_5
buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_mul_4[grid(16)](primals_1, buf9, primals_6,
buf10, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf11 = empty_strided_cuda((4, 2048), (2048, 1), torch.float32)
extern_kernels.mm(buf10, reinterpret_tensor(primals_7, (4, 2048), (
1, 4), 0), out=buf11)
buf12 = buf11
del buf11
triton_poi_fused_relu_5[grid(8192)](buf12, primals_8, 8192, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_8
buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_10, buf12, reinterpret_tensor(
primals_9, (2048, 4), (1, 2048), 0), alpha=1, beta=1, out=buf13)
del primals_10
buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_mul_4[grid(16)](buf10, buf13, primals_6, buf14,
16, XBLOCK=16, num_warps=1, num_stages=1)
return (buf14, primals_6, primals_1, buf6, reinterpret_tensor(buf8, (4,
4), (4, 1), 0), buf9, buf10, buf12, buf13, primals_9, primals_7,
primals_4, reinterpret_tensor(buf2, (4, 1, 4), (1, 1, 4), 0),
reinterpret_tensor(buf3, (4, 1, 4), (1, 1, 4), 0),
reinterpret_tensor(buf1, (4, 4, 1), (1, 4, 1), 0))
class RZTXEncoderLayerNew(Module):
"""RZTXEncoderLayer is made up of self-attn and feedforward network with
residual weights for faster convergece.
This encoder layer is based on the paper "ReZero is All You Need:
Fast Convergence at Large Depth".
Thomas Bachlechner∗, Bodhisattwa Prasad Majumder∗, Huanru Henry Mao∗,
Garrison W. Cottrell, Julian McAuley. 2020.
Args:
d_model: the number of expected features in the input (required).
nhead: the number of heads in the multiheadattention models (required).
dim_feedforward: the dimension of the feedforward network model (default=2048).
dropout: the dropout value (default=0.1).
activation: the activation function of intermediate layer, relu or gelu (default=relu).
use_res_init: Use residual initialization
Examples::
>>> encoder_layer = RZTXEncoderLayer(d_model=512, nhead=8)
>>> src = torch.rand(10, 32, 512)
>>> out = encoder_layer(src)
"""
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
activation='relu'):
super().__init__()
self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
self.linear1 = Linear(d_model, dim_feedforward)
self.dropout = Dropout(dropout)
self.linear2 = Linear(dim_feedforward, d_model)
self.dropout1 = Dropout(dropout)
self.dropout2 = Dropout(dropout)
self.resweight = nn.Parameter(torch.Tensor([0]))
if activation == 'relu':
self.activation = F.relu
elif activation == 'gelu':
self.activation = F.gelu
def __setstate__(self, state):
if 'activation' not in state:
state['activation'] = F.relu
super().__setstate__(state)
def forward(self, input_0):
primals_6 = self.resweight
primals_2 = self.self_attn.in_proj_weight
primals_3 = self.self_attn.in_proj_bias
primals_1 = self.self_attn.out_proj.weight
primals_5 = self.self_attn.out_proj.bias
primals_7 = self.linear1.weight
primals_8 = self.linear1.bias
primals_9 = self.linear2.weight
primals_10 = self.linear2.bias
primals_4 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9, primals_10])
return output[0]
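# Hedged usage sketch for the compiled variant; the guards in call() fix
# the input to a (4, 4) CUDA tensor with d_model == nhead == 4, and the
# helper name is an illustrative assumption.
def _example_rztx_compiled():
    m = RZTXEncoderLayerNew(d_model=4, nhead=4).cuda()
    out = m(torch.rand(4, 4, device='cuda'))
    assert out.shape == (4, 4)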
| mpariente/rezero | RZTXEncoderLayer | false | 16,127 | [
"MIT"
] | 376 | 6bcf1df00bc9a3560b093a2bbe12dade92f86eba | https://github.com/mpariente/rezero/tree/6bcf1df00bc9a3560b093a2bbe12dade92f86eba |
AddCoords | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/4r/c4rutd465an3gtvvxg6oun4mp4lnsb3j62d43pj3sv7fd3i7yk5c.py
# Topologically Sorted Source Nodes: [ret], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# ret => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%arg0_1, %sub, %sub_1], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 16) % 6
x3 = (xindex // 96)
x4 = xindex % 16
x0 = xindex % 4
x1 = (xindex // 4) % 4
x5 = xindex
tmp0 = x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x4 + (16*x2) + (64*x3)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 5, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = x0
tmp11 = tmp10.to(tl.float32)
tmp12 = 0.3333333333333333
tmp13 = tmp11 * tmp12
tmp14 = 2.0
tmp15 = tmp13 * tmp14
tmp16 = 1.0
tmp17 = tmp15 - tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp9, tmp17, tmp18)
tmp20 = tmp0 >= tmp7
tmp21 = tl.full([1], 6, tl.int64)
tmp22 = tmp0 < tmp21
tmp23 = x1
tmp24 = tmp23.to(tl.float32)
tmp25 = tmp24 * tmp12
tmp26 = tmp25 * tmp14
tmp27 = tmp26 - tmp16
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp20, tmp27, tmp28)
tmp30 = tl.where(tmp9, tmp19, tmp29)
tmp31 = tl.where(tmp4, tmp5, tmp30)
tl.store(out_ptr0 + (x5), tmp31, xmask)
''', device_str='cuda')
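# In the kernel above, 0.3333333333333333 is 1 / (x_dim - 1) with x_dim == 4,
# so `index * (1/3) * 2 - 1` maps pixel indices {0, 1, 2, 3} onto the
# normalized coordinates {-1, -1/3, 1/3, 1} produced by AddCoords.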
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 6, 4, 4), (96, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [ret], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(arg0_1, buf0, 384, grid=grid(384), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class AddCoords(nn.Module):
"""
    Append normalized coordinate channels to a tensor (CoordConv)
"""
def __init__(self, with_r=False):
super(AddCoords, self).__init__()
self.with_r = with_r
def forward(self, x):
"""
:param x: shape (batch, channel, x_dim, y_dim)
        :return: shape (batch, channel+2, x_dim, y_dim); channel+3 if with_r=True
"""
B, _, x_dim, y_dim = x.size()
xx_channel = torch.arange(x_dim).repeat(B, 1, y_dim, 1).type_as(x)
        yy_channel = torch.arange(y_dim).repeat(B, 1, x_dim, 1).permute(0,
            1, 3, 2).type_as(x)
        xx_channel = xx_channel.float() / (x_dim - 1)
        yy_channel = yy_channel.float() / (y_dim - 1)
        xx_channel = xx_channel * 2 - 1
        yy_channel = yy_channel * 2 - 1
        ret = torch.cat([x, xx_channel, yy_channel], dim=1)
        if self.with_r:
            rr = torch.sqrt(xx_channel ** 2 + yy_channel ** 2)
            ret = torch.cat([ret, rr], dim=1)
return ret
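# A hedged sketch (the helper name is illustrative): AddCoords appends two
# coordinate channels, each a linear ramp over [-1, 1].
def _example_add_coords():
    x = torch.rand(2, 3, 4, 4)  # square spatial dims, as the cat assumes
    out = AddCoords()(x)
    assert out.shape == (2, 5, 4, 4)  # channel + 2 coordinate maps
    assert torch.allclose(out[0, 3, 0], torch.tensor([-1.0, -1 / 3, 1 / 3, 1.0]))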
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16 % 6
x3 = xindex // 96
x4 = xindex % 16
x0 = xindex % 4
x1 = xindex // 4 % 4
x5 = xindex
tmp0 = x2
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x4 + 16 * x2 + 64 * x3), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 5, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = x0
tmp11 = tmp10.to(tl.float32)
tmp12 = 0.3333333333333333
tmp13 = tmp11 * tmp12
tmp14 = 2.0
tmp15 = tmp13 * tmp14
tmp16 = 1.0
tmp17 = tmp15 - tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp9, tmp17, tmp18)
tmp20 = tmp0 >= tmp7
tl.full([1], 6, tl.int64)
tmp23 = x1
tmp24 = tmp23.to(tl.float32)
tmp25 = tmp24 * tmp12
tmp26 = tmp25 * tmp14
tmp27 = tmp26 - tmp16
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp20, tmp27, tmp28)
tmp30 = tl.where(tmp9, tmp19, tmp29)
tmp31 = tl.where(tmp4, tmp5, tmp30)
tl.store(out_ptr0 + x5, tmp31, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 6, 4, 4), (96, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(384)](arg0_1, buf0, 384, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class AddCoordsNew(nn.Module):
"""
    Append normalized coordinate channels to a tensor (CoordConv)
"""
def __init__(self, with_r=False):
super(AddCoordsNew, self).__init__()
self.with_r = with_r
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| nandbhat/dressing-in-order | AddCoords | false | 16,128 | [
"BSD-3-Clause"
] | 172 | 93ed967f588de9f3f80dcc40c51d5790569fbcab | https://github.com/nandbhat/dressing-in-order/tree/93ed967f588de9f3f80dcc40c51d5790569fbcab |
CoordConv | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/4r/c4rutd465an3gtvvxg6oun4mp4lnsb3j62d43pj3sv7fd3i7yk5c.py
# Topologically Sorted Source Nodes: [ret], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# ret => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %sub, %sub_1], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 16) % 6
x3 = (xindex // 96)
x4 = xindex % 16
x0 = xindex % 4
x1 = (xindex // 4) % 4
x5 = xindex
tmp0 = x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x4 + (16*x2) + (64*x3)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 5, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = x0
tmp11 = tmp10.to(tl.float32)
tmp12 = 0.3333333333333333
tmp13 = tmp11 * tmp12
tmp14 = 2.0
tmp15 = tmp13 * tmp14
tmp16 = 1.0
tmp17 = tmp15 - tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp9, tmp17, tmp18)
tmp20 = tmp0 >= tmp7
tmp21 = tl.full([1], 6, tl.int64)
tmp22 = tmp0 < tmp21
tmp23 = x1
tmp24 = tmp23.to(tl.float32)
tmp25 = tmp24 * tmp12
tmp26 = tmp25 * tmp14
tmp27 = tmp26 - tmp16
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp20, tmp27, tmp28)
tmp30 = tl.where(tmp9, tmp19, tmp29)
tmp31 = tl.where(tmp4, tmp5, tmp30)
tl.store(out_ptr0 + (x5), tmp31, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/tc/ctcagp37ljugm52zu6ckorigrppqo67voefe2f2odg5r6hyllhyu.py
# Topologically Sorted Source Nodes: [ret_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# ret_1 => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 6, 4, 4), (96, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 6, 4, 4), (96, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [ret], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, buf0, 384, grid=grid(384), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [ret_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [ret_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf2, primals_3, 16, grid=grid(16), stream=stream0)
del primals_3
return (buf2, primals_2, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 6, 4, 4), (96, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from torch.nn.utils.spectral_norm import spectral_norm as SpectralNorm
def spectral_norm(module, use_spect=True):
"""use spectral normal layer to stable the training process"""
if use_spect:
return SpectralNorm(module)
else:
return module
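# For example (illustrative): spectral_norm(nn.Conv2d(3, 8, 3), use_spect=True)
# returns the same Conv2d with its weight reparameterized by
# torch.nn.utils.spectral_norm, constraining the weight's spectral norm to
# roughly 1 during training; with use_spect=False the module passes through.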
class AddCoords(nn.Module):
"""
    Append normalized coordinate channels to a tensor (CoordConv)
"""
def __init__(self, with_r=False):
super(AddCoords, self).__init__()
self.with_r = with_r
def forward(self, x):
"""
:param x: shape (batch, channel, x_dim, y_dim)
        :return: shape (batch, channel+2, x_dim, y_dim); channel+3 if with_r=True
"""
B, _, x_dim, y_dim = x.size()
xx_channel = torch.arange(x_dim).repeat(B, 1, y_dim, 1).type_as(x)
        yy_channel = torch.arange(y_dim).repeat(B, 1, x_dim, 1).permute(0,
            1, 3, 2).type_as(x)
        xx_channel = xx_channel.float() / (x_dim - 1)
        yy_channel = yy_channel.float() / (y_dim - 1)
        xx_channel = xx_channel * 2 - 1
        yy_channel = yy_channel * 2 - 1
        ret = torch.cat([x, xx_channel, yy_channel], dim=1)
        if self.with_r:
            rr = torch.sqrt(xx_channel ** 2 + yy_channel ** 2)
            ret = torch.cat([ret, rr], dim=1)
return ret
class CoordConv(nn.Module):
"""
CoordConv operation
"""
def __init__(self, input_nc, output_nc, with_r=False, use_spect=False,
**kwargs):
super(CoordConv, self).__init__()
self.addcoords = AddCoords(with_r=with_r)
input_nc = input_nc + 2
if with_r:
input_nc = input_nc + 1
self.conv = spectral_norm(nn.Conv2d(input_nc, output_nc, **kwargs),
use_spect)
def forward(self, x):
ret = self.addcoords(x)
ret = self.conv(ret)
return ret
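# A hedged sketch (the helper name is illustrative): CoordConv is AddCoords
# followed by Conv2d, so input_nc is widened by 2 (or 3 with with_r=True)
# before the convolution.
def _example_coord_conv():
    conv = CoordConv(input_nc=4, output_nc=4, kernel_size=4)
    y = conv(torch.rand(4, 4, 4, 4))
    assert y.shape == (4, 4, 1, 1)  # 4x4 kernel, no padding, on a 4x4 input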
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_nc': 4, 'output_nc': 4, 'kernel_size': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.nn.utils.spectral_norm import spectral_norm as SpectralNorm
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16 % 6
x3 = xindex // 96
x4 = xindex % 16
x0 = xindex % 4
x1 = xindex // 4 % 4
x5 = xindex
tmp0 = x2
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x4 + 16 * x2 + 64 * x3), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 5, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = x0
tmp11 = tmp10.to(tl.float32)
tmp12 = 0.3333333333333333
tmp13 = tmp11 * tmp12
tmp14 = 2.0
tmp15 = tmp13 * tmp14
tmp16 = 1.0
tmp17 = tmp15 - tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp9, tmp17, tmp18)
tmp20 = tmp0 >= tmp7
tl.full([1], 6, tl.int64)
tmp23 = x1
tmp24 = tmp23.to(tl.float32)
tmp25 = tmp24 * tmp12
tmp26 = tmp25 * tmp14
tmp27 = tmp26 - tmp16
tmp28 = tl.full(tmp27.shape, 0.0, tmp27.dtype)
tmp29 = tl.where(tmp20, tmp27, tmp28)
tmp30 = tl.where(tmp9, tmp19, tmp29)
tmp31 = tl.where(tmp4, tmp5, tmp30)
tl.store(out_ptr0 + x5, tmp31, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 6, 4, 4), (96, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 6, 4, 4), (96, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(384)](primals_1, buf0, 384, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(16)](buf2, primals_3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
return buf2, primals_2, buf0
def spectral_norm(module, use_spect=True):
"""use spectral normal layer to stable the training process"""
if use_spect:
return SpectralNorm(module)
else:
return module
class AddCoords(nn.Module):
"""
    Append normalized coordinate channels to a tensor (CoordConv)
"""
def __init__(self, with_r=False):
super(AddCoords, self).__init__()
self.with_r = with_r
def forward(self, x):
"""
:param x: shape (batch, channel, x_dim, y_dim)
        :return: shape (batch, channel+2, x_dim, y_dim); channel+3 if with_r=True
"""
B, _, x_dim, y_dim = x.size()
xx_channel = torch.arange(x_dim).repeat(B, 1, y_dim, 1).type_as(x)
        yy_channel = torch.arange(y_dim).repeat(B, 1, x_dim, 1).permute(0,
            1, 3, 2).type_as(x)
        xx_channel = xx_channel.float() / (x_dim - 1)
        yy_channel = yy_channel.float() / (y_dim - 1)
        xx_channel = xx_channel * 2 - 1
        yy_channel = yy_channel * 2 - 1
        ret = torch.cat([x, xx_channel, yy_channel], dim=1)
        if self.with_r:
            rr = torch.sqrt(xx_channel ** 2 + yy_channel ** 2)
            ret = torch.cat([ret, rr], dim=1)
return ret
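# --- Shape-check sketch (added for illustration; not from the original repo,
# and the helper name below is made up). AddCoords appends two coordinate
# channels normalized to [-1, 1], so a (B, C, H, W) input becomes
# (B, C + 2, H, W); with_r=True adds one more radius channel.
def _addcoords_shape_demo():
    coords = AddCoords(with_r=False)
    out = coords(torch.rand(4, 4, 4, 4))
    assert out.shape == (4, 6, 4, 4)
    return out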
class CoordConvNew(nn.Module):
"""
CoordConv operation
"""
def __init__(self, input_nc, output_nc, with_r=False, use_spect=False,
**kwargs):
super(CoordConvNew, self).__init__()
self.addcoords = AddCoords(with_r=with_r)
input_nc = input_nc + 2
if with_r:
input_nc = input_nc + 1
self.conv = spectral_norm(nn.Conv2d(input_nc, output_nc, **kwargs),
use_spect)
def forward(self, input_0):
primals_2 = self.conv.weight
primals_3 = self.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| nandbhat/dressing-in-order | CoordConv | false | 16,129 | [
"BSD-3-Clause"
] | 172 | 93ed967f588de9f3f80dcc40c51d5790569fbcab | https://github.com/nandbhat/dressing-in-order/tree/93ed967f588de9f3f80dcc40c51d5790569fbcab |
SLN | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/hp/chpdwpegv6lvistek2wqgimtufecqvfp6grp5rpblk5yjicjzqd2.py
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
# Source node to ATen node mapping:
# layer_norm => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_5, [3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused_native_layer_norm_0 = async_compile.triton('triton_poi_fused_native_layer_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_native_layer_norm_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/gg/cggxeeiltr6q3hadhy3xly5zo5i7ee2zz54qysg5jxzgyuu37b52.py
# Topologically Sorted Source Nodes: [mul, layer_norm, mul_1, mul_2, add], Original ATen: [aten.mul, aten.native_layer_norm, aten.add]
# Source node to ATen node mapping:
# add => add_2
# layer_norm => add, add_1, mul_1, mul_2, rsqrt, sub, var_mean
# mul => mul
# mul_1 => mul_3
# mul_2 => mul_4
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %primals_2), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%primals_5, [3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_5, %getitem_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %primals_3), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %primals_4), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add_1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_6, %primals_2), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %mul_4), kwargs = {})
triton_poi_fused_add_mul_native_layer_norm_1 = async_compile.triton('triton_poi_fused_add_mul_native_layer_norm_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_native_layer_norm_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (0))
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + (x2), xmask)
tmp4 = tl.load(in_ptr2 + (x2), xmask)
tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr6 + (x0), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr7 + (0))
tmp15 = tl.broadcast_to(tmp14, [XBLOCK])
tmp3 = tmp1 * tmp2
tmp6 = tmp4 - tmp5
tmp8 = tmp6 * tmp7
tmp10 = tmp8 * tmp9
tmp12 = tmp10 + tmp11
tmp13 = tmp3 * tmp12
tmp16 = tmp15 * tmp2
tmp17 = tmp13 + tmp16
tl.store(out_ptr0 + (x2), tmp17, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (1, 1, 1), (1, 1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_6, (1, 1, 1), (1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
# Topologically Sorted Source Nodes: [layer_norm], Original ATen: [aten.native_layer_norm]
stream0 = get_raw_stream(0)
triton_poi_fused_native_layer_norm_0.run(primals_5, buf0, buf1, 64, grid=grid(64), stream=stream0)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, layer_norm, mul_1, mul_2, add], Original ATen: [aten.mul, aten.native_layer_norm, aten.add]
triton_poi_fused_add_mul_native_layer_norm_1.run(primals_1, primals_2, primals_5, buf0, buf1, primals_3, primals_4, primals_6, buf2, 256, grid=grid(256), stream=stream0)
del buf0
del buf1
del primals_6
return (buf2, primals_1, primals_2, primals_3, primals_4, primals_5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 1, 1), (1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 1, 1), (1, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class SLN(nn.Module):
"""
Self-modulated LayerNorm
"""
def __init__(self, num_features):
super(SLN, self).__init__()
self.ln = nn.LayerNorm(num_features)
self.gamma = nn.Parameter(torch.randn(1, 1, 1))
self.beta = nn.Parameter(torch.randn(1, 1, 1))
def forward(self, hl, w):
return self.gamma * w * self.ln(hl) + self.beta * w
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_features': 4}]
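# --- Usage sketch (added for illustration; the helper name is made up).
# SLN computes gamma * w * LayerNorm(hl) + beta * w, with scalar learned
# gamma/beta broadcast over the input, so the output keeps hl's shape.
def _sln_demo():
    sln = SLN(num_features=4)
    hl, w = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
    out = sln(hl, w)
    assert out.shape == hl.shape
    return out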
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_native_layer_norm_0(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
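    # LayerNorm statistics over the last dim (size 4): row mean, then rsqrt(var + 1e-05)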
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
@triton.jit
def triton_poi_fused_add_mul_native_layer_norm_1(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + x2, xmask)
tmp4 = tl.load(in_ptr2 + x2, xmask)
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr6 + x0, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr7 + 0)
tmp15 = tl.broadcast_to(tmp14, [XBLOCK])
tmp3 = tmp1 * tmp2
tmp6 = tmp4 - tmp5
tmp8 = tmp6 * tmp7
tmp10 = tmp8 * tmp9
tmp12 = tmp10 + tmp11
tmp13 = tmp3 * tmp12
tmp16 = tmp15 * tmp2
tmp17 = tmp13 + tmp16
tl.store(out_ptr0 + x2, tmp17, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (1, 1, 1), (1, 1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_6, (1, 1, 1), (1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
buf1 = empty_strided_cuda((4, 4, 4, 1), (16, 4, 1, 64), torch.float32)
get_raw_stream(0)
triton_poi_fused_native_layer_norm_0[grid(64)](primals_5, buf0,
buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_native_layer_norm_1[grid(256)](primals_1,
primals_2, primals_5, buf0, buf1, primals_3, primals_4,
primals_6, buf2, 256, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del buf1
del primals_6
return buf2, primals_1, primals_2, primals_3, primals_4, primals_5
class SLNNew(nn.Module):
"""
Self-modulated LayerNorm
"""
def __init__(self, num_features):
super(SLNNew, self).__init__()
self.ln = nn.LayerNorm(num_features)
self.gamma = nn.Parameter(torch.randn(1, 1, 1))
self.beta = nn.Parameter(torch.randn(1, 1, 1))
def forward(self, input_0, input_1):
primals_1 = self.gamma
primals_6 = self.beta
primals_3 = self.ln.weight
primals_4 = self.ln.bias
primals_2 = input_0
primals_5 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
| nandkishore1/TransformersInVision | SLN | false | 16,130 | [
"MIT"
] | 94 | 134ef26b63916d2b9ae384124de7365a97102b06 | https://github.com/nandkishore1/TransformersInVision/tree/134ef26b63916d2b9ae384124de7365a97102b06 |
ToLongTensor | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ml/cmltiy5hrv2wqhdqt3fezcfhgimkd7dolxjtsc7omtdmbpeu3exe.py
# Topologically Sorted Source Nodes: [tensor], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# tensor => convert_element_type
# Graph fragment:
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%arg0_1, torch.float32), kwargs = {})
triton_poi_fused__to_copy_0 = async_compile.triton('triton_poi_fused__to_copy_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [tensor], Original ATen: [aten._to_copy]
stream0 = get_raw_stream(0)
triton_poi_fused__to_copy_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import Tensor
from typing import List
import torch.nn as nn
class ToLongTensor(nn.Module):
"""Convert a list of integers to long tensor
"""
def __init__(self):
super(ToLongTensor, self).__init__()
def forward(self, tokens: 'List[List[int]]') ->Tensor:
return torch.tensor(tokens)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
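# --- Dtype sketch (added for illustration; the helper name is made up).
# torch.tensor infers int64 for nested Python ints, which is what makes
# this module return a long tensor for List[List[int]] input.
def _to_long_tensor_demo():
    out = ToLongTensor()([[1, 2], [3, 4]])
    assert out.dtype == torch.int64
    return out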
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tl.store(out_ptr0 + x0, tmp0, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__to_copy_0[grid(256)](arg0_1, buf0, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class ToLongTensorNew(nn.Module):
"""Convert a list of integers to long tensor
"""
def __init__(self):
super(ToLongTensorNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| nateanl/text | ToLongTensor | false | 16,131 | [
"BSD-3-Clause"
] | 3,172 | b26e9350ad387a84aefe131443bbbf1c51a8a618 | https://github.com/nateanl/text/tree/b26e9350ad387a84aefe131443bbbf1c51a8a618 |
BucketingEmbedding | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/zc/czczpntvru6vzazogc4u5l3mhhrvpw4ov6oi77nd2po6duqg6kus.py
# Topologically Sorted Source Nodes: [sub, mul, x, clamp, x_1], Original ATen: [aten.sub, aten.mul, aten.div, aten.clamp, aten._to_copy]
# Source node to ATen node mapping:
# clamp => clamp_max, clamp_min
# mul => mul
# sub => sub
# x => div
# x_1 => convert_element_type
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, 4), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, 4), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, 0), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%div, 0), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 3), kwargs = {})
# %convert_element_type : [num_users=2] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%clamp_max, torch.int64), kwargs = {})
triton_poi_fused__to_copy_clamp_div_mul_sub_0 = async_compile.triton('triton_poi_fused__to_copy_clamp_div_mul_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_clamp_div_mul_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_clamp_div_mul_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 4.0
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp1
tmp4 = float("inf")
tmp5 = tmp3 * tmp4
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = 3.0
tmp9 = triton_helpers.minimum(tmp7, tmp8)
tmp10 = tmp9.to(tl.int64)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ov/cov4bh5ozngd5yjfo2obfdfgatrxsglkm6g6csjzj5mz5j4armmx.py
# Topologically Sorted Source Nodes: [embedding], Original ATen: [aten.embedding]
# Source node to ATen node mapping:
# embedding => embedding
# Graph fragment:
# %embedding : [num_users=1] = call_function[target=torch.ops.aten.embedding.default](args = (%primals_2, %convert_element_type), kwargs = {})
triton_poi_fused_embedding_1 = async_compile.triton('triton_poi_fused_embedding_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_embedding_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_embedding_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert(((0 <= tmp4) & (tmp4 < 4)) | ~(xmask), "index out of bounds: 0 <= tmp4 < 4")
tmp6 = tl.load(in_ptr1 + (x0 + (4*tmp4)), xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int64)
# Topologically Sorted Source Nodes: [sub, mul, x, clamp, x_1], Original ATen: [aten.sub, aten.mul, aten.div, aten.clamp, aten._to_copy]
stream0 = get_raw_stream(0)
triton_poi_fused__to_copy_clamp_div_mul_sub_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [embedding], Original ATen: [aten.embedding]
triton_poi_fused_embedding_1.run(buf0, primals_2, buf1, 1024, grid=grid(1024), stream=stream0)
del primals_2
return (buf1, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class BucketingEmbedding(nn.Module):
def __init__(self, min_val, max_val, count, dim, use_log_scale=False):
super().__init__()
self.min_val = min_val
self.max_val = max_val
self.count = count
self.dim = dim
self.use_log_scale = use_log_scale
if self.use_log_scale:
self.min_val = torch.log2(torch.Tensor([self.min_val])).item()
self.max_val = torch.log2(torch.Tensor([self.max_val])).item()
self.main = nn.Embedding(count, dim)
def forward(self, x):
"""
x - (bs, ) values
"""
if self.use_log_scale:
x = torch.log2(x)
x = self.count * (x - self.min_val) / (self.max_val - self.min_val)
x = torch.clamp(x, 0, self.count - 1).long()
return self.main(x)
def get_class(self, x):
"""
x - (bs, ) values
"""
if self.use_log_scale:
x = torch.log2(x)
x = self.count * (x - self.min_val) / (self.max_val - self.min_val)
x = torch.clamp(x, 0, self.count - 1).long()
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'min_val': 4, 'max_val': 4, 'count': 4, 'dim': 4}]
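# --- Bucketing sketch (added for illustration; the helper name is made up).
# Uses a non-degenerate range, unlike the mock init above where
# min_val == max_val == 4 makes the divisor zero.
def _bucketing_demo():
    emb = BucketingEmbedding(min_val=0.0, max_val=10.0, count=4, dim=4)
    x = torch.tensor([0.0, 2.5, 5.0, 9.9])
    # bucket = floor(clamp(count * (x - min) / (max - min), 0, count - 1))
    assert emb.get_class(x).tolist() == [0, 1, 2, 3]
    return emb(x)  # embedding lookup, shape (4, 4)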
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__to_copy_clamp_div_mul_sub_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 4.0
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp1
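    # 1/(max_val - min_val) folded at compile time: the mock init has min_val == max_val == 4, so the factor is inf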
tmp4 = float('inf')
tmp5 = tmp3 * tmp4
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = 3.0
tmp9 = triton_helpers.minimum(tmp7, tmp8)
tmp10 = tmp9.to(tl.int64)
tl.store(out_ptr0 + x0, tmp10, xmask)
@triton.jit
def triton_poi_fused_embedding_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
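    # wrap negative bucket indices Python-style, then bounds-check before gathering the embedding row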
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 4) | ~xmask,
'index out of bounds: 0 <= tmp4 < 4')
tmp6 = tl.load(in_ptr1 + (x0 + 4 * tmp4), xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int64)
get_raw_stream(0)
triton_poi_fused__to_copy_clamp_div_mul_sub_0[grid(256)](primals_1,
buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
triton_poi_fused_embedding_1[grid(1024)](buf0, primals_2, buf1,
1024, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
return buf1, buf0
class BucketingEmbeddingNew(nn.Module):
def __init__(self, min_val, max_val, count, dim, use_log_scale=False):
super().__init__()
self.min_val = min_val
self.max_val = max_val
self.count = count
self.dim = dim
self.use_log_scale = use_log_scale
if self.use_log_scale:
self.min_val = torch.log2(torch.Tensor([self.min_val])).item()
self.max_val = torch.log2(torch.Tensor([self.max_val])).item()
self.main = nn.Embedding(count, dim)
def get_class(self, x):
"""
x - (bs, ) values
"""
if self.use_log_scale:
x = torch.log2(x)
x = self.count * (x - self.min_val) / (self.max_val - self.min_val)
x = torch.clamp(x, 0, self.count - 1).long()
return x
def forward(self, input_0):
primals_2 = self.main.weight
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
| narekvslife/OccupancyAnticipation | BucketingEmbedding | false | 16,132 | [
"MIT"
] | 53 | 19b9f4d72b114339d07bd225a1c3feed73e982c2 | https://github.com/narekvslife/OccupancyAnticipation/tree/19b9f4d72b114339d07bd225a1c3feed73e982c2 |
conv_head_pooling | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/6r/c6rkywilfuy64m6cuftgkvjhytprq2kemqwqphmypsicig6wdmin.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 4), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 9) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 2, 2), (4, 4, 2, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf0, (4, 4, 3, 3), (36, 9, 3, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_2, 144, grid=grid(144), stream=stream0)
del primals_2
return (buf1, primals_1, primals_3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 1, 2, 2), (4, 4, 2, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class conv_head_pooling(nn.Module):
def __init__(self, in_feature, out_feature, stride, padding_mode='zeros'):
super(conv_head_pooling, self).__init__()
self.conv = nn.Conv2d(in_feature, out_feature, kernel_size=stride +
1, padding=stride // 2, stride=stride, padding_mode=
padding_mode, groups=in_feature)
def forward(self, x):
x = self.conv(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_feature': 4, 'out_feature': 4, 'stride': 1}]
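# --- Shape sketch (added for illustration; the helper name is made up).
# kernel_size = stride + 1 with padding = stride // 2 gives the familiar
# "halve the feature map" depthwise pooling when stride = 2.
def _pooling_demo():
    pool = conv_head_pooling(in_feature=4, out_feature=8, stride=2)
    out = pool(torch.rand(1, 4, 16, 16))
    assert out.shape == (1, 8, 8, 8)
    return out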
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 9 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 2, 2), (4, 4, 2, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf0, (4, 4, 3, 3), (36, 9, 3, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(144)](buf1, primals_2, 144,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
return buf1, primals_1, primals_3
class conv_head_poolingNew(nn.Module):
def __init__(self, in_feature, out_feature, stride, padding_mode='zeros'):
super(conv_head_poolingNew, self).__init__()
self.conv = nn.Conv2d(in_feature, out_feature, kernel_size=stride +
1, padding=stride // 2, stride=stride, padding_mode=
padding_mode, groups=in_feature)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| naver-ai/PfLayer | conv_head_pooling | false | 16,133 | [
"Apache-2.0"
] | 59 | da8f80b2ea3b6bd7fbee3beee8b1516c89bc0441 | https://github.com/naver-ai/PfLayer/tree/da8f80b2ea3b6bd7fbee3beee8b1516c89bc0441 |
Dec | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/xq/cxq5gf6fvkdaf4nlflmjjam5s54pkng7rwuy4ocdtqxfwauulkm3.py
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.leaky_relu, aten.leaky_relu_backward]
# Source node to ATen node mapping:
# h => gt, mul, where
# Graph fragment:
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_1, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 0.2), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %view_1, %mul), kwargs = {})
# %gt_3 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%view_6, 0), kwargs = {})
triton_poi_fused_leaky_relu_leaky_relu_backward_0 = async_compile.triton('triton_poi_fused_leaky_relu_leaky_relu_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_leaky_relu_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_leaky_relu_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
tl.store(in_out_ptr0 + (x4), tmp7, xmask)
tl.store(out_ptr0 + (x4), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/6h/c6hgrncbhy7kjladlqflhqnw52mciqxt6qj53hxyw2giskevmcnl.py
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.view]
# Source node to ATen node mapping:
# linear_1 => view_7
# Graph fragment:
# %view_7 : [num_users=2] = call_function[target=torch.ops.aten.reshape.default](args = (%view_6, [64, 4]), kwargs = {})
triton_poi_fused_view_1 = async_compile.triton('triton_poi_fused_view_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_view_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x1) + (16*((x1 % 4) // 4)) + (64*(((4*((x1 // 4) % 4)) + (x1 % 4)) // 16))), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.leaky_relu, aten.leaky_relu_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_leaky_relu_leaky_relu_backward_0.run(buf1, primals_2, buf8, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.view]
triton_poi_fused_view_1.run(buf1, buf2, 256, grid=grid(256), stream=stream0)
buf3 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf3 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.leaky_relu, aten.leaky_relu_backward]
triton_poi_fused_leaky_relu_leaky_relu_backward_0.run(buf4, primals_5, buf7, 256, grid=grid(256), stream=stream0)
del primals_5
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_2], Original ATen: [aten.view]
triton_poi_fused_view_1.run(buf4, buf5, 256, grid=grid(256), stream=stream0)
buf6 = reinterpret_tensor(buf4, (64, 4), (4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [h_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, buf5, reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf6)
del primals_7
return (reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf2, buf5, primals_6, buf7, primals_4, buf8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
m.weight.data.normal_(0.0, 0.02)
m.bias.data.fill_(0)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
class Dec(nn.Module):
def __init__(self, opt):
super(Dec, self).__init__()
self.fc1 = nn.Linear(opt.resSize, opt.ngh)
self.fc2 = nn.Linear(opt.ngh, opt.ngh)
self.fc3 = nn.Linear(opt.ngh, opt.attSize)
self.lrelu = nn.LeakyReLU(0.2, True)
self.relu = nn.ReLU(True)
self.apply(weights_init)
def forward(self, feat):
h = self.lrelu(self.fc1(feat))
h = self.lrelu(self.fc2(h))
h = self.fc3(h)
return h
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'opt': _mock_config(resSize=4, ngh=4, attSize=4)}]
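# --- Usage sketch (added for illustration; the helper name is made up and
# _mock_config stands in for the real argparse options object).
# Dec maps resSize features to attSize through two LeakyReLU(0.2) layers.
def _dec_demo():
    opt = _mock_config(resSize=4, ngh=4, attSize=4)
    dec = Dec(opt)
    out = dec(torch.rand(4, 4, 4, 4))
    assert out.shape == (4, 4, 4, 4)
    return out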
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_leaky_relu_leaky_relu_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tmp8 = tmp7 > tmp3
tl.store(in_out_ptr0 + x4, tmp7, xmask)
tl.store(out_ptr0 + x4, tmp8, xmask)
@triton.jit
def triton_poi_fused_view_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1 + 16 * (x1 % 4 // 4) + 64 * ((4 *
(x1 // 4 % 4) + x1 % 4) // 16)), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_leaky_relu_leaky_relu_backward_0[grid(256)](buf1,
primals_2, buf8, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
triton_poi_fused_view_1[grid(256)](buf1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (64, 4), (4, 1), 0)
del buf1
extern_kernels.mm(buf2, reinterpret_tensor(primals_4, (4, 4), (1, 4
), 0), out=buf3)
buf4 = reinterpret_tensor(buf3, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf3
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_leaky_relu_leaky_relu_backward_0[grid(256)](buf4,
primals_5, buf7, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf5 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
triton_poi_fused_view_1[grid(256)](buf4, buf5, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf4, (64, 4), (4, 1), 0)
del buf4
extern_kernels.addmm(primals_7, buf5, reinterpret_tensor(primals_6,
(4, 4), (1, 4), 0), alpha=1, beta=1, out=buf6)
del primals_7
return reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf2, buf5, primals_6, buf7, primals_4, buf8
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
m.weight.data.normal_(0.0, 0.02)
m.bias.data.fill_(0)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
class DecNew(nn.Module):
def __init__(self, opt):
super(DecNew, self).__init__()
self.fc1 = nn.Linear(opt.resSize, opt.ngh)
self.fc2 = nn.Linear(opt.ngh, opt.ngh)
self.fc3 = nn.Linear(opt.ngh, opt.attSize)
self.lrelu = nn.LeakyReLU(0.2, True)
self.relu = nn.ReLU(True)
self.apply(weights_init)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_6 = self.fc3.weight
primals_7 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
| naraysa/gzsl-od | Dec | false | 16,134 | [
"MIT"
] | 50 | be771e12e17a4c02386c70697c4b26e3170a7557 | https://github.com/naraysa/gzsl-od/tree/be771e12e17a4c02386c70697c4b26e3170a7557 |
MultiheadAttention | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/5w/c5wnubyijcgstpnbhnht5ommr737mwfx67lgpfc6mvwlwmhzfkmq.py
# Topologically Sorted Source Nodes: [q_1], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# q_1 => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 1.0), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/sf/csfcrqkks4iayy4on2awtrbqheshqxffc3mlksj52ra4iznyf36o.py
# Topologically Sorted Source Nodes: [min_1, sub, max_1, sub_1, attn_weights_1], Original ATen: [aten.min, aten.sub, aten.max, aten.div, aten.isnan, aten.logical_and, aten.eq, aten.logical_or]
# Source node to ATen node mapping:
# attn_weights_1 => div
# max_1 => max_1
# min_1 => min_1
# sub => sub
# sub_1 => sub_1
# Graph fragment:
# %min_1 : [num_users=4] = call_function[target=torch.ops.aten.min.default](args = (%bmm,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%bmm, %min_1), kwargs = {})
# %max_1 : [num_users=3] = call_function[target=torch.ops.aten.max.default](args = (%bmm,), kwargs = {})
# %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%max_1, %min_1), kwargs = {})
# %div : [num_users=3] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub, %sub_1), kwargs = {})
# %isnan : [num_users=2] = call_function[target=torch.ops.aten.isnan.default](args = (%bmm,), kwargs = {})
# %isnan_1 : [num_users=1] = call_function[target=torch.ops.aten.isnan.default](args = (%min_1,), kwargs = {})
# %logical_and : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%isnan, %isnan_1), kwargs = {})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%bmm, %min_1), kwargs = {})
# %logical_or : [num_users=1] = call_function[target=torch.ops.aten.logical_or.default](args = (%eq, %logical_and), kwargs = {})
# %isnan_3 : [num_users=1] = call_function[target=torch.ops.aten.isnan.default](args = (%max_1,), kwargs = {})
# %logical_and_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_and.default](args = (%isnan, %isnan_3), kwargs = {})
# %eq_1 : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%bmm, %max_1), kwargs = {})
# %logical_or_1 : [num_users=1] = call_function[target=torch.ops.aten.logical_or.default](args = (%eq_1, %logical_and_1), kwargs = {})
triton_per_fused_div_eq_isnan_logical_and_logical_or_max_min_sub_1 = async_compile.triton('triton_per_fused_div_eq_isnan_logical_and_logical_or_max_min_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 1024],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: '*i1', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {5: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 6), equal_to_1=(5,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_eq_isnan_logical_and_logical_or_max_min_sub_1', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_eq_isnan_logical_and_logical_or_max_min_sub_1(in_ptr0, out_ptr2, out_ptr3, out_ptr4, out_ptr5, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 1024
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = triton_helpers.promote_to_tensor(triton_helpers.min2(tmp1, 0))
tmp5 = triton_helpers.promote_to_tensor(triton_helpers.max2(tmp1, 0))
tmp6 = tmp5 - tmp3
tmp7 = tmp0 - tmp3
tmp8 = tmp7 / tmp6
tmp9 = tmp0 == tmp3
tmp10 = libdevice.isnan(tmp0).to(tl.int1)
tmp11 = libdevice.isnan(tmp3).to(tl.int1)
tmp12 = tmp10 & tmp11
tmp13 = tmp9 | tmp12
tmp14 = tmp0 == tmp5
tmp15 = libdevice.isnan(tmp5).to(tl.int1)
tmp16 = tmp10 & tmp15
tmp17 = tmp14 | tmp16
tl.store(out_ptr2 + (tl.full([1], 0, tl.int32)), tmp6, None)
tl.store(out_ptr3 + (tl.broadcast_to(r0, [RBLOCK])), tmp8, None)
tl.store(out_ptr4 + (tl.broadcast_to(r0, [RBLOCK])), tmp13, None)
tl.store(out_ptr5 + (tl.broadcast_to(r0, [RBLOCK])), tmp17, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/qa/cqazar4hg4rdjbxm7zr5mix2w3dkhfmvvjksn7c6lktr5yfe6ndy.py
# Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# contiguous_3 => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_8,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x1 + (16*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/c2/cc2wsialcqiknwetscnqy3fzaqmmib3cxfb7tsfjx7hdlsxbdq7s.py
# Topologically Sorted Source Nodes: [sum_1, attn_weights_4], Original ATen: [aten.sum, aten.div]
# Source node to ATen node mapping:
# attn_weights_4 => div_1
# sum_1 => sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%view_17, [1]), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, 4), kwargs = {})
triton_poi_fused_div_sum_3 = async_compile.triton('triton_poi_fused_div_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_sum_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = (xindex // 64)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (256*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0 + (256*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (128 + x0 + (256*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (192 + x0 + (256*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [k], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(primals_5, (4, ), (1, ), 4), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 16), alpha=1, beta=1, out=buf1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [v], Original ATen: [aten.addmm]
extern_kernels.addmm(reinterpret_tensor(primals_5, (4, ), (1, ), 8), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha=1, beta=1, out=buf2)
del primals_4
buf3 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [q_1], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(buf3, primals_5, 64, grid=grid(64), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((16, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_weights], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (1, 16, 0), 0), reinterpret_tensor(buf1, (16, 1, 16), (1, 1, 16), 0), out=buf4)
buf7 = empty_strided_cuda((), (), torch.float32)
buf8 = empty_strided_cuda((16, 4, 16), (64, 16, 1), torch.float32)
buf13 = empty_strided_cuda((16, 4, 16), (64, 16, 1), torch.bool)
buf14 = empty_strided_cuda((16, 4, 16), (64, 16, 1), torch.bool)
# Topologically Sorted Source Nodes: [min_1, sub, max_1, sub_1, attn_weights_1], Original ATen: [aten.min, aten.sub, aten.max, aten.div, aten.isnan, aten.logical_and, aten.eq, aten.logical_or]
triton_per_fused_div_eq_isnan_logical_and_logical_or_max_min_sub_1.run(buf4, buf7, buf8, buf13, buf14, 1, 1024, grid=grid(1), stream=stream0)
del buf4
buf9 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn], Original ATen: [aten.bmm]
extern_kernels.bmm(buf8, reinterpret_tensor(buf2, (16, 16, 1), (1, 16, 1), 0), out=buf9)
buf10 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [contiguous_3], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf9, buf10, 4, 16, grid=grid(4, 16), stream=stream0)
buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0); del buf9 # reuse
# Topologically Sorted Source Nodes: [attn_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf10, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf11)
del primals_7
buf12 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [sum_1, attn_weights_4], Original ATen: [aten.sum, aten.div]
triton_poi_fused_div_sum_3.run(buf8, buf12, 256, grid=grid(256), stream=stream0)
return (reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0), buf12, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), buf7, buf8, reinterpret_tensor(buf10, (16, 4), (4, 1), 0), primals_6, reinterpret_tensor(buf2, (16, 1, 16), (1, 1, 16), 0), buf13, buf14, reinterpret_tensor(buf3, (16, 1, 4), (1, 1, 16), 0), reinterpret_tensor(buf1, (16, 16, 1), (1, 16, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((12, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
import torch.utils
from torch.nn.parameter import Parameter
import torch.nn.functional as F
from torch.nn import Parameter
class MultiheadAttention(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, embed_dim, num_heads, attn_dropout=0.0, bias=True,
add_bias_kv=False, add_zero_attn=False):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.attn_dropout = attn_dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads'
self.scaling = self.head_dim ** -0.5
self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))
if bias:
self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))
else:
self.register_parameter('in_proj_bias', None)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
if add_bias_kv:
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.in_proj_weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.in_proj_bias is not None:
nn.init.constant_(self.in_proj_bias, 0.0)
nn.init.constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
nn.init.xavier_normal_(self.bias_k)
if self.bias_v is not None:
nn.init.xavier_normal_(self.bias_v)
def forward(self, query, key, value, attn_mask=None):
"""Input shape: Time x Batch x Channel
Self-attention can be implemented by passing in the same arguments for
query, key and value. Timesteps can be masked by supplying a T x T mask in the
        `attn_mask` argument. Note that this implementation takes no
        `key_padding_mask` argument, so padding elements cannot be excluded
        from the key here.
        """
qkv_same = query.data_ptr() == key.data_ptr() == value.data_ptr()
kv_same = key.data_ptr() == value.data_ptr()
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == self.embed_dim
assert list(query.size()) == [tgt_len, bsz, embed_dim]
assert key.size() == value.size()
if qkv_same:
q, k, v = self.in_proj_qkv(query)
elif kv_same:
q = self.in_proj_q(query)
if key is None:
assert value is None
k = v = None
else:
k, v = self.in_proj_kv(key)
else:
q = self.in_proj_q(query)
k = self.in_proj_k(key)
v = self.in_proj_v(value)
q *= self.scaling
if self.bias_k is not None:
assert self.bias_v is not None
k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(
attn_mask.size(0), 1)], dim=1)
q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim
).transpose(0, 1)
if k is not None:
k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim
).transpose(0, 1)
if v is not None:
v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim
).transpose(0, 1)
src_len = k.size(1)
if self.add_zero_attn:
src_len += 1
k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])],
dim=1)
v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])],
dim=1)
if attn_mask is not None:
attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(
attn_mask.size(0), 1)], dim=1)
attn_weights = torch.bmm(q, k.transpose(1, 2))
assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len,
src_len]
        if attn_mask is not None:
            try:
                attn_weights *= attn_mask.unsqueeze(0)
            except RuntimeError as e:
                # The mask must broadcast against (bsz * num_heads, tgt_len, src_len).
                raise AssertionError(
                    'attn_mask shape %s is incompatible with attn_weights shape %s'
                    % (tuple(attn_mask.size()), tuple(attn_weights.size()))) from e
attn_weights = (attn_weights - torch.min(attn_weights)) / (torch.
max(attn_weights) - torch.min(attn_weights))
attn_weights = F.dropout(attn_weights, p=self.attn_dropout,
training=self.training)
attn = torch.bmm(attn_weights, v)
assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.
head_dim]
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn = self.out_proj(attn)
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.sum(dim=1) / self.num_heads
return attn, attn_weights
def in_proj_qkv(self, query):
return self._in_proj(query).chunk(3, dim=-1)
def in_proj_kv(self, key):
return self._in_proj(key, start=self.embed_dim).chunk(2, dim=-1)
def in_proj_q(self, query):
return self._in_proj(query, end=self.embed_dim)
def in_proj_k(self, key):
return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim)
def in_proj_v(self, value):
return self._in_proj(value, start=2 * self.embed_dim)
def _in_proj(self, input, start=0, end=None):
weight = self.in_proj_weight
bias = self.in_proj_bias
weight = weight[start:end, :]
if bias is not None:
bias = bias[start:end]
return F.linear(input, weight, bias)
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4,
4, 4, 4])]
def get_init_inputs():
return [[], {'embed_dim': 4, 'num_heads': 4}]
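def _multihead_attention_usage_sketch():
    # Hedged usage sketch (illustrative helper, not part of the original
    # source): self-attention is obtained by passing the same
    # (Time, Batch, Channel) tensor as query, key and value.
    mha = MultiheadAttention(embed_dim=4, num_heads=4)
    x = torch.rand(4, 4, 4)
    attn, attn_weights = mha(x, x, x)
    assert attn.shape == (4, 4, 4)          # (tgt_len, bsz, embed_dim)
    assert attn_weights.shape == (4, 4, 4)  # (bsz, tgt_len, src_len)
    return attn, attn_weights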
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
import torch.utils
from torch.nn.parameter import Parameter
import torch.nn.functional as F
from torch.nn import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
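    # Fused bias-add for the q projection followed by the attention scaling;
    # head_dim ** -0.5 == 1.0 here because embed_dim == num_heads == 4.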
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_per_fused_div_eq_isnan_logical_and_logical_or_max_min_sub_1(in_ptr0,
out_ptr2, out_ptr3, out_ptr4, out_ptr5, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
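    # Single-block reduction over all 1024 attention logits: computes the
    # global min/max, writes the min-max normalized weights, and saves
    # NaN-aware equals-min / equals-max boolean masks for the backward pass.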
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.broadcast_to(tmp0, [RBLOCK])
tmp3 = triton_helpers.promote_to_tensor(triton_helpers.min2(tmp1, 0))
tmp5 = triton_helpers.promote_to_tensor(triton_helpers.max2(tmp1, 0))
tmp6 = tmp5 - tmp3
tmp7 = tmp0 - tmp3
tmp8 = tmp7 / tmp6
tmp9 = tmp0 == tmp3
tmp10 = libdevice.isnan(tmp0).to(tl.int1)
tmp11 = libdevice.isnan(tmp3).to(tl.int1)
tmp12 = tmp10 & tmp11
tmp13 = tmp9 | tmp12
tmp14 = tmp0 == tmp5
tmp15 = libdevice.isnan(tmp5).to(tl.int1)
tmp16 = tmp10 & tmp15
tmp17 = tmp14 | tmp16
tl.store(out_ptr2 + tl.full([1], 0, tl.int32), tmp6, None)
tl.store(out_ptr3 + tl.broadcast_to(r0, [RBLOCK]), tmp8, None)
tl.store(out_ptr4 + tl.broadcast_to(r0, [RBLOCK]), tmp13, None)
tl.store(out_ptr5 + tl.broadcast_to(r0, [RBLOCK]), tmp17, None)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
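    # Transpose (bsz * num_heads, tgt_len) -> (tgt_len, bsz * num_heads):
    # materializes attn.transpose(0, 1).contiguous() before the out_proj addmm.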
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (x1 + 16 * y0), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_div_sum_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x1 = xindex // 64
x2 = xindex
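    # Sums the 4 attention heads (stride 64 within each 256-element batch row)
    # and scales by 0.25, i.e. attn_weights.sum(dim=1) / num_heads.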
tmp0 = tl.load(in_ptr0 + (x0 + 256 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (64 + x0 + 256 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (128 + x0 + 256 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (192 + x0 + 256 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (12, 4), (4, 1))
assert_size_stride(primals_5, (12,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 4),
reinterpret_tensor(primals_2, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 16), alpha=1,
beta=1, out=buf1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(reinterpret_tensor(primals_5, (4,), (1,), 8),
reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 32), alpha=1,
beta=1, out=buf2)
del primals_4
buf3 = reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_mul_0[grid(64)](buf3, primals_5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((16, 4, 16), (64, 16, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 1), (1, 16, 0),
0), reinterpret_tensor(buf1, (16, 1, 16), (1, 1, 16), 0), out=buf4)
buf7 = empty_strided_cuda((), (), torch.float32)
buf8 = empty_strided_cuda((16, 4, 16), (64, 16, 1), torch.float32)
buf13 = empty_strided_cuda((16, 4, 16), (64, 16, 1), torch.bool)
buf14 = empty_strided_cuda((16, 4, 16), (64, 16, 1), torch.bool)
triton_per_fused_div_eq_isnan_logical_and_logical_or_max_min_sub_1[grid
(1)](buf4, buf7, buf8, buf13, buf14, 1, 1024, num_warps=8,
num_stages=1)
del buf4
buf9 = empty_strided_cuda((16, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf8, reinterpret_tensor(buf2, (16, 16, 1), (1,
16, 1), 0), out=buf9)
buf10 = empty_strided_cuda((4, 16, 1), (16, 1, 1), torch.float32)
triton_poi_fused_clone_2[grid(4, 16)](buf9, buf10, 4, 16, XBLOCK=16,
YBLOCK=4, num_warps=1, num_stages=1)
buf11 = reinterpret_tensor(buf9, (16, 4), (4, 1), 0)
del buf9
extern_kernels.addmm(primals_7, reinterpret_tensor(buf10, (16, 4),
(4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf11)
del primals_7
buf12 = empty_strided_cuda((4, 4, 16), (64, 16, 1), torch.float32)
triton_poi_fused_div_sum_3[grid(256)](buf8, buf12, 256, XBLOCK=128,
num_warps=4, num_stages=1)
return reinterpret_tensor(buf11, (4, 4, 4), (16, 4, 1), 0
), buf12, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_2, (64, 4), (4, 1), 0
), reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), buf7, buf8, reinterpret_tensor(buf10, (16, 4), (4, 1), 0
), primals_6, reinterpret_tensor(buf2, (16, 1, 16), (1, 1, 16), 0
), buf13, buf14, reinterpret_tensor(buf3, (16, 1, 4), (1, 1, 16), 0
), reinterpret_tensor(buf1, (16, 16, 1), (1, 16, 1), 0)
class MultiheadAttentionNew(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(self, embed_dim, num_heads, attn_dropout=0.0, bias=True,
add_bias_kv=False, add_zero_attn=False):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.attn_dropout = attn_dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, 'embed_dim must be divisible by num_heads'
self.scaling = self.head_dim ** -0.5
self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))
if bias:
self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))
else:
self.register_parameter('in_proj_bias', None)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
if add_bias_kv:
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.in_proj_weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.in_proj_bias is not None:
nn.init.constant_(self.in_proj_bias, 0.0)
nn.init.constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
nn.init.xavier_normal_(self.bias_k)
if self.bias_v is not None:
nn.init.xavier_normal_(self.bias_v)
def in_proj_qkv(self, query):
return self._in_proj(query).chunk(3, dim=-1)
def in_proj_kv(self, key):
return self._in_proj(key, start=self.embed_dim).chunk(2, dim=-1)
def in_proj_q(self, query):
return self._in_proj(query, end=self.embed_dim)
def in_proj_k(self, key):
return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim)
def in_proj_v(self, value):
return self._in_proj(value, start=2 * self.embed_dim)
def _in_proj(self, input, start=0, end=None):
weight = self.in_proj_weight
bias = self.in_proj_bias
weight = weight[start:end, :]
if bias is not None:
bias = bias[start:end]
return F.linear(input, weight, bias)
def forward(self, input_0, input_1, input_2):
primals_4 = self.in_proj_weight
primals_5 = self.in_proj_bias
primals_6 = self.out_proj.weight
primals_7 = self.out_proj.bias
primals_1 = input_0
primals_2 = input_1
primals_3 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0], output[1]
| muqiaoy/dl_signal | MultiheadAttention | false | 16,135 | [
"MIT"
] | 54 | 3a30d14982016644bfc96a7d1ca0109b441f17fd | https://github.com/muqiaoy/dl_signal/tree/3a30d14982016644bfc96a7d1ca0109b441f17fd |
AdaptiveFeatureNorm | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/xu/cxuw4zngts3btgwtvpkzjuhzg5qrx7eatj7fzhrybxfvwesbsblb.py
# Topologically Sorted Source Nodes: [norm_1, norm, radius_1, sub, pow_1, loss], Original ATen: [aten.linalg_vector_norm, aten.add, aten.sub, aten.pow, aten.mean]
# Source node to ATen node mapping:
# loss => mean
# norm => pow_1, pow_2, sum_1
# norm_1 => pow_3, pow_4, sum_2
# pow_1 => pow_5
# radius_1 => add
# sub => sub
# Graph fragment:
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_3, [1]), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1]), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_2, 4), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%pow_4, %add), kwargs = {})
# %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_5,), kwargs = {})
triton_per_fused_add_linalg_vector_norm_mean_pow_sub_0 = async_compile.triton('triton_per_fused_add_linalg_vector_norm_mean_pow_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_linalg_vector_norm_mean_pow_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_linalg_vector_norm_mean_pow_sub_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = (rindex // 16)
tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None)
tmp2 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None)
tmp5 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None)
tmp8 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None)
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp11 = libdevice.sqrt(tmp10)
tmp12 = 4.0
tmp13 = tmp11 + tmp12
tmp14 = tmp11 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.sum(tmp16, 1)[:, None]
tmp19 = 64.0
tmp20 = tmp18 / tmp19
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp20, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [norm_1, norm, radius_1, sub, pow_1, loss], Original ATen: [aten.linalg_vector_norm, aten.add, aten.sub, aten.pow, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_add_linalg_vector_norm_mean_pow_sub_0.run(buf1, arg0_1, 1, 64, grid=grid(1), stream=stream0)
del arg0_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class AdaptiveFeatureNorm(nn.Module):
"""
The `Stepwise Adaptive Feature Norm loss (ICCV 2019) <https://arxiv.org/pdf/1811.07456v2.pdf>`_
Instead of using restrictive scalar R to match the corresponding feature norm, Stepwise Adaptive Feature Norm
is used in order to learn task-specific features with large norms in a progressive manner.
We denote parameters of backbone :math:`G` as :math:`\\theta_g`, parameters of bottleneck :math:`F_f` as :math:`\\theta_f`
, parameters of classifier head :math:`F_y` as :math:`\\theta_y`, and features extracted from sample :math:`x_i` as
:math:`h(x_i;\\theta)`. Full loss is calculated as follows
.. math::
L(\\theta_g,\\theta_f,\\theta_y)=\\frac{1}{n_s}\\sum_{(x_i,y_i)\\in D_s}L_y(x_i,y_i)+\\frac{\\lambda}{n_s+n_t}
\\sum_{x_i\\in D_s\\cup D_t}L_d(h(x_i;\\theta_0)+\\Delta_r,h(x_i;\\theta))\\\\
where :math:`L_y` denotes classification loss, :math:`L_d` denotes norm loss, :math:`\\theta_0` and :math:`\\theta`
represent the updated and updating model parameters in the last and current iterations respectively.
Args:
delta (float): positive residual scalar to control the feature norm enlargement.
Inputs:
- f (tensor): feature representations on source or target domain.
Shape:
- f: :math:`(N, F)` where F means the dimension of input features.
- Outputs: scalar.
Examples::
>>> adaptive_feature_norm = AdaptiveFeatureNorm(delta=1)
>>> f_s = torch.randn(32, 1000)
>>> f_t = torch.randn(32, 1000)
>>> norm_loss = adaptive_feature_norm(f_s) + adaptive_feature_norm(f_t)
"""
def __init__(self, delta):
super(AdaptiveFeatureNorm, self).__init__()
self.delta = delta
def forward(self, f: 'torch.Tensor') ->torch.Tensor:
radius = f.norm(p=2, dim=1).detach()
assert radius.requires_grad is False
radius = radius + self.delta
loss = ((f.norm(p=2, dim=1) - radius) ** 2).mean()
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'delta': 4}]
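def _adaptive_feature_norm_sketch():
    # Hedged sketch (illustrative helper, not from the original source).
    # Because `radius` is detached, the loss *value* is always delta ** 2;
    # the gradient, however, flows through the non-detached norm, so a
    # descent step on this loss enlarges ||f||.
    afn = AdaptiveFeatureNorm(delta=1)
    f = torch.randn(8, 16, requires_grad=True)
    loss = afn(f)
    assert torch.allclose(loss, torch.tensor(1.0))
    loss.backward()
    return f.grad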
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_linalg_vector_norm_mean_pow_sub_0(in_out_ptr0,
in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
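    # Each of the 64 reduction elements is one (batch, spatial) position; the
    # four strided loads below gather the 4 channel values reduced by the
    # dim-1 L2 norm.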
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp2 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp5 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp8 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
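    # sqrt gives the dim-1 L2 norm; the constant 4.0 is the inlined delta, so
    # tmp14 == -delta elementwise and tmp15 == delta ** 2.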
tmp11 = libdevice.sqrt(tmp10)
tmp12 = 4.0
tmp13 = tmp11 + tmp12
tmp14 = tmp11 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.sum(tmp16, 1)[:, None]
tmp19 = 64.0
tmp20 = tmp18 / tmp19
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp20, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_linalg_vector_norm_mean_pow_sub_0[grid(1)](buf1,
arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf1,
class AdaptiveFeatureNormNew(nn.Module):
"""
The `Stepwise Adaptive Feature Norm loss (ICCV 2019) <https://arxiv.org/pdf/1811.07456v2.pdf>`_
Instead of using restrictive scalar R to match the corresponding feature norm, Stepwise Adaptive Feature Norm
is used in order to learn task-specific features with large norms in a progressive manner.
We denote parameters of backbone :math:`G` as :math:`\\theta_g`, parameters of bottleneck :math:`F_f` as :math:`\\theta_f`
, parameters of classifier head :math:`F_y` as :math:`\\theta_y`, and features extracted from sample :math:`x_i` as
:math:`h(x_i;\\theta)`. Full loss is calculated as follows
.. math::
L(\\theta_g,\\theta_f,\\theta_y)=\\frac{1}{n_s}\\sum_{(x_i,y_i)\\in D_s}L_y(x_i,y_i)+\\frac{\\lambda}{n_s+n_t}
\\sum_{x_i\\in D_s\\cup D_t}L_d(h(x_i;\\theta_0)+\\Delta_r,h(x_i;\\theta))\\\\
where :math:`L_y` denotes classification loss, :math:`L_d` denotes norm loss, :math:`\\theta_0` and :math:`\\theta`
represent the updated and updating model parameters in the last and current iterations respectively.
Args:
delta (float): positive residual scalar to control the feature norm enlargement.
Inputs:
- f (tensor): feature representations on source or target domain.
Shape:
- f: :math:`(N, F)` where F means the dimension of input features.
- Outputs: scalar.
Examples::
>>> adaptive_feature_norm = AdaptiveFeatureNorm(delta=1)
>>> f_s = torch.randn(32, 1000)
>>> f_t = torch.randn(32, 1000)
>>> norm_loss = adaptive_feature_norm(f_s) + adaptive_feature_norm(f_t)
"""
def __init__(self, delta):
super(AdaptiveFeatureNormNew, self).__init__()
self.delta = delta
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| neka-nat/Transfer-Learning-Library | AdaptiveFeatureNorm | false | 16,136 | [
"MIT"
] | 1,474 | a3b27b0d7562fa90a02e914140b37ab438469e6c | https://github.com/neka-nat/Transfer-Learning-Library/tree/a3b27b0d7562fa90a02e914140b37ab438469e6c |
BatchSpectralPenalizationLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/3r/c3rbz7dw35fg7j6ysnzuk22mifl2txzhu2tdvutfxll4lensmwuh.py
# Topologically Sorted Source Nodes: [pow_1, pow_2, loss], Original ATen: [aten.pow, aten.add]
# Source node to ATen node mapping:
# loss => add
# pow_1 => pow_1
# pow_2 => pow_2
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select, 2), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select_1, 2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_1, %pow_2), kwargs = {})
triton_poi_fused_add_pow_0 = async_compile.triton('triton_poi_fused_add_pow_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_pow_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_pow_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask)
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [svd], Original ATen: [aten._linalg_svd]
buf0 = torch.ops.aten._linalg_svd.default(arg0_1)
del arg0_1
buf2 = buf0[1]
del buf0
# Topologically Sorted Source Nodes: [svd_1], Original ATen: [aten._linalg_svd]
buf4 = torch.ops.aten._linalg_svd.default(arg1_1)
del arg1_1
buf6 = buf4[1]
del buf4
buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pow_1, pow_2, loss], Original ATen: [aten.pow, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_pow_0.run(buf2, buf6, buf8, 16, grid=grid(16), stream=stream0)
del buf2
del buf6
return (buf8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class BatchSpectralPenalizationLoss(nn.Module):
"""Batch spectral penalization loss from `Transferability vs. Discriminability: Batch
Spectral Penalization for Adversarial Domain Adaptation (ICML 2019)
<http://ise.thss.tsinghua.edu.cn/~mlong/doc/batch-spectral-penalization-icml19.pdf>`_.
Given source features :math:`f_s` and target features :math:`f_t` in current mini batch, singular value
decomposition is first performed
.. math::
f_s = U_s\\Sigma_sV_s^T
.. math::
f_t = U_t\\Sigma_tV_t^T
Then batch spectral penalization loss is calculated as
.. math::
loss=\\sum_{i=1}^k(\\sigma_{s,i}^2+\\sigma_{t,i}^2)
where :math:`\\sigma_{s,i},\\sigma_{t,i}` refer to the :math:`i-th` largest singular value of source features
and target features respectively. We empirically set :math:`k=1`.
Inputs:
- f_s (tensor): feature representations on source domain, :math:`f^s`
- f_t (tensor): feature representations on target domain, :math:`f^t`
Shape:
- f_s, f_t: :math:`(N, F)` where F means the dimension of input features.
- Outputs: scalar.
"""
def __init__(self):
super(BatchSpectralPenalizationLoss, self).__init__()
def forward(self, f_s, f_t):
_, s_s, _ = torch.svd(f_s)
_, s_t, _ = torch.svd(f_t)
loss = torch.pow(s_s[0], 2) + torch.pow(s_t[0], 2)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
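def _batch_spectral_penalization_sketch():
    # Hedged sketch (illustrative helper, not from the original source):
    # torch.svd returns singular values in descending order, so with 2-D
    # (N, F) inputs the loss is the sum of squared largest singular values.
    bsp = BatchSpectralPenalizationLoss()
    f_s, f_t = torch.randn(32, 16), torch.randn(32, 16)
    loss = bsp(f_s, f_t)
    _, s_s, _ = torch.svd(f_s)
    _, s_t, _ = torch.svd(f_t)
    assert torch.allclose(loss, s_s[0] ** 2 + s_t[0] ** 2)
    return loss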
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_pow_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
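    # Elementwise s_s**2 + s_t**2 over the leading singular-value slices
    # selected by the wrapper.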
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = torch.ops.aten._linalg_svd.default(arg0_1)
del arg0_1
buf2 = buf0[1]
del buf0
buf4 = torch.ops.aten._linalg_svd.default(arg1_1)
del arg1_1
buf6 = buf4[1]
del buf4
buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_pow_0[grid(16)](buf2, buf6, buf8, 16, XBLOCK=
16, num_warps=1, num_stages=1)
del buf2
del buf6
return buf8,
class BatchSpectralPenalizationLossNew(nn.Module):
"""Batch spectral penalization loss from `Transferability vs. Discriminability: Batch
Spectral Penalization for Adversarial Domain Adaptation (ICML 2019)
<http://ise.thss.tsinghua.edu.cn/~mlong/doc/batch-spectral-penalization-icml19.pdf>`_.
Given source features :math:`f_s` and target features :math:`f_t` in current mini batch, singular value
decomposition is first performed
.. math::
f_s = U_s\\Sigma_sV_s^T
.. math::
f_t = U_t\\Sigma_tV_t^T
Then batch spectral penalization loss is calculated as
.. math::
loss=\\sum_{i=1}^k(\\sigma_{s,i}^2+\\sigma_{t,i}^2)
    where :math:`\\sigma_{s,i},\\sigma_{t,i}` denote the :math:`i`-th largest singular values of the source features
    and the target features, respectively. We empirically set :math:`k=1`.
    Inputs:
        - f_s (tensor): feature representations on the source domain, :math:`f^s`
        - f_t (tensor): feature representations on the target domain, :math:`f^t`
    Shape:
        - f_s, f_t: :math:`(N, F)`, where F is the dimension of the input features.
        - Outputs: scalar.
"""
def __init__(self):
super(BatchSpectralPenalizationLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
| neka-nat/Transfer-Learning-Library | BatchSpectralPenalizationLoss | false | 16,137 | [
"MIT"
] | 1,474 | a3b27b0d7562fa90a02e914140b37ab438469e6c | https://github.com/neka-nat/Transfer-Learning-Library/tree/a3b27b0d7562fa90a02e914140b37ab438469e6c |
conv_embedding | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/cc/ccc3jyqbz5rjanlhmaez5ahwrmffxjcm5mibxaw5efspqo45e4re.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [4, 4], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1296
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 81) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(4, 4), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 9, 9), (324, 81, 9, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_2, 1296, grid=grid(1296), stream=stream0)
del primals_2
return (buf1, primals_1, primals_3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torch import nn
class conv_embedding(nn.Module):
def __init__(self, in_channels, out_channels, patch_size, stride, padding):
super(conv_embedding, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=
patch_size, stride=stride, padding=padding, bias=True)
def forward(self, x):
x = self.conv(x)
return x
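# Shape sketch added for clarity (the values are illustrative assumptions
# mirroring get_init_inputs below): with kernel_size=4, stride=1 and
# padding=4, a 4x4 input maps to (4 + 2*4 - 4) // 1 + 1 = 9, i.e. the
# (4, 4, 9, 9) buffer asserted in the compiled call().
def _conv_embedding_shape_example():
    embed = conv_embedding(in_channels=4, out_channels=4, patch_size=4,
        stride=1, padding=4)
    out = embed(torch.rand(4, 4, 4, 4))
    assert out.shape == (4, 4, 9, 9)
    return out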
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'patch_size': 4,
'stride': 1, 'padding': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
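# Descriptive note (added): this kernel fuses the bias add that follows the
# extern convolution; x1 = xindex // 81 % 4 is the channel index (each
# channel spans 81 = 9 * 9 spatial elements), so the (4,) bias in in_ptr0
# is broadcast per channel over the (4, 4, 9, 9) output in place.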
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 1296
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 81 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(4, 4), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 9, 9), (324, 81, 9, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(1296)](buf1, primals_2, 1296,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
return buf1, primals_1, primals_3
class conv_embeddingNew(nn.Module):
def __init__(self, in_channels, out_channels, patch_size, stride, padding):
super(conv_embeddingNew, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=
patch_size, stride=stride, padding=padding, bias=True)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| naver-ai/PfLayer | conv_embedding | false | 16,138 | [
"Apache-2.0"
] | 59 | da8f80b2ea3b6bd7fbee3beee8b1516c89bc0441 | https://github.com/naver-ai/PfLayer/tree/da8f80b2ea3b6bd7fbee3beee8b1516c89bc0441 |
MLP_G | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ms/cmsuzohbg5nq52jnvirovzkvykrzzko5xomu7zyu5e5u2lhegppw.py
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# h => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
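# Descriptive note (added): the cat kernel above materializes
# torch.cat((noise, att), 1) in a single pass; tmp4 (x0 < 4) selects the
# noise half from in_ptr0, while tmp9 reads the attribute half from in_ptr1
# at column x0 - 4 for the remaining lanes of the (4, 8) output.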
# kernel path: runs/run_shard_0/inductor_cache/5n/c5nsofekij6a7ap52g7x25mzkhruvzcsi6l2zpm7nkpnzba6k7fc.py
# Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# h_1 => gt, mul, where
# Graph fragment:
# %add_tensor_2 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_2, %primals_4), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_tensor_2, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_tensor_2, 0.2), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add_tensor_2, %mul), kwargs = {})
triton_poi_fused_leaky_relu_1 = async_compile.triton('triton_poi_fused_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + (x2), tmp7, xmask)
''', device_str='cuda')
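# Descriptive note (added): this kernel fuses the fc1 bias add with
# LeakyReLU(0.2): tmp7 keeps tmp2 where it is positive and scales it by 0.2
# otherwise, writing the result back in place.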
# kernel path: runs/run_shard_0/inductor_cache/nc/cncd5yvgjpjfrvyaveva55remy3ckx5quvuhdmurdt3pp6k3qtux.py
# Topologically Sorted Source Nodes: [h_2], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# h_2 => relu
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_6), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_2 = async_compile.triton('triton_poi_fused_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/tx/ctxekgeatn5nsr2greclznq67r4k7scadshdg3wozur7kiuzhnlq.py
# Topologically Sorted Source Nodes: [h_3], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# h_3 => relu_1
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_8), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_3 = async_compile.triton('triton_poi_fused_relu_threshold_backward_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_3(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
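# Descriptive note (added): besides the fused bias add + ReLU for fc3, this
# kernel emits the boolean mask tmp6 = (relu_out <= 0) into out_ptr0 (buf7),
# which the autograd backward uses as the ReLU threshold mask.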
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 32, grid=grid(32), stream=stream0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf1)
del primals_3
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_1.run(buf2, primals_4, 16, grid=grid(16), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), out=buf3)
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [h_2], Original ATen: [aten.relu]
triton_poi_fused_relu_2.run(buf4, primals_6, 16, grid=grid(16), stream=stream0)
del primals_6
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf4, reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf5)
buf6 = buf5; del buf5 # reuse
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
# Topologically Sorted Source Nodes: [h_3], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_3.run(buf6, primals_8, buf7, 16, grid=grid(16), stream=stream0)
del primals_8
return (buf6, buf0, buf2, buf4, buf7, primals_7, primals_5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from _paritybench_helpers import _mock_config
import torch
import torch.nn as nn
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
m.weight.data.normal_(0.0, 0.02)
m.bias.data.fill_(0)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
class MLP_G(nn.Module):
def __init__(self, opt):
super(MLP_G, self).__init__()
self.fc1 = nn.Linear(opt.attSize + opt.nz, opt.ngh)
self.fc2 = nn.Linear(opt.ngh, opt.ngh)
self.fc3 = nn.Linear(opt.ngh, opt.resSize)
self.lrelu = nn.LeakyReLU(0.2, True)
self.relu = nn.ReLU(True)
self.apply(weights_init)
def forward(self, noise, att):
h = torch.cat((noise, att), 1)
h = self.lrelu(self.fc1(h))
h = self.relu(self.fc2(h))
h = self.relu(self.fc3(h))
return h
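# A minimal usage sketch added for clarity (SimpleNamespace stands in for
# the repo's option object; the sizes are illustrative assumptions):
def _mlp_g_usage_example():
    from types import SimpleNamespace
    opt = SimpleNamespace(attSize=4, nz=4, ngh=4, resSize=4)
    netG = MLP_G(opt)
    noise = torch.rand(4, opt.nz)
    att = torch.rand(4, opt.attSize)
    return netG(noise, att)  # (4, resSize) synthesized features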
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'opt': _mock_config(attSize=4, nz=4, ngh=4, resSize=4)}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.2
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(in_out_ptr0 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_3(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 4), (1, 8
), 0), out=buf1)
del primals_3
buf2 = buf1
del buf1
triton_poi_fused_leaky_relu_1[grid(16)](buf2, primals_4, 16, XBLOCK
=16, num_warps=1, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf2, reinterpret_tensor(primals_5, (4, 4), (1, 4
), 0), out=buf3)
buf4 = buf3
del buf3
triton_poi_fused_relu_2[grid(16)](buf4, primals_6, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_6
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(buf4, reinterpret_tensor(primals_7, (4, 4), (1, 4
), 0), out=buf5)
buf6 = buf5
del buf5
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_3[grid(16)](buf6,
primals_8, buf7, 16, XBLOCK=16, num_warps=1, num_stages=1)
del primals_8
return buf6, buf0, buf2, buf4, buf7, primals_7, primals_5
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Linear') != -1:
m.weight.data.normal_(0.0, 0.02)
m.bias.data.fill_(0)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
class MLP_GNew(nn.Module):
def __init__(self, opt):
super(MLP_GNew, self).__init__()
self.fc1 = nn.Linear(opt.attSize + opt.nz, opt.ngh)
self.fc2 = nn.Linear(opt.ngh, opt.ngh)
self.fc3 = nn.Linear(opt.ngh, opt.resSize)
self.lrelu = nn.LeakyReLU(0.2, True)
self.relu = nn.ReLU(True)
self.apply(weights_init)
def forward(self, input_0, input_1):
primals_3 = self.fc1.weight
primals_4 = self.fc1.bias
        primals_5 = self.fc2.weight
        primals_6 = self.fc2.bias
        primals_7 = self.fc3.weight
        primals_8 = self.fc3.bias
        primals_1 = input_0
        primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
| naraysa/gzsl-od | MLP_G | false | 16,139 | [
"MIT"
] | 50 | be771e12e17a4c02386c70697c4b26e3170a7557 | https://github.com/naraysa/gzsl-od/tree/be771e12e17a4c02386c70697c4b26e3170a7557 |
PyramidPoolingModule | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/55/c55tt6ffjm373c3aa76numhbzpowkg5gj7ojdkeb534agjhlfrlf.py
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten._adaptive_avg_pool2d]
# Source node to ATen node mapping:
# x_6 => _adaptive_avg_pool2d_2
# Graph fragment:
# %_adaptive_avg_pool2d_2 : [num_users=4] = call_function[target=torch.ops.aten._adaptive_avg_pool2d.default](args = (%arg0_1, [6, 6]), kwargs = {})
triton_poi_fused__adaptive_avg_pool2d_0 = async_compile.triton('triton_poi_fused__adaptive_avg_pool2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__adaptive_avg_pool2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__adaptive_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 6) % 6
x0 = xindex % 6
x2 = (xindex // 36)
x4 = xindex
tmp0 = ((2*x1) // 3)
tmp1 = ((9 + (4*x1)) // 6)
tmp2 = tmp0 < tmp1
tmp3 = ((2*x0) // 3)
tmp4 = ((9 + (4*x0)) // 6)
tmp5 = tmp3 < tmp4
tmp6 = tmp2 & tmp5
tmp7 = tl.load(in_ptr0 + ((4*((2*x1) // 3)) + (16*x2) + ((2*x0) // 3)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp8 = 1 + ((2*x0) // 3)
tmp9 = tmp8 < tmp4
tmp10 = tmp2 & tmp9
tmp11 = tl.load(in_ptr0 + (1 + (4*((2*x1) // 3)) + (16*x2) + ((2*x0) // 3)), tmp10 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp11 + tmp7
tmp13 = 1 + ((2*x1) // 3)
tmp14 = tmp13 < tmp1
tmp15 = tmp14 & tmp5
tmp16 = tl.load(in_ptr0 + (4 + (4*((2*x1) // 3)) + (16*x2) + ((2*x0) // 3)), tmp15 & xmask, eviction_policy='evict_last', other=0.0)
tmp17 = tmp16 + tmp12
tmp18 = tmp14 & tmp9
tmp19 = tl.load(in_ptr0 + (5 + (4*((2*x1) // 3)) + (16*x2) + ((2*x0) // 3)), tmp18 & xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tmp19 + tmp17
tmp21 = 1.0
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp6, tmp21, tmp22)
tmp24 = tl.where(tmp10, tmp21, tmp22)
tmp25 = tmp24 + tmp23
tmp26 = tl.where(tmp15, tmp21, tmp22)
tmp27 = tmp26 + tmp25
tmp28 = tl.where(tmp18, tmp21, tmp22)
tmp29 = tmp28 + tmp27
tmp30 = tmp20 / tmp29
tl.store(out_ptr0 + (x4), tmp30, xmask)
''', device_str='cuda')
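# Eager-mode reference added for clarity (an illustrative equivalence, not
# compiler output): the kernel above reproduces adaptive average pooling of
# a 4x4 map to 6x6, where output bin (i, j) averages input rows
# [floor(4*i/6), ceil(4*(i+1)/6)) and columns [floor(4*j/6), ceil(4*(j+1)/6)),
# which is exactly the tmp0/tmp1 and tmp3/tmp4 start/end arithmetic.
def _adaptive_pool_6x6_reference(x):
    import torch.nn.functional as F
    return F.adaptive_avg_pool2d(x, (6, 6))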
# kernel path: runs/run_shard_0/inductor_cache/zq/czql63zabrssuta2r6e7oa3gkjmx3nwlcogjkuu3it3vutwb4ljv.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten._adaptive_avg_pool2d]
# Source node to ATen node mapping:
# x_4 => _adaptive_avg_pool2d_1
# Graph fragment:
# %_adaptive_avg_pool2d_1 : [num_users=4] = call_function[target=torch.ops.aten._adaptive_avg_pool2d.default](args = (%arg0_1, [3, 3]), kwargs = {})
triton_poi_fused__adaptive_avg_pool2d_1 = async_compile.triton('triton_poi_fused__adaptive_avg_pool2d_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__adaptive_avg_pool2d_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__adaptive_avg_pool2d_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 3) % 3
x0 = xindex % 3
x2 = (xindex // 9)
x4 = xindex
tmp0 = ((4*x1) // 3)
tmp1 = 2 + ((4*x1) // 3)
tmp2 = tmp0 < tmp1
tmp3 = ((4*x0) // 3)
tmp4 = 2 + ((4*x0) // 3)
tmp5 = tmp3 < tmp4
tmp6 = tmp2 & tmp5
tmp7 = tl.load(in_ptr0 + ((4*((4*x1) // 3)) + (16*x2) + ((4*x0) // 3)), tmp6 & xmask, other=0.0)
tmp8 = 1 + ((4*x0) // 3)
tmp9 = tmp8 < tmp4
tmp10 = tmp2 & tmp9
tmp11 = tl.load(in_ptr0 + (1 + (4*((4*x1) // 3)) + (16*x2) + ((4*x0) // 3)), tmp10 & xmask, other=0.0)
tmp12 = tmp11 + tmp7
tmp13 = 1 + ((4*x1) // 3)
tmp14 = tmp13 < tmp1
tmp15 = tmp14 & tmp5
tmp16 = tl.load(in_ptr0 + (4 + (4*((4*x1) // 3)) + (16*x2) + ((4*x0) // 3)), tmp15 & xmask, other=0.0)
tmp17 = tmp16 + tmp12
tmp18 = tmp14 & tmp9
tmp19 = tl.load(in_ptr0 + (5 + (4*((4*x1) // 3)) + (16*x2) + ((4*x0) // 3)), tmp18 & xmask, other=0.0)
tmp20 = tmp19 + tmp17
tmp21 = 1.0
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp6, tmp21, tmp22)
tmp24 = tl.where(tmp10, tmp21, tmp22)
tmp25 = tmp24 + tmp23
tmp26 = tl.where(tmp15, tmp21, tmp22)
tmp27 = tmp26 + tmp25
tmp28 = tl.where(tmp18, tmp21, tmp22)
tmp29 = tmp28 + tmp27
tmp30 = tmp20 / tmp29
tl.store(out_ptr0 + (x4), tmp30, xmask)
''', device_str='cuda')
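# Descriptive note (added): for the 3x3 branch the window start is
# floor(4*i/3) and the end ceil(4*(i+1)/3) = start + 2 for every
# i in {0, 1, 2}, so the kernel hard-codes a 2x2 window (tmp1 = start + 2)
# while still dividing by the mask-counted element total tmp29.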
# kernel path: runs/run_shard_0/inductor_cache/5t/c5tklswoxjey37dco7hj52de7i7ruj2ngeweul4cw26nllhti7fk.py
# Topologically Sorted Source Nodes: [x, x_1, feat, x_2, x_3, feat_1, x_5, feat_2, x_7, feat_3], Original ATen: [aten.mean, aten._to_copy, aten.arange, aten.mul, aten.clamp, aten._unsafe_index, aten.sub, aten.add, aten._adaptive_avg_pool2d]
# Source node to ATen node mapping:
# feat => add_5
# feat_1 => add_11
# feat_2 => add_17
# feat_3 => add_23
# x => mean
# x_1 => _unsafe_index, _unsafe_index_1, _unsafe_index_2, _unsafe_index_3, add_2, add_3, add_4, clamp_max_2, clamp_max_3, clamp_min_1, clamp_min_2, clamp_min_3, convert_element_type_1, convert_element_type_2, convert_element_type_3, iota_1, mul_1, mul_2, mul_3, mul_4, sub, sub_1, sub_2, sub_3, sub_4
# x_2 => _adaptive_avg_pool2d
# x_3 => _unsafe_index_4, _unsafe_index_5, _unsafe_index_6, _unsafe_index_7, add_10, add_8, add_9, clamp_max_6, clamp_max_7, clamp_min_5, clamp_min_6, clamp_min_7, convert_element_type_5, convert_element_type_6, convert_element_type_7, iota_3, mul_6, mul_7, mul_8, mul_9, sub_5, sub_6, sub_7, sub_8, sub_9
# x_5 => _unsafe_index_10, _unsafe_index_11, _unsafe_index_8, _unsafe_index_9, add_14, add_15, add_16, clamp_max_10, clamp_max_11, clamp_min_10, clamp_min_11, clamp_min_9, convert_element_type_10, convert_element_type_11, convert_element_type_9, iota_5, mul_11, mul_12, mul_13, mul_14, sub_10, sub_11, sub_12, sub_13, sub_14
# x_7 => _unsafe_index_12, _unsafe_index_13, _unsafe_index_14, _unsafe_index_15, add_20, add_21, add_22, clamp_max_14, clamp_max_15, clamp_min_13, clamp_min_14, clamp_min_15, convert_element_type_13, convert_element_type_14, convert_element_type_15, iota_7, mul_16, mul_17, mul_18, mul_19, sub_15, sub_16, sub_17, sub_18, sub_19
# Graph fragment:
# %mean : [num_users=4] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [-1, -2], True), kwargs = {})
# %convert_element_type_1 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view, torch.int64), kwargs = {})
# %iota_1 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type_2 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_1, torch.float32), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type_2, 0.0), kwargs = {})
# %clamp_min_1 : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mul_1, 0.0), kwargs = {})
# %convert_element_type_3 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%clamp_min_1, torch.int64), kwargs = {})
# %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%mean, [None, None, %clamp_max, %clamp_max_1]), kwargs = {})
# %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%mean, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_3, %_unsafe_index_2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_1, %convert_element_type_3), kwargs = {})
# %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 0.0), kwargs = {})
# %clamp_max_2 : [num_users=2] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 1.0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, %clamp_max_2), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_2, %mul_3), kwargs = {})
# %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%mean, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {})
# %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%mean, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %clamp_max_2), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_2), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %add_2), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %convert_element_type_1), kwargs = {})
# %clamp_min_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_3, 0.0), kwargs = {})
# %clamp_max_3 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_3, 1.0), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %clamp_max_3), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %mul_4), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, %add_4), kwargs = {})
# %_adaptive_avg_pool2d : [num_users=4] = call_function[target=torch.ops.aten._adaptive_avg_pool2d.default](args = (%arg0_1, [2, 2]), kwargs = {})
# %convert_element_type_5 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view_2, torch.int64), kwargs = {})
# %iota_3 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type_6 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_3, torch.float32), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type_6, 0.3333333333333333), kwargs = {})
# %clamp_min_5 : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mul_6, 0.0), kwargs = {})
# %convert_element_type_7 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%clamp_min_5, torch.int64), kwargs = {})
# %_unsafe_index_7 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_adaptive_avg_pool2d, [None, None, %clamp_max_4, %clamp_max_5]), kwargs = {})
# %_unsafe_index_6 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_adaptive_avg_pool2d, [None, None, %clamp_max_4, %convert_element_type_7]), kwargs = {})
# %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_7, %_unsafe_index_6), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_5, %convert_element_type_7), kwargs = {})
# %clamp_min_6 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_5, 0.0), kwargs = {})
# %clamp_max_6 : [num_users=2] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_6, 1.0), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_7, %clamp_max_6), kwargs = {})
# %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_6, %mul_8), kwargs = {})
# %_unsafe_index_5 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_adaptive_avg_pool2d, [None, None, %convert_element_type_5, %clamp_max_5]), kwargs = {})
# %_unsafe_index_4 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_adaptive_avg_pool2d, [None, None, %convert_element_type_5, %convert_element_type_7]), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_5, %_unsafe_index_4), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %clamp_max_6), kwargs = {})
# %add_8 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_4, %mul_7), kwargs = {})
# %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_9, %add_8), kwargs = {})
# %sub_8 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_2, %convert_element_type_5), kwargs = {})
# %clamp_min_7 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_8, 0.0), kwargs = {})
# %clamp_max_7 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_7, 1.0), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_9, %clamp_max_7), kwargs = {})
# %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_8, %mul_9), kwargs = {})
# %add_11 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, %add_10), kwargs = {})
# %convert_element_type_9 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view_4, torch.int64), kwargs = {})
# %iota_5 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type_10 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_5, torch.float32), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type_10, 0.6666666666666666), kwargs = {})
# %clamp_min_9 : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mul_11, 0.0), kwargs = {})
# %convert_element_type_11 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%clamp_min_9, torch.int64), kwargs = {})
# %_unsafe_index_11 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_adaptive_avg_pool2d_1, [None, None, %clamp_max_8, %clamp_max_9]), kwargs = {})
# %_unsafe_index_10 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_adaptive_avg_pool2d_1, [None, None, %clamp_max_8, %convert_element_type_11]), kwargs = {})
# %sub_12 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_11, %_unsafe_index_10), kwargs = {})
# %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_9, %convert_element_type_11), kwargs = {})
# %clamp_min_10 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_10, 0.0), kwargs = {})
# %clamp_max_10 : [num_users=2] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_10, 1.0), kwargs = {})
# %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_12, %clamp_max_10), kwargs = {})
# %add_15 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_10, %mul_13), kwargs = {})
# %_unsafe_index_9 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_adaptive_avg_pool2d_1, [None, None, %convert_element_type_9, %clamp_max_9]), kwargs = {})
# %_unsafe_index_8 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_adaptive_avg_pool2d_1, [None, None, %convert_element_type_9, %convert_element_type_11]), kwargs = {})
# %sub_11 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_9, %_unsafe_index_8), kwargs = {})
# %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_11, %clamp_max_10), kwargs = {})
# %add_14 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_8, %mul_12), kwargs = {})
# %sub_14 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_15, %add_14), kwargs = {})
# %sub_13 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_4, %convert_element_type_9), kwargs = {})
# %clamp_min_11 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_13, 0.0), kwargs = {})
# %clamp_max_11 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_11, 1.0), kwargs = {})
# %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_14, %clamp_max_11), kwargs = {})
# %add_16 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_14, %mul_14), kwargs = {})
# %add_17 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_11, %add_16), kwargs = {})
# %convert_element_type_13 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view_6, torch.int64), kwargs = {})
# %iota_7 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type_14 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_7, torch.float32), kwargs = {})
# %mul_16 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type_14, 1.6666666666666667), kwargs = {})
# %clamp_min_13 : [num_users=2] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mul_16, 0.0), kwargs = {})
# %convert_element_type_15 : [num_users=4] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%clamp_min_13, torch.int64), kwargs = {})
# %_unsafe_index_15 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_adaptive_avg_pool2d_2, [None, None, %clamp_max_12, %clamp_max_13]), kwargs = {})
# %_unsafe_index_14 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_adaptive_avg_pool2d_2, [None, None, %clamp_max_12, %convert_element_type_15]), kwargs = {})
# %sub_17 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_15, %_unsafe_index_14), kwargs = {})
# %sub_15 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_13, %convert_element_type_15), kwargs = {})
# %clamp_min_14 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_15, 0.0), kwargs = {})
# %clamp_max_14 : [num_users=2] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_14, 1.0), kwargs = {})
# %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_17, %clamp_max_14), kwargs = {})
# %add_21 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_14, %mul_18), kwargs = {})
# %_unsafe_index_13 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_adaptive_avg_pool2d_2, [None, None, %convert_element_type_13, %clamp_max_13]), kwargs = {})
# %_unsafe_index_12 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_adaptive_avg_pool2d_2, [None, None, %convert_element_type_13, %convert_element_type_15]), kwargs = {})
# %sub_16 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_13, %_unsafe_index_12), kwargs = {})
# %mul_17 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_16, %clamp_max_14), kwargs = {})
# %add_20 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_12, %mul_17), kwargs = {})
# %sub_19 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_21, %add_20), kwargs = {})
# %sub_18 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_6, %convert_element_type_13), kwargs = {})
# %clamp_min_15 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_18, 0.0), kwargs = {})
# %clamp_max_15 : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_15, 1.0), kwargs = {})
# %mul_19 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_19, %clamp_max_15), kwargs = {})
# %add_22 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_20, %mul_19), kwargs = {})
# %add_23 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_17, %add_22), kwargs = {})
triton_per_fused__adaptive_avg_pool2d__to_copy__unsafe_index_add_arange_clamp_mean_mul_sub_2 = async_compile.triton('triton_per_fused__adaptive_avg_pool2d__to_copy__unsafe_index_add_arange_clamp_mean_mul_sub_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__adaptive_avg_pool2d__to_copy__unsafe_index_add_arange_clamp_mean_mul_sub_2', 'mutated_arg_names': ['in_out_ptr4'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__adaptive_avg_pool2d__to_copy__unsafe_index_add_arange_clamp_mean_mul_sub_2(in_out_ptr4, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
r3 = (rindex // 4)
r2 = rindex % 4
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = r3
tmp6 = tmp5.to(tl.float32)
tmp7 = 0.3333333333333333
tmp8 = tmp6 * tmp7
tmp9 = 0.0
tmp10 = triton_helpers.maximum(tmp8, tmp9)
tmp11 = tmp10.to(tl.int32)
tmp12 = tl.full([1, 1], 1, tl.int64)
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.minimum(tmp13, tmp12)
tmp15 = r2
tmp16 = tmp15.to(tl.float32)
tmp17 = tmp16 * tmp7
tmp18 = triton_helpers.maximum(tmp17, tmp9)
tmp19 = tmp18.to(tl.int32)
tmp20 = tmp19 + tmp12
tmp21 = triton_helpers.minimum(tmp20, tmp12)
tmp22 = tl.load(in_ptr0 + ((2*tmp21) + (8*tmp14) + (16*x0)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (1 + (2*tmp21) + (8*tmp14) + (16*x0)), xmask, eviction_policy='evict_last')
tmp24 = tmp23 + tmp22
tmp25 = tl.load(in_ptr0 + (4 + (2*tmp21) + (8*tmp14) + (16*x0)), xmask, eviction_policy='evict_last')
tmp26 = tmp25 + tmp24
tmp27 = tl.load(in_ptr0 + (5 + (2*tmp21) + (8*tmp14) + (16*x0)), xmask, eviction_policy='evict_last')
tmp28 = tmp27 + tmp26
tmp29 = 0.25
tmp30 = tmp28 * tmp29
tmp31 = tl.load(in_ptr0 + ((2*tmp19) + (8*tmp14) + (16*x0)), xmask, eviction_policy='evict_last')
tmp32 = tl.load(in_ptr0 + (1 + (2*tmp19) + (8*tmp14) + (16*x0)), xmask, eviction_policy='evict_last')
tmp33 = tmp32 + tmp31
tmp34 = tl.load(in_ptr0 + (4 + (2*tmp19) + (8*tmp14) + (16*x0)), xmask, eviction_policy='evict_last')
tmp35 = tmp34 + tmp33
tmp36 = tl.load(in_ptr0 + (5 + (2*tmp19) + (8*tmp14) + (16*x0)), xmask, eviction_policy='evict_last')
tmp37 = tmp36 + tmp35
tmp38 = tmp37 * tmp29
tmp39 = tmp30 - tmp38
tmp40 = tl.load(in_ptr0 + ((2*tmp21) + (8*tmp11) + (16*x0)), xmask, eviction_policy='evict_last')
tmp41 = tl.load(in_ptr0 + (1 + (2*tmp21) + (8*tmp11) + (16*x0)), xmask, eviction_policy='evict_last')
tmp42 = tmp41 + tmp40
tmp43 = tl.load(in_ptr0 + (4 + (2*tmp21) + (8*tmp11) + (16*x0)), xmask, eviction_policy='evict_last')
tmp44 = tmp43 + tmp42
tmp45 = tl.load(in_ptr0 + (5 + (2*tmp21) + (8*tmp11) + (16*x0)), xmask, eviction_policy='evict_last')
tmp46 = tmp45 + tmp44
tmp47 = tmp46 * tmp29
tmp48 = tl.load(in_ptr0 + ((2*tmp19) + (8*tmp11) + (16*x0)), xmask, eviction_policy='evict_last')
tmp49 = tl.load(in_ptr0 + (1 + (2*tmp19) + (8*tmp11) + (16*x0)), xmask, eviction_policy='evict_last')
tmp50 = tmp49 + tmp48
tmp51 = tl.load(in_ptr0 + (4 + (2*tmp19) + (8*tmp11) + (16*x0)), xmask, eviction_policy='evict_last')
tmp52 = tmp51 + tmp50
tmp53 = tl.load(in_ptr0 + (5 + (2*tmp19) + (8*tmp11) + (16*x0)), xmask, eviction_policy='evict_last')
tmp54 = tmp53 + tmp52
tmp55 = tmp54 * tmp29
tmp56 = tmp47 - tmp55
tmp57 = tmp19.to(tl.float32)
tmp58 = tmp18 - tmp57
tmp59 = triton_helpers.maximum(tmp58, tmp9)
tmp60 = 1.0
tmp61 = triton_helpers.minimum(tmp59, tmp60)
tmp62 = tmp39 * tmp61
tmp63 = tmp38 + tmp62
tmp64 = tmp56 * tmp61
tmp65 = tmp55 + tmp64
tmp66 = 16.0
tmp67 = tmp4 / tmp66
tmp68 = tmp67 - tmp67
tmp69 = tmp68 * tmp9
tmp70 = tmp67 + tmp69
tmp71 = tmp70 - tmp70
tmp72 = tmp71 * tmp9
tmp73 = tmp70 + tmp72
tmp74 = tmp0 + tmp73
tmp75 = tmp63 - tmp65
tmp76 = tmp11.to(tl.float32)
tmp77 = tmp10 - tmp76
tmp78 = triton_helpers.maximum(tmp77, tmp9)
tmp79 = triton_helpers.minimum(tmp78, tmp60)
tmp80 = tmp75 * tmp79
tmp81 = tmp65 + tmp80
tmp82 = tmp74 + tmp81
tmp83 = 0.6666666666666666
tmp84 = tmp6 * tmp83
tmp85 = triton_helpers.maximum(tmp84, tmp9)
tmp86 = tmp85.to(tl.int32)
tmp87 = tmp86 + tmp12
tmp88 = tl.full([1, 1], 2, tl.int64)
tmp89 = triton_helpers.minimum(tmp87, tmp88)
tmp90 = tmp16 * tmp83
tmp91 = triton_helpers.maximum(tmp90, tmp9)
tmp92 = tmp91.to(tl.int32)
tmp93 = tl.load(in_ptr1 + (tmp92 + (3*tmp89) + (9*x0)), xmask, eviction_policy='evict_last')
tmp94 = tmp92 + tmp12
tmp95 = triton_helpers.minimum(tmp94, tmp88)
tmp96 = tl.load(in_ptr1 + (tmp95 + (3*tmp89) + (9*x0)), xmask, eviction_policy='evict_last')
tmp97 = tmp96 - tmp93
tmp98 = tmp92.to(tl.float32)
tmp99 = tmp91 - tmp98
tmp100 = triton_helpers.maximum(tmp99, tmp9)
tmp101 = triton_helpers.minimum(tmp100, tmp60)
tmp102 = tmp97 * tmp101
tmp103 = tmp93 + tmp102
tmp104 = tl.load(in_ptr1 + (tmp92 + (3*tmp86) + (9*x0)), xmask, eviction_policy='evict_last')
tmp105 = tl.load(in_ptr1 + (tmp95 + (3*tmp86) + (9*x0)), xmask, eviction_policy='evict_last')
tmp106 = tmp105 - tmp104
tmp107 = tmp106 * tmp101
tmp108 = tmp104 + tmp107
tmp109 = tmp103 - tmp108
tmp110 = tmp86.to(tl.float32)
tmp111 = tmp85 - tmp110
tmp112 = triton_helpers.maximum(tmp111, tmp9)
tmp113 = triton_helpers.minimum(tmp112, tmp60)
tmp114 = tmp109 * tmp113
tmp115 = tmp108 + tmp114
tmp116 = 1.6666666666666667
tmp117 = tmp6 * tmp116
tmp118 = triton_helpers.maximum(tmp117, tmp9)
tmp119 = tmp118.to(tl.int32)
tmp120 = tmp119 + tmp12
tmp121 = tl.full([1, 1], 5, tl.int64)
tmp122 = triton_helpers.minimum(tmp120, tmp121)
tmp123 = tmp16 * tmp116
tmp124 = triton_helpers.maximum(tmp123, tmp9)
tmp125 = tmp124.to(tl.int32)
tmp126 = tl.load(in_ptr2 + (tmp125 + (6*tmp122) + (36*x0)), xmask, eviction_policy='evict_last')
tmp127 = tmp125 + tmp12
tmp128 = triton_helpers.minimum(tmp127, tmp121)
tmp129 = tl.load(in_ptr2 + (tmp128 + (6*tmp122) + (36*x0)), xmask, eviction_policy='evict_last')
tmp130 = tmp129 - tmp126
tmp131 = tmp125.to(tl.float32)
tmp132 = tmp124 - tmp131
tmp133 = triton_helpers.maximum(tmp132, tmp9)
tmp134 = triton_helpers.minimum(tmp133, tmp60)
tmp135 = tmp130 * tmp134
tmp136 = tmp126 + tmp135
tmp137 = tl.load(in_ptr2 + (tmp125 + (6*tmp119) + (36*x0)), xmask, eviction_policy='evict_last')
tmp138 = tl.load(in_ptr2 + (tmp128 + (6*tmp119) + (36*x0)), xmask, eviction_policy='evict_last')
tmp139 = tmp138 - tmp137
tmp140 = tmp139 * tmp134
tmp141 = tmp137 + tmp140
tmp142 = tmp136 - tmp141
tmp143 = tmp119.to(tl.float32)
tmp144 = tmp118 - tmp143
tmp145 = triton_helpers.maximum(tmp144, tmp9)
tmp146 = triton_helpers.minimum(tmp145, tmp60)
tmp147 = tmp142 * tmp146
tmp148 = tmp141 + tmp147
tmp149 = tmp82 + tmp115
tmp150 = tmp149 + tmp148
tl.store(in_out_ptr4 + (r1 + (16*x0)), tmp150, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf10 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten._adaptive_avg_pool2d]
stream0 = get_raw_stream(0)
triton_poi_fused__adaptive_avg_pool2d_0.run(arg0_1, buf10, 576, grid=grid(576), stream=stream0)
buf7 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten._adaptive_avg_pool2d]
triton_poi_fused__adaptive_avg_pool2d_1.run(arg0_1, buf7, 144, grid=grid(144), stream=stream0)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf12 = buf11; del buf11 # reuse
buf13 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [x, x_1, feat, x_2, x_3, feat_1, x_5, feat_2, x_7, feat_3], Original ATen: [aten.mean, aten._to_copy, aten.arange, aten.mul, aten.clamp, aten._unsafe_index, aten.sub, aten.add, aten._adaptive_avg_pool2d]
triton_per_fused__adaptive_avg_pool2d__to_copy__unsafe_index_add_arange_clamp_mean_mul_sub_2.run(buf13, arg0_1, buf7, buf10, 16, 16, grid=grid(16), stream=stream0)
del arg0_1
del buf10
del buf7
return (buf13, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class PyramidPoolingModule(nn.Module):
def __init__(self, pyramids=[1, 2, 3, 6]):
super(PyramidPoolingModule, self).__init__()
self.pyramids = pyramids
def forward(self, input):
feat = input
height, width = input.shape[2:]
for bin_size in self.pyramids:
x = F.adaptive_avg_pool2d(input, output_size=bin_size)
x = F.interpolate(x, size=(height, width), mode='bilinear',
align_corners=True)
feat = feat + x
return feat
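# A minimal self-check sketch (an illustrative addition, not part of the
# source repository): every pooled branch is upsampled back to the input
# resolution before being accumulated onto `feat`, so the module is
# shape-preserving.
def _example_usage():
    ppm = PyramidPoolingModule()
    x = torch.rand(4, 4, 4, 4)
    out = ppm(x)
    assert out.shape == x.shape
    return out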
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
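# Kernel 0 below computes adaptive_avg_pool2d(input, output_size=6) on the
# (4, 4, 4, 4) input: each of the 576 output cells averages the input
# window floor(4*i/6)..ceil(4*(i+1)/6) along each axis and divides by the
# exact element count accumulated in tmp29. Kernel 1 is the analogous
# output_size=3 pooling.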
@triton.jit
def triton_poi_fused__adaptive_avg_pool2d_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 6 % 6
x0 = xindex % 6
x2 = xindex // 36
x4 = xindex
tmp0 = 2 * x1 // 3
tmp1 = (9 + 4 * x1) // 6
tmp2 = tmp0 < tmp1
tmp3 = 2 * x0 // 3
tmp4 = (9 + 4 * x0) // 6
tmp5 = tmp3 < tmp4
tmp6 = tmp2 & tmp5
tmp7 = tl.load(in_ptr0 + (4 * (2 * x1 // 3) + 16 * x2 + 2 * x0 // 3),
tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp8 = 1 + 2 * x0 // 3
tmp9 = tmp8 < tmp4
tmp10 = tmp2 & tmp9
tmp11 = tl.load(in_ptr0 + (1 + 4 * (2 * x1 // 3) + 16 * x2 + 2 * x0 //
3), tmp10 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tmp11 + tmp7
tmp13 = 1 + 2 * x1 // 3
tmp14 = tmp13 < tmp1
tmp15 = tmp14 & tmp5
tmp16 = tl.load(in_ptr0 + (4 + 4 * (2 * x1 // 3) + 16 * x2 + 2 * x0 //
3), tmp15 & xmask, eviction_policy='evict_last', other=0.0)
tmp17 = tmp16 + tmp12
tmp18 = tmp14 & tmp9
tmp19 = tl.load(in_ptr0 + (5 + 4 * (2 * x1 // 3) + 16 * x2 + 2 * x0 //
3), tmp18 & xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tmp19 + tmp17
tmp21 = 1.0
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp6, tmp21, tmp22)
tmp24 = tl.where(tmp10, tmp21, tmp22)
tmp25 = tmp24 + tmp23
tmp26 = tl.where(tmp15, tmp21, tmp22)
tmp27 = tmp26 + tmp25
tmp28 = tl.where(tmp18, tmp21, tmp22)
tmp29 = tmp28 + tmp27
tmp30 = tmp20 / tmp29
tl.store(out_ptr0 + x4, tmp30, xmask)
@triton.jit
def triton_poi_fused__adaptive_avg_pool2d_1(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 3 % 3
x0 = xindex % 3
x2 = xindex // 9
x4 = xindex
tmp0 = 4 * x1 // 3
tmp1 = 2 + 4 * x1 // 3
tmp2 = tmp0 < tmp1
tmp3 = 4 * x0 // 3
tmp4 = 2 + 4 * x0 // 3
tmp5 = tmp3 < tmp4
tmp6 = tmp2 & tmp5
tmp7 = tl.load(in_ptr0 + (4 * (4 * x1 // 3) + 16 * x2 + 4 * x0 // 3),
tmp6 & xmask, other=0.0)
tmp8 = 1 + 4 * x0 // 3
tmp9 = tmp8 < tmp4
tmp10 = tmp2 & tmp9
tmp11 = tl.load(in_ptr0 + (1 + 4 * (4 * x1 // 3) + 16 * x2 + 4 * x0 //
3), tmp10 & xmask, other=0.0)
tmp12 = tmp11 + tmp7
tmp13 = 1 + 4 * x1 // 3
tmp14 = tmp13 < tmp1
tmp15 = tmp14 & tmp5
tmp16 = tl.load(in_ptr0 + (4 + 4 * (4 * x1 // 3) + 16 * x2 + 4 * x0 //
3), tmp15 & xmask, other=0.0)
tmp17 = tmp16 + tmp12
tmp18 = tmp14 & tmp9
tmp19 = tl.load(in_ptr0 + (5 + 4 * (4 * x1 // 3) + 16 * x2 + 4 * x0 //
3), tmp18 & xmask, other=0.0)
tmp20 = tmp19 + tmp17
tmp21 = 1.0
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp6, tmp21, tmp22)
tmp24 = tl.where(tmp10, tmp21, tmp22)
tmp25 = tmp24 + tmp23
tmp26 = tl.where(tmp15, tmp21, tmp22)
tmp27 = tmp26 + tmp25
tmp28 = tl.where(tmp18, tmp21, tmp22)
tmp29 = tmp28 + tmp27
tmp30 = tmp20 / tmp29
tl.store(out_ptr0 + x4, tmp30, xmask)
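# Kernel 2 fuses all four pyramid branches into one pass: the bin-1 branch
# collapses to the per-channel mean (tmp67), the bin-2 branch is 2x2
# average-pooled on the fly from in_ptr0, and the bin-3 / bin-6 branches
# read the precomputed pools from in_ptr1 / in_ptr2. Each branch is
# bilinearly upsampled with align_corners=True (source coordinate
# i * (bin - 1) / 3 for the size-4 output) and summed onto the input in
# in_out_ptr4.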
@triton.jit
def triton_per_fused__adaptive_avg_pool2d__to_copy__unsafe_index_add_arange_clamp_mean_mul_sub_2(
in_out_ptr4, in_ptr0, in_ptr1, in_ptr2, xnumel, rnumel, XBLOCK: tl.
constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
r3 = rindex // 4
r2 = rindex % 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = r3
tmp6 = tmp5.to(tl.float32)
tmp7 = 0.3333333333333333
tmp8 = tmp6 * tmp7
tmp9 = 0.0
tmp10 = triton_helpers.maximum(tmp8, tmp9)
tmp11 = tmp10.to(tl.int32)
tmp12 = tl.full([1, 1], 1, tl.int64)
tmp13 = tmp11 + tmp12
tmp14 = triton_helpers.minimum(tmp13, tmp12)
tmp15 = r2
tmp16 = tmp15.to(tl.float32)
tmp17 = tmp16 * tmp7
tmp18 = triton_helpers.maximum(tmp17, tmp9)
tmp19 = tmp18.to(tl.int32)
tmp20 = tmp19 + tmp12
tmp21 = triton_helpers.minimum(tmp20, tmp12)
tmp22 = tl.load(in_ptr0 + (2 * tmp21 + 8 * tmp14 + 16 * x0), xmask,
eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (1 + 2 * tmp21 + 8 * tmp14 + 16 * x0), xmask,
eviction_policy='evict_last')
tmp24 = tmp23 + tmp22
tmp25 = tl.load(in_ptr0 + (4 + 2 * tmp21 + 8 * tmp14 + 16 * x0), xmask,
eviction_policy='evict_last')
tmp26 = tmp25 + tmp24
tmp27 = tl.load(in_ptr0 + (5 + 2 * tmp21 + 8 * tmp14 + 16 * x0), xmask,
eviction_policy='evict_last')
tmp28 = tmp27 + tmp26
tmp29 = 0.25
tmp30 = tmp28 * tmp29
tmp31 = tl.load(in_ptr0 + (2 * tmp19 + 8 * tmp14 + 16 * x0), xmask,
eviction_policy='evict_last')
tmp32 = tl.load(in_ptr0 + (1 + 2 * tmp19 + 8 * tmp14 + 16 * x0), xmask,
eviction_policy='evict_last')
tmp33 = tmp32 + tmp31
tmp34 = tl.load(in_ptr0 + (4 + 2 * tmp19 + 8 * tmp14 + 16 * x0), xmask,
eviction_policy='evict_last')
tmp35 = tmp34 + tmp33
tmp36 = tl.load(in_ptr0 + (5 + 2 * tmp19 + 8 * tmp14 + 16 * x0), xmask,
eviction_policy='evict_last')
tmp37 = tmp36 + tmp35
tmp38 = tmp37 * tmp29
tmp39 = tmp30 - tmp38
tmp40 = tl.load(in_ptr0 + (2 * tmp21 + 8 * tmp11 + 16 * x0), xmask,
eviction_policy='evict_last')
tmp41 = tl.load(in_ptr0 + (1 + 2 * tmp21 + 8 * tmp11 + 16 * x0), xmask,
eviction_policy='evict_last')
tmp42 = tmp41 + tmp40
tmp43 = tl.load(in_ptr0 + (4 + 2 * tmp21 + 8 * tmp11 + 16 * x0), xmask,
eviction_policy='evict_last')
tmp44 = tmp43 + tmp42
tmp45 = tl.load(in_ptr0 + (5 + 2 * tmp21 + 8 * tmp11 + 16 * x0), xmask,
eviction_policy='evict_last')
tmp46 = tmp45 + tmp44
tmp47 = tmp46 * tmp29
tmp48 = tl.load(in_ptr0 + (2 * tmp19 + 8 * tmp11 + 16 * x0), xmask,
eviction_policy='evict_last')
tmp49 = tl.load(in_ptr0 + (1 + 2 * tmp19 + 8 * tmp11 + 16 * x0), xmask,
eviction_policy='evict_last')
tmp50 = tmp49 + tmp48
tmp51 = tl.load(in_ptr0 + (4 + 2 * tmp19 + 8 * tmp11 + 16 * x0), xmask,
eviction_policy='evict_last')
tmp52 = tmp51 + tmp50
tmp53 = tl.load(in_ptr0 + (5 + 2 * tmp19 + 8 * tmp11 + 16 * x0), xmask,
eviction_policy='evict_last')
tmp54 = tmp53 + tmp52
tmp55 = tmp54 * tmp29
tmp56 = tmp47 - tmp55
tmp57 = tmp19.to(tl.float32)
tmp58 = tmp18 - tmp57
tmp59 = triton_helpers.maximum(tmp58, tmp9)
tmp60 = 1.0
tmp61 = triton_helpers.minimum(tmp59, tmp60)
tmp62 = tmp39 * tmp61
tmp63 = tmp38 + tmp62
tmp64 = tmp56 * tmp61
tmp65 = tmp55 + tmp64
tmp66 = 16.0
tmp67 = tmp4 / tmp66
tmp68 = tmp67 - tmp67
tmp69 = tmp68 * tmp9
tmp70 = tmp67 + tmp69
tmp71 = tmp70 - tmp70
tmp72 = tmp71 * tmp9
tmp73 = tmp70 + tmp72
tmp74 = tmp0 + tmp73
tmp75 = tmp63 - tmp65
tmp76 = tmp11.to(tl.float32)
tmp77 = tmp10 - tmp76
tmp78 = triton_helpers.maximum(tmp77, tmp9)
tmp79 = triton_helpers.minimum(tmp78, tmp60)
tmp80 = tmp75 * tmp79
tmp81 = tmp65 + tmp80
tmp82 = tmp74 + tmp81
tmp83 = 0.6666666666666666
tmp84 = tmp6 * tmp83
tmp85 = triton_helpers.maximum(tmp84, tmp9)
tmp86 = tmp85.to(tl.int32)
tmp87 = tmp86 + tmp12
tmp88 = tl.full([1, 1], 2, tl.int64)
tmp89 = triton_helpers.minimum(tmp87, tmp88)
tmp90 = tmp16 * tmp83
tmp91 = triton_helpers.maximum(tmp90, tmp9)
tmp92 = tmp91.to(tl.int32)
tmp93 = tl.load(in_ptr1 + (tmp92 + 3 * tmp89 + 9 * x0), xmask,
eviction_policy='evict_last')
tmp94 = tmp92 + tmp12
tmp95 = triton_helpers.minimum(tmp94, tmp88)
tmp96 = tl.load(in_ptr1 + (tmp95 + 3 * tmp89 + 9 * x0), xmask,
eviction_policy='evict_last')
tmp97 = tmp96 - tmp93
tmp98 = tmp92.to(tl.float32)
tmp99 = tmp91 - tmp98
tmp100 = triton_helpers.maximum(tmp99, tmp9)
tmp101 = triton_helpers.minimum(tmp100, tmp60)
tmp102 = tmp97 * tmp101
tmp103 = tmp93 + tmp102
tmp104 = tl.load(in_ptr1 + (tmp92 + 3 * tmp86 + 9 * x0), xmask,
eviction_policy='evict_last')
tmp105 = tl.load(in_ptr1 + (tmp95 + 3 * tmp86 + 9 * x0), xmask,
eviction_policy='evict_last')
tmp106 = tmp105 - tmp104
tmp107 = tmp106 * tmp101
tmp108 = tmp104 + tmp107
tmp109 = tmp103 - tmp108
tmp110 = tmp86.to(tl.float32)
tmp111 = tmp85 - tmp110
tmp112 = triton_helpers.maximum(tmp111, tmp9)
tmp113 = triton_helpers.minimum(tmp112, tmp60)
tmp114 = tmp109 * tmp113
tmp115 = tmp108 + tmp114
tmp116 = 1.6666666666666667
tmp117 = tmp6 * tmp116
tmp118 = triton_helpers.maximum(tmp117, tmp9)
tmp119 = tmp118.to(tl.int32)
tmp120 = tmp119 + tmp12
tmp121 = tl.full([1, 1], 5, tl.int64)
tmp122 = triton_helpers.minimum(tmp120, tmp121)
tmp123 = tmp16 * tmp116
tmp124 = triton_helpers.maximum(tmp123, tmp9)
tmp125 = tmp124.to(tl.int32)
tmp126 = tl.load(in_ptr2 + (tmp125 + 6 * tmp122 + 36 * x0), xmask,
eviction_policy='evict_last')
tmp127 = tmp125 + tmp12
tmp128 = triton_helpers.minimum(tmp127, tmp121)
tmp129 = tl.load(in_ptr2 + (tmp128 + 6 * tmp122 + 36 * x0), xmask,
eviction_policy='evict_last')
tmp130 = tmp129 - tmp126
tmp131 = tmp125.to(tl.float32)
tmp132 = tmp124 - tmp131
tmp133 = triton_helpers.maximum(tmp132, tmp9)
tmp134 = triton_helpers.minimum(tmp133, tmp60)
tmp135 = tmp130 * tmp134
tmp136 = tmp126 + tmp135
tmp137 = tl.load(in_ptr2 + (tmp125 + 6 * tmp119 + 36 * x0), xmask,
eviction_policy='evict_last')
tmp138 = tl.load(in_ptr2 + (tmp128 + 6 * tmp119 + 36 * x0), xmask,
eviction_policy='evict_last')
tmp139 = tmp138 - tmp137
tmp140 = tmp139 * tmp134
tmp141 = tmp137 + tmp140
tmp142 = tmp136 - tmp141
tmp143 = tmp119.to(tl.float32)
tmp144 = tmp118 - tmp143
tmp145 = triton_helpers.maximum(tmp144, tmp9)
tmp146 = triton_helpers.minimum(tmp145, tmp60)
tmp147 = tmp142 * tmp146
tmp148 = tmp141 + tmp147
tmp149 = tmp82 + tmp115
tmp150 = tmp149 + tmp148
tl.store(in_out_ptr4 + (r1 + 16 * x0), tmp150, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf10 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32
)
get_raw_stream(0)
triton_poi_fused__adaptive_avg_pool2d_0[grid(576)](arg0_1, buf10,
576, XBLOCK=256, num_warps=4, num_stages=1)
buf7 = empty_strided_cuda((4, 4, 3, 3), (36, 9, 3, 1), torch.float32)
triton_poi_fused__adaptive_avg_pool2d_1[grid(144)](arg0_1, buf7,
144, XBLOCK=256, num_warps=4, num_stages=1)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf12 = buf11
del buf11
buf13 = buf12
del buf12
triton_per_fused__adaptive_avg_pool2d__to_copy__unsafe_index_add_arange_clamp_mean_mul_sub_2[
grid(16)](buf13, arg0_1, buf7, buf10, 16, 16, XBLOCK=1,
num_warps=2, num_stages=1)
del arg0_1
del buf10
del buf7
return buf13,
class PyramidPoolingModuleNew(nn.Module):
def __init__(self, pyramids=[1, 2, 3, 6]):
super(PyramidPoolingModuleNew, self).__init__()
self.pyramids = pyramids
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| nassir90/ICNet-pytorch | PyramidPoolingModule | false | 16,140 | ["MIT"] | 83 | af6eec01a4419ce43c52d295bc502c366478fbd7 | https://github.com/nassir90/ICNet-pytorch/tree/af6eec01a4419ce43c52d295bc502c366478fbd7 |
RobertaClassificationHead | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/yy/cyya3js6wt64vdji3sfisvrqyfvqxwkwqq5mzg5bqjl2crzjs4t3.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# x_1 => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%select,), kwargs = {})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ey/cey6dsgmzj2byupf73e6nwt5fetf5ne2sa57kzcmy7ejvaqhqb72.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_3 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0); del buf1 # reuse
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_1.run(buf2, primals_3, buf4, 64, grid=grid(64), stream=stream0)
del primals_3
buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_5
return (reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(buf2, (16, 4), (4, 1), 0), primals_4, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from typing import Optional
class RobertaClassificationHead(nn.Module):
def __init__(self, num_classes, input_dim, inner_dim: 'Optional[int]'=
None, dropout: 'float'=0.1, activation=nn.ReLU):
super().__init__()
if not inner_dim:
inner_dim = input_dim
self.dense = nn.Linear(input_dim, inner_dim)
self.dropout = nn.Dropout(dropout)
self.out_proj = nn.Linear(inner_dim, num_classes)
self.activation_fn = activation()
def forward(self, features):
x = features[:, 0, :]
x = self.dropout(x)
x = self.dense(x)
x = self.activation_fn(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
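# Usage sketch (an illustrative addition, not from the upstream source):
# the head classifies from the first-position feature, so a
# (batch, seq_len, input_dim) input yields (batch, num_classes) logits.
def _example_head():
    head = RobertaClassificationHead(num_classes=4, input_dim=4)
    logits = head(torch.rand(2, 8, 4))
    assert logits.shape == (2, 4)
    return logits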
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_classes': 4, 'input_dim': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from typing import Optional
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
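# Kernel 0 below materialises the select features[:, 0]: it copies each
# batch's first dim-1 slice (16 contiguous values, 64 elements apart per
# batch) into a dense buffer so the following extern matmul can consume it.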
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
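# Kernel 1 adds the dense-layer bias in place, applies ReLU, and also
# stores the (activation <= 0) mask that threshold_backward consumes in
# the backward pass.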
@triton.jit
def triton_poi_fused_relu_threshold_backward_1(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
del buf1
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_1[grid(64)](buf2,
primals_3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (16, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf3)
del primals_5
return reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(
buf2, (16, 4), (4, 1), 0), primals_4, buf4
class RobertaClassificationHeadNew(nn.Module):
def __init__(self, num_classes, input_dim, inner_dim: 'Optional[int]'=
None, dropout: 'float'=0.1, activation=nn.ReLU):
super().__init__()
if not inner_dim:
inner_dim = input_dim
self.dense = nn.Linear(input_dim, inner_dim)
self.dropout = nn.Dropout(dropout)
self.out_proj = nn.Linear(inner_dim, num_classes)
self.activation_fn = activation()
def forward(self, input_0):
primals_2 = self.dense.weight
primals_3 = self.dense.bias
primals_4 = self.out_proj.weight
primals_5 = self.out_proj.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
| nateanl/text | RobertaClassificationHead | false | 16,141 | ["BSD-3-Clause"] | 3,172 | b26e9350ad387a84aefe131443bbbf1c51a8a618 | https://github.com/nateanl/text/tree/b26e9350ad387a84aefe131443bbbf1c51a8a618 |
BatchSpectralShrinkage | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/3s/c3skx4mn2m33smtk7yzu674msvwvmvura3rr2xlhz6y3zxy3e2j4.py
# Topologically Sorted Source Nodes: [pow_1, result], Original ATen: [aten.pow, aten.add]
# Source node to ATen node mapping:
# pow_1 => pow_1
# result => add
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%select, 2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_1, 0), kwargs = {})
triton_poi_fused_add_pow_0 = async_compile.triton('triton_poi_fused_add_pow_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_pow_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
tmp0 = tl.load(in_ptr0 + (3))
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tmp1 * tmp1
tmp3 = 0.0
tmp4 = tmp2 + tmp3
tl.store(out_ptr0 + (tl.full([XBLOCK], 0, tl.int32)), tmp4, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [svd], Original ATen: [aten._linalg_svd]
buf0 = torch.ops.aten._linalg_svd.default(reinterpret_tensor(arg0_1, (4, 4), (1, 4), 0))
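        # Note: feature.t() is never materialised; the transpose is
        # expressed as a stride swap ((1, 4) in place of (4, 1)) via
        # reinterpret_tensor before the SVD extern call.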
del arg0_1
buf2 = buf0[1]
del buf0
buf4 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [pow_1, result], Original ATen: [aten.pow, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_pow_0.run(buf2, buf4, 1, grid=grid(1), stream=stream0)
del buf2
return (buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class BatchSpectralShrinkage(nn.Module):
"""
The regularization term in `Catastrophic Forgetting Meets Negative Transfer:
    Batch Spectral Shrinkage for Safe Transfer Learning (NeurIPS 2019) <https://proceedings.neurips.cc/paper/2019/file/c6bff625bdb0393992c9d4db0c6bbe45-Paper.pdf>`_.
The BSS regularization of feature matrix :math:`F` can be described as:
.. math::
L_{bss}(F) = \\sum_{i=1}^{k} \\sigma_{-i}^2 ,
where :math:`k` is the number of singular values to be penalized, :math:`\\sigma_{-i}` is the :math:`i`-th smallest singular value of feature matrix :math:`F`.
All the singular values of feature matrix :math:`F` are computed by `SVD`:
.. math::
F = U\\Sigma V^T,
    where the main diagonal elements of the singular value matrix :math:`\\Sigma` are :math:`[\\sigma_1, \\sigma_2, ..., \\sigma_b]`.
Args:
k (int): The number of singular values to be penalized. Default: 1
Shape:
        - Input: :math:`(b, |\\mathcal{f}|)` where :math:`b` is the batch size and :math:`|\\mathcal{f}|` is the feature dimension.
- Output: scalar.
"""
def __init__(self, k=1):
super(BatchSpectralShrinkage, self).__init__()
self.k = k
def forward(self, feature):
result = 0
_u, s, _v = torch.svd(feature.t())
num = s.size(0)
for i in range(self.k):
result += torch.pow(s[num - 1 - i], 2)
return result
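# A worked reference sketch (an illustrative addition, not from the
# original repository): with the default k=1 the penalty reduces to the
# squared smallest singular value, which the compiled kernel hard-codes
# as s[3]**2 for this 4x4 input.
def _bss_reference(feature, k=1):
    # torch.linalg.svdvals returns singular values in descending order,
    # matching the deprecated torch.svd used above.
    s = torch.linalg.svdvals(feature.t())
    return sum(s[-1 - i] ** 2 for i in range(k))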
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
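# The kernel below is the k=1 specialisation of the BSS loop: it loads the
# last (smallest) of the four singular values produced by the SVD in `call`,
# squares it, and adds the initial result value of 0.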
@triton.jit
def triton_poi_fused_add_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
tmp0 = tl.load(in_ptr0 + 3)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tmp1 * tmp1
tmp3 = 0.0
tmp4 = tmp2 + tmp3
tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp4, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = torch.ops.aten._linalg_svd.default(reinterpret_tensor(arg0_1,
(4, 4), (1, 4), 0))
del arg0_1
buf2 = buf0[1]
del buf0
buf4 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_pow_0[grid(1)](buf2, buf4, 1, XBLOCK=1,
num_warps=1, num_stages=1)
del buf2
return buf4,
class BatchSpectralShrinkageNew(nn.Module):
"""
The regularization term in `Catastrophic Forgetting Meets Negative Transfer:
    Batch Spectral Shrinkage for Safe Transfer Learning (NeurIPS 2019) <https://proceedings.neurips.cc/paper/2019/file/c6bff625bdb0393992c9d4db0c6bbe45-Paper.pdf>`_.
The BSS regularization of feature matrix :math:`F` can be described as:
.. math::
L_{bss}(F) = \\sum_{i=1}^{k} \\sigma_{-i}^2 ,
where :math:`k` is the number of singular values to be penalized, :math:`\\sigma_{-i}` is the :math:`i`-th smallest singular value of feature matrix :math:`F`.
All the singular values of feature matrix :math:`F` are computed by `SVD`:
.. math::
F = U\\Sigma V^T,
    where the main diagonal elements of the singular value matrix :math:`\\Sigma` are :math:`[\\sigma_1, \\sigma_2, ..., \\sigma_b]`.
Args:
k (int): The number of singular values to be penalized. Default: 1
Shape:
        - Input: :math:`(b, |\\mathcal{f}|)` where :math:`b` is the batch size and :math:`|\\mathcal{f}|` is the feature dimension.
- Output: scalar.
"""
def __init__(self, k=1):
super(BatchSpectralShrinkageNew, self).__init__()
self.k = k
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| neka-nat/Transfer-Learning-Library | BatchSpectralShrinkage | false | 16,142 | ["MIT"] | 1,474 | a3b27b0d7562fa90a02e914140b37ab438469e6c | https://github.com/neka-nat/Transfer-Learning-Library/tree/a3b27b0d7562fa90a02e914140b37ab438469e6c |
CIFAR10_Net | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/3u/c3ucft62zfsdldfp5jo3ilwlzjqnalgzyy5tm2e2sonktu7eernk.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
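# Note: although the graph fragment lists the bias inside the convolution
# node, the generated kernel performs the per-channel bias add itself
# (in_ptr0 indexed by channel) and fuses the ReLU in place on the extern
# convolution's output.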
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 460800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 3600) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/v3/cv3g6yelvgzwyu3llhoytuo5q7ieh4is6hu4eainxjbiiqaknnwb.py
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 401408
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 3136) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/gg/cggecmyxgs77o5tqjqxnyucpmbtlvs2jzafuuusmwxnflcxxjzqi.py
# Topologically Sorted Source Nodes: [max_pool2d, x_1], Original ATen: [aten.max_pool2d_with_indices, aten.relu]
# Source node to ATen node mapping:
# max_pool2d => _low_memory_max_pool2d_with_offsets, getitem_1
# x_1 => relu_1
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%convolution_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%getitem,), kwargs = {})
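# Since max is monotone, clamping the 2x2 window maximum at zero here
# matches relu(max_pool2d(...)) from the eager module; the int8 argmax
# offsets (%getitem_1) are kept for max pooling's backward pass.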
triton_poi_fused_max_pool2d_with_indices_relu_2 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 100352
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 28
x1 = (xindex // 28)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (112*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (112*x1)), None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (56 + (2*x0) + (112*x1)), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (57 + (2*x0) + (112*x1)), None, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tl.store(out_ptr0 + (x2), tmp15, None)
tl.store(out_ptr1 + (x2), tmp18, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ar/carehaf65zlhitys3iauvqzlcwbhk4nbxzybckcleuv3qwzs73un.py
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# Graph fragment:
# %convolution_2 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_3 = async_compile.triton('triton_poi_fused_convolution_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 147456
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 576) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ua/cuan73uvblihzk5ct3zyws3uv6vww7zp5c5rnangiz2f6h2vwjln.py
# Topologically Sorted Source Nodes: [max_pool2d_1, x_2], Original ATen: [aten.max_pool2d_with_indices, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# max_pool2d_1 => _low_memory_max_pool2d_with_offsets_1, getitem_3
# x_2 => relu_2
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%convolution_2, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%getitem_2,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_4 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_4(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 36864
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 12
x1 = (xindex // 12)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (48*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (48*x1)), None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (24 + (2*x0) + (48*x1)), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (25 + (2*x0) + (48*x1)), None, eviction_policy='evict_last')
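    # The four loads fetch one 2x2 pooling window: offsets 0/1 are adjacent
    # columns and 24/25 the row below (input rows are 24 wide, so each output
    # row at stride 48 spans two input rows). tmp15 tracks the int8 argmax
    # index (0-3) for the backward pass, tmp18 applies ReLU to the window
    # max, and tmp20 records the <= 0 mask used by threshold_backward.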
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = 0.0
tmp20 = tmp18 <= tmp19
tl.store(out_ptr0 + (x2), tmp15, None)
tl.store(out_ptr1 + (x2), tmp18, None)
tl.store(out_ptr2 + (x2), tmp20, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/6q/c6qkmv345ky7fthyztagmptgxczer674zxxuwwnabo4nhw4dnwlj.py
# Topologically Sorted Source Nodes: [e1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# e1 => relu_3
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_9), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_5 = async_compile.triton('triton_poi_fused_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 50
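    # 1800 = 36 rows x 50 features of the fc1 matmul output; x0 picks the
    # feature index so the 50-element bias broadcasts per row before the
    # in-place ReLU via maximum(0, .).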
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (32, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (32, 32, 5, 5), (800, 25, 5, 1))
assert_size_stride(primals_5, (32, ), (1, ))
assert_size_stride(primals_6, (64, 32, 5, 5), (800, 25, 5, 1))
assert_size_stride(primals_7, (64, ), (1, ))
assert_size_stride(primals_8, (50, 1024), (1024, 1))
assert_size_stride(primals_9, (50, ), (1, ))
assert_size_stride(primals_10, (10, 50), (50, 1))
assert_size_stride(primals_11, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 60, 60), (115200, 3600, 60, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 460800, grid=grid(460800), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 56, 56), (100352, 3136, 56, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf3, primals_5, 401408, grid=grid(401408), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((4, 32, 28, 28), (25088, 784, 28, 1), torch.int8)
buf5 = empty_strided_cuda((4, 32, 28, 28), (25088, 784, 28, 1), torch.float32)
# Topologically Sorted Source Nodes: [max_pool2d, x_1], Original ATen: [aten.max_pool2d_with_indices, aten.relu]
triton_poi_fused_max_pool2d_with_indices_relu_2.run(buf3, buf4, buf5, 100352, grid=grid(100352), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 64, 24, 24), (36864, 576, 24, 1))
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
triton_poi_fused_convolution_3.run(buf7, primals_7, 147456, grid=grid(147456), stream=stream0)
del primals_7
buf8 = empty_strided_cuda((4, 64, 12, 12), (9216, 144, 12, 1), torch.int8)
buf9 = empty_strided_cuda((4, 64, 12, 12), (9216, 144, 12, 1), torch.float32)
buf13 = empty_strided_cuda((4, 64, 12, 12), (9216, 144, 12, 1), torch.bool)
# Topologically Sorted Source Nodes: [max_pool2d_1, x_2], Original ATen: [aten.max_pool2d_with_indices, aten.relu, aten.threshold_backward]
triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_4.run(buf7, buf8, buf9, buf13, 36864, grid=grid(36864), stream=stream0)
buf10 = empty_strided_cuda((36, 50), (50, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf9, (36, 1024), (1024, 1), 0), reinterpret_tensor(primals_8, (1024, 50), (1, 1024), 0), out=buf10)
buf11 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [e1], Original ATen: [aten.relu]
triton_poi_fused_relu_5.run(buf11, primals_9, 1800, grid=grid(1800), stream=stream0)
del primals_9
buf12 = empty_strided_cuda((36, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, buf11, reinterpret_tensor(primals_10, (50, 10), (1, 50), 0), alpha=1, beta=1, out=buf12)
del primals_11
return (buf12, buf11, primals_1, primals_3, primals_4, primals_6, buf1, buf3, buf4, buf5, buf7, buf8, reinterpret_tensor(buf9, (36, 1024), (1024, 1), 0), buf11, primals_10, primals_8, buf13, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 3, 5, 5), (75, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((32, 32, 5, 5), (800, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((64, 32, 5, 5), (800, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((50, 1024), (1024, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((50, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((10, 50), (50, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class CIFAR10_Net(nn.Module):
def __init__(self):
super(CIFAR10_Net, self).__init__()
self.conv1 = nn.Conv2d(3, 32, kernel_size=5)
self.conv2 = nn.Conv2d(32, 32, kernel_size=5)
self.conv3 = nn.Conv2d(32, 64, kernel_size=5)
self.fc1 = nn.Linear(1024, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(F.max_pool2d(self.conv2(x), 2))
x = F.relu(F.max_pool2d(self.conv3(x), 2))
x = x.view(-1, 1024)
e1 = F.relu(self.fc1(x))
x = F.dropout(e1, training=self.training)
x = self.fc2(x)
return x, e1
def get_embedding_dim(self):
return 50
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
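# Minimal eager-mode smoke test (an illustrative sketch, not part of the
# original repo). With the 64x64 inputs from get_inputs(), view(-1, 1024)
# folds the 4 x 64 x 12 x 12 feature map into 36 rows, which is why the
# compiled wrapper below produces (36, 10) logits rather than (4, 10).
if __name__ == "__main__":
    net = CIFAR10_Net().eval()
    x, = get_inputs()
    logits, e1 = net(x)
    assert logits.shape == (36, 10) and e1.shape == (36, 50)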
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 3600 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 3136 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_2(in_ptr0, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 28
x1 = xindex // 28
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 112 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 112 * x1), None, eviction_policy
='evict_last')
tmp7 = tl.load(in_ptr0 + (56 + 2 * x0 + 112 * x1), None,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (57 + 2 * x0 + 112 * x1), None,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tl.store(out_ptr0 + x2, tmp15, None)
tl.store(out_ptr1 + x2, tmp18, None)
@triton.jit
def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 576 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_4(in_ptr0,
out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 12
x1 = xindex // 12
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 48 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 48 * x1), None, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr0 + (24 + 2 * x0 + 48 * x1), None, eviction_policy
='evict_last')
tmp12 = tl.load(in_ptr0 + (25 + 2 * x0 + 48 * x1), None,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = 0.0
tmp20 = tmp18 <= tmp19
tl.store(out_ptr0 + x2, tmp15, None)
tl.store(out_ptr1 + x2, tmp18, None)
tl.store(out_ptr2 + x2, tmp20, None)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 1800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 50
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (32, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_4, (32, 32, 5, 5), (800, 25, 5, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (64, 32, 5, 5), (800, 25, 5, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (50, 1024), (1024, 1))
assert_size_stride(primals_9, (50,), (1,))
assert_size_stride(primals_10, (10, 50), (50, 1))
assert_size_stride(primals_11, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 60, 60), (115200, 3600, 60, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(460800)](buf1, primals_2,
460800, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 56, 56), (100352, 3136, 56, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_1[grid(401408)](buf3, primals_5,
401408, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 32, 28, 28), (25088, 784, 28, 1),
torch.int8)
buf5 = empty_strided_cuda((4, 32, 28, 28), (25088, 784, 28, 1),
torch.float32)
triton_poi_fused_max_pool2d_with_indices_relu_2[grid(100352)](buf3,
buf4, buf5, 100352, XBLOCK=512, num_warps=8, num_stages=1)
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 64, 24, 24), (36864, 576, 24, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_3[grid(147456)](buf7, primals_7,
147456, XBLOCK=512, num_warps=8, num_stages=1)
del primals_7
buf8 = empty_strided_cuda((4, 64, 12, 12), (9216, 144, 12, 1),
torch.int8)
buf9 = empty_strided_cuda((4, 64, 12, 12), (9216, 144, 12, 1),
torch.float32)
buf13 = empty_strided_cuda((4, 64, 12, 12), (9216, 144, 12, 1),
torch.bool)
triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_4[grid
(36864)](buf7, buf8, buf9, buf13, 36864, XBLOCK=512, num_warps=
4, num_stages=1)
buf10 = empty_strided_cuda((36, 50), (50, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf9, (36, 1024), (1024, 1), 0
), reinterpret_tensor(primals_8, (1024, 50), (1, 1024), 0), out
=buf10)
buf11 = buf10
del buf10
triton_poi_fused_relu_5[grid(1800)](buf11, primals_9, 1800, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_9
buf12 = empty_strided_cuda((36, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_11, buf11, reinterpret_tensor(
primals_10, (50, 10), (1, 50), 0), alpha=1, beta=1, out=buf12)
del primals_11
return (buf12, buf11, primals_1, primals_3, primals_4, primals_6, buf1,
buf3, buf4, buf5, buf7, buf8, reinterpret_tensor(buf9, (36, 1024),
(1024, 1), 0), buf11, primals_10, primals_8, buf13)
class CIFAR10_NetNew(nn.Module):
def __init__(self):
super(CIFAR10_NetNew, self).__init__()
self.conv1 = nn.Conv2d(3, 32, kernel_size=5)
self.conv2 = nn.Conv2d(32, 32, kernel_size=5)
self.conv3 = nn.Conv2d(32, 64, kernel_size=5)
self.fc1 = nn.Linear(1024, 50)
self.fc2 = nn.Linear(50, 10)
def get_embedding_dim(self):
return 50
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.fc1.weight
primals_9 = self.fc1.bias
primals_10 = self.fc2.weight
primals_11 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0], output[1]
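# Usage sketch (assumes a CUDA device, since call() pins cuda:0, and an
# input matching the asserted (4, 3, 64, 64) contiguous shape):
# net = CIFAR10_NetNew().cuda()
# logits, e1 = net(torch.rand(4, 3, 64, 64, device='cuda'))
# # logits is buf12 with shape (36, 10); e1 is buf11 with shape (36, 50).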
| nannullna/deep-active-learning | CIFAR10_Net | false | 16,143 | ["MIT"] | 465 | c54a995640c63ba4679129c5a1fd5cec9a2858e6 | https://github.com/nannullna/deep-active-learning/tree/c54a995640c63ba4679129c5a1fd5cec9a2858e6 |
BridgeFeatLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ds/cdswxvs3hoxarp72punvmpueo4aqrc4ih7ujshqdg4qhgexbwiig.py
# Topologically Sorted Source Nodes: [dist_mixed], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# dist_mixed => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%sqrt, %sqrt_1], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 2
x0 = xindex % 16
x2 = (xindex // 32)
x3 = xindex
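    # x1 in {0, 1} selects which of the two concatenated distance channels is
    # produced: each branch accumulates a squared difference over the 4 input
    # channels (offsets 0/16/32/48 within each 64-element sample), clamps at
    # 1e-12 and takes the sqrt -- the fused form of dist_mixed2s /
    # dist_mixed2t feeding the cat().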
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 - tmp6
tmp8 = tmp7 * tmp7
tmp9 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.load(in_ptr1 + (16 + x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp14 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr1 + (32 + x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp19 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tl.load(in_ptr1 + (48 + x0 + (64*x2)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp21 = tmp19 - tmp20
tmp22 = tmp21 * tmp21
tmp23 = tmp18 + tmp22
tmp24 = 1e-12
tmp25 = triton_helpers.maximum(tmp23, tmp24)
tmp26 = libdevice.sqrt(tmp25)
tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype)
tmp28 = tl.where(tmp4, tmp26, tmp27)
tmp29 = tmp0 >= tmp3
tmp30 = tl.full([1], 2, tl.int64)
tmp31 = tmp0 < tmp30
tmp32 = tl.load(in_ptr0 + (x0 + (64*x2)), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp33 = tl.load(in_ptr2 + (x0 + (64*x2)), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp34 = tmp32 - tmp33
tmp35 = tmp34 * tmp34
tmp36 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp37 = tl.load(in_ptr2 + (16 + x0 + (64*x2)), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp38 = tmp36 - tmp37
tmp39 = tmp38 * tmp38
tmp40 = tmp35 + tmp39
tmp41 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp42 = tl.load(in_ptr2 + (32 + x0 + (64*x2)), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp43 = tmp41 - tmp42
tmp44 = tmp43 * tmp43
tmp45 = tmp40 + tmp44
tmp46 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp47 = tl.load(in_ptr2 + (48 + x0 + (64*x2)), tmp29 & xmask, eviction_policy='evict_last', other=0.0)
tmp48 = tmp46 - tmp47
tmp49 = tmp48 * tmp48
tmp50 = tmp45 + tmp49
tmp51 = triton_helpers.maximum(tmp50, tmp24)
tmp52 = libdevice.sqrt(tmp51)
tmp53 = tl.full(tmp52.shape, 0.0, tmp52.dtype)
tmp54 = tl.where(tmp29, tmp52, tmp53)
tmp55 = tl.where(tmp4, tmp28, tmp54)
tl.store(out_ptr0 + (x3), tmp55, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ck/cckdt23kyogbyhrcnu7l23peqelhmrhx37dr5tgwdfuqdi7vissw.py
# Topologically Sorted Source Nodes: [mul, lam_dist_mixed, loss], Original ATen: [aten.mul, aten.sum, aten.mean]
# Source node to ATen node mapping:
# lam_dist_mixed => sum_3
# loss => mean
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg3_1, %cat), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1], True), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_3,), kwargs = {})
triton_per_fused_mean_mul_sum_1 = async_compile.triton('triton_per_fused_mean_mul_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_mul_sum_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_mul_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = (rindex // 16)
tmp0 = tl.load(in_ptr0 + (r0 + (32*r1)), None)
tmp1 = tl.load(in_ptr1 + (r0 + (32*r1)), None)
tmp3 = tl.load(in_ptr0 + (16 + r0 + (32*r1)), None)
tmp4 = tl.load(in_ptr1 + (16 + r0 + (32*r1)), None)
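    # Computes lam[:, 0] * dist[:, 0] + lam[:, 1] * dist[:, 1]: r0/r1 walk
    # the 16 spatial positions of each of the 4 samples (stride 32 per
    # sample), with +16 reaching the second channel. The reduction over all
    # 64 positions divided by 64.0 below is the final mean().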
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.sum(tmp7, 1)[:, None]
tmp10 = 64.0
tmp11 = tmp9 / tmp10
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp11, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 2, 4, 4), (32, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [dist_mixed], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(arg0_1, arg1_1, arg2_1, buf0, 128, grid=grid(128), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [mul, lam_dist_mixed, loss], Original ATen: [aten.mul, aten.sum, aten.mean]
triton_per_fused_mean_mul_sum_1.run(buf2, arg3_1, buf0, 1, 64, grid=grid(1), stream=stream0)
del arg3_1
del buf0
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg3_1 = rand_strided((4, 2, 4, 4), (32, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class BridgeFeatLoss(nn.Module):
def __init__(self):
super(BridgeFeatLoss, self).__init__()
def forward(self, f_s, f_t, f_mixed, lam):
dist_mixed2s = ((f_mixed - f_s) ** 2).sum(1, keepdim=True)
dist_mixed2t = ((f_mixed - f_t) ** 2).sum(1, keepdim=True)
dist_mixed2s = dist_mixed2s.clamp(min=1e-12).sqrt()
dist_mixed2t = dist_mixed2t.clamp(min=1e-12).sqrt()
dist_mixed = torch.cat((dist_mixed2s, dist_mixed2t), 1)
lam_dist_mixed = (lam * dist_mixed).sum(1, keepdim=True)
loss = lam_dist_mixed.mean()
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 2, 4, 4])]
def get_init_inputs():
return [[], {}]
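# Minimal eager sketch (illustrative, not from the original repo): with
# lam = [1, 0] along dim 1 the loss collapses to the mean clamped L2
# distance between f_mixed and f_s alone.
if __name__ == "__main__":
    f_s, f_t, f_mixed, _ = get_inputs()
    lam = torch.cat([torch.ones(4, 1, 4, 4), torch.zeros(4, 1, 4, 4)], 1)
    loss = BridgeFeatLoss()(f_s, f_t, f_mixed, lam)
    ref = ((f_mixed - f_s) ** 2).sum(1, keepdim=True).clamp(min=1e-12
        ).sqrt().mean()
    assert torch.allclose(loss, ref)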
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 2
x0 = xindex % 16
x2 = xindex // 32
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (x0 + 64 * x2), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp7 = tmp5 - tmp6
tmp8 = tmp7 * tmp7
tmp9 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.load(in_ptr1 + (16 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp14 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr1 + (32 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp19 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp20 = tl.load(in_ptr1 + (48 + x0 + 64 * x2), tmp4 & xmask,
eviction_policy='evict_last', other=0.0)
tmp21 = tmp19 - tmp20
tmp22 = tmp21 * tmp21
tmp23 = tmp18 + tmp22
tmp24 = 1e-12
tmp25 = triton_helpers.maximum(tmp23, tmp24)
tmp26 = libdevice.sqrt(tmp25)
tmp27 = tl.full(tmp26.shape, 0.0, tmp26.dtype)
tmp28 = tl.where(tmp4, tmp26, tmp27)
tmp29 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp32 = tl.load(in_ptr0 + (x0 + 64 * x2), tmp29 & xmask,
eviction_policy='evict_last', other=0.0)
tmp33 = tl.load(in_ptr2 + (x0 + 64 * x2), tmp29 & xmask,
eviction_policy='evict_last', other=0.0)
tmp34 = tmp32 - tmp33
tmp35 = tmp34 * tmp34
tmp36 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), tmp29 & xmask,
eviction_policy='evict_last', other=0.0)
tmp37 = tl.load(in_ptr2 + (16 + x0 + 64 * x2), tmp29 & xmask,
eviction_policy='evict_last', other=0.0)
tmp38 = tmp36 - tmp37
tmp39 = tmp38 * tmp38
tmp40 = tmp35 + tmp39
tmp41 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), tmp29 & xmask,
eviction_policy='evict_last', other=0.0)
tmp42 = tl.load(in_ptr2 + (32 + x0 + 64 * x2), tmp29 & xmask,
eviction_policy='evict_last', other=0.0)
tmp43 = tmp41 - tmp42
tmp44 = tmp43 * tmp43
tmp45 = tmp40 + tmp44
tmp46 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), tmp29 & xmask,
eviction_policy='evict_last', other=0.0)
tmp47 = tl.load(in_ptr2 + (48 + x0 + 64 * x2), tmp29 & xmask,
eviction_policy='evict_last', other=0.0)
tmp48 = tmp46 - tmp47
tmp49 = tmp48 * tmp48
tmp50 = tmp45 + tmp49
tmp51 = triton_helpers.maximum(tmp50, tmp24)
tmp52 = libdevice.sqrt(tmp51)
tmp53 = tl.full(tmp52.shape, 0.0, tmp52.dtype)
tmp54 = tl.where(tmp29, tmp52, tmp53)
tmp55 = tl.where(tmp4, tmp28, tmp54)
tl.store(out_ptr0 + x3, tmp55, xmask)
@triton.jit
def triton_per_fused_mean_mul_sum_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 32 * r1), None)
tmp1 = tl.load(in_ptr1 + (r0 + 32 * r1), None)
tmp3 = tl.load(in_ptr0 + (16 + r0 + 32 * r1), None)
tmp4 = tl.load(in_ptr1 + (16 + r0 + 32 * r1), None)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.sum(tmp7, 1)[:, None]
tmp10 = 64.0
tmp11 = tmp9 / tmp10
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp11, None)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 2, 4, 4), (32, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 2, 4, 4), (32, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(128)](arg0_1, arg1_1, arg2_1, buf0, 128,
XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
triton_per_fused_mean_mul_sum_1[grid(1)](buf2, arg3_1, buf0, 1, 64,
XBLOCK=1, num_warps=2, num_stages=1)
del arg3_1
del buf0
return buf2,
class BridgeFeatLossNew(nn.Module):
def __init__(self):
super(BridgeFeatLossNew, self).__init__()
def forward(self, input_0, input_1, input_2, input_3):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0]
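# Usage sketch (assumes a CUDA device; the four inputs must match the
# shapes/strides checked by assert_size_stride in call()):
# loss = BridgeFeatLossNew()(
#     torch.rand(4, 4, 4, 4, device='cuda'),
#     torch.rand(4, 4, 4, 4, device='cuda'),
#     torch.rand(4, 4, 4, 4, device='cuda'),
#     torch.rand(4, 2, 4, 4, device='cuda'))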
| neka-nat/Transfer-Learning-Library | BridgeFeatLoss | false | 16,144 | ["MIT"] | 1,474 | a3b27b0d7562fa90a02e914140b37ab438469e6c | https://github.com/neka-nat/Transfer-Learning-Library/tree/a3b27b0d7562fa90a02e914140b37ab438469e6c |
DirectedGraphConvolution | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/yo/cyon5wgvtctwibf7q6thlcsb7zc2udnpmprtgpfcylzz5ajrfrda.py
# Topologically Sorted Source Nodes: [sum_1, rowsum, norm_adj, sum_2, rowsum_1, inv_norm_adj], Original ATen: [aten.sum, aten.repeat, aten.div]
# Source node to ATen node mapping:
# inv_norm_adj => div_1
# norm_adj => div
# rowsum => repeat
# rowsum_1 => repeat_1
# sum_1 => sum_1
# sum_2 => sum_2
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%primals_1, [2], True), kwargs = {})
# %repeat : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%sum_1, [1, 1, 4]), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %repeat), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%permute, [2], True), kwargs = {})
# %repeat_1 : [num_users=1] = call_function[target=torch.ops.aten.repeat.default](args = (%sum_2, [1, 1, 4]), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%permute, %repeat_1), kwargs = {})
triton_poi_fused_div_repeat_sum_0 = async_compile.triton('triton_poi_fused_div_repeat_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_repeat_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_repeat_sum_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
x3 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (x0 + (16*x3)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (4 + x0 + (16*x3)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (8 + x0 + (16*x3)), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (12 + x0 + (16*x3)), xmask, eviction_policy='evict_last')
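    # Two normalizations in one pass: tmp1..tmp6 sum row x1 of the sample, so
    # tmp8 is the row-normalized adjacency, while tmp9..tmp14 sum column x0
    # (stride-4 loads), so tmp16 divides by the column sum. Storing tmp16
    # through out_ptr1's transposed (16, 1, 4) strides yields
    # normalize_adj(adj.transpose(1, 2)) without materializing the transpose.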
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp11 = tmp9 + tmp10
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = tmp0 / tmp15
tl.store(out_ptr0 + (x4), tmp8, xmask)
tl.store(out_ptr1 + (x4), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/2g/c2gow746iojnl6yugujjn3non5klwrqsxgmhc4ib5irxlfwbv7ap.py
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matmul_1 => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_1 = async_compile.triton('triton_poi_fused_clone_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/uw/cuwlkalong3cwuxng3zurtt3tgbk7ithqpljqcehvo5fjqbtfwrm.py
# Topologically Sorted Source Nodes: [matmul_3], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# matmul_3 => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_2,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_2 = async_compile.triton('triton_poi_fused_clone_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1 + (4*x0) + (16*x2)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x4), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/4g/c4gaxndascuo6ecsxixaubevwgh2bynn7zvjxdjdfvpibojmfoiz.py
# Topologically Sorted Source Nodes: [output1, output2, add, out], Original ATen: [aten.relu, aten.add, aten.div, aten.threshold_backward]
# Source node to ATen node mapping:
# add => add
# out => div_2
# output1 => relu
# output2 => relu_1
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_4,), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_9,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%relu, %relu_1), kwargs = {})
# %div_2 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, 2), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_add_div_relu_threshold_backward_3 = async_compile.triton('triton_poi_fused_add_div_relu_threshold_backward_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: '*i1', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_relu_threshold_backward_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_relu_threshold_backward_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp3 = tl.load(in_ptr1 + (x0), xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp1, tmp3)
tmp5 = tmp2 + tmp4
tmp6 = 0.5
tmp7 = tmp5 * tmp6
tmp8 = 0.0
tmp9 = tmp4 <= tmp8
tmp10 = tmp2 <= tmp8
tl.store(out_ptr0 + (x0), tmp7, xmask)
tl.store(out_ptr1 + (x0), tmp9, xmask)
tl.store(out_ptr2 + (x0), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), primals_2, out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
# Topologically Sorted Source Nodes: [sum_1, rowsum, norm_adj, sum_2, rowsum_1, inv_norm_adj], Original ATen: [aten.sum, aten.repeat, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_repeat_sum_0.run(primals_1, buf1, buf5, 64, grid=grid(64), stream=stream0)
del primals_1
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.clone]
triton_poi_fused_clone_1.run(buf1, buf2, 256, grid=grid(256), stream=stream0)
del buf1
buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), out=buf3)
buf4 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [matmul_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), primals_4, out=buf4)
del primals_4
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_3], Original ATen: [aten.clone]
triton_poi_fused_clone_2.run(buf5, buf6, 256, grid=grid(256), stream=stream0)
del buf5
buf7 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_3], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), out=buf7)
buf8 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [output1, output2, add, out], Original ATen: [aten.relu, aten.add, aten.div, aten.threshold_backward]
triton_poi_fused_add_div_relu_threshold_backward_3.run(buf3, buf7, buf8, buf9, buf10, 256, grid=grid(256), stream=stream0)
del buf3
del buf7
return (buf8, buf9, reinterpret_tensor(buf6, (16, 4, 4), (16, 1, 4), 0), reinterpret_tensor(primals_3, (4, 64), (1, 4), 0), buf10, reinterpret_tensor(buf2, (16, 4, 4), (16, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
def normalize_adj(adj):
last_dim = adj.size(-1)
rowsum = adj.sum(2, keepdim=True).repeat(1, 1, last_dim)
return torch.div(adj, rowsum)
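# E.g. normalize_adj(torch.tensor([[[2., 2.], [1., 3.]]])) divides each row
# by its own sum, giving [[[0.50, 0.50], [0.25, 0.75]]].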
class DirectedGraphConvolution(nn.Module):
def __init__(self, in_features, out_features):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.weight1 = nn.Parameter(torch.zeros((in_features, out_features)))
self.weight2 = nn.Parameter(torch.zeros((in_features, out_features)))
self.dropout = nn.Dropout(0.1)
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.weight1.data)
nn.init.xavier_uniform_(self.weight2.data)
def forward(self, inputs, adj):
norm_adj = normalize_adj(adj)
output1 = F.relu(torch.matmul(norm_adj, torch.matmul(inputs, self.
weight1)))
inv_norm_adj = normalize_adj(adj.transpose(1, 2))
output2 = F.relu(torch.matmul(inv_norm_adj, torch.matmul(inputs,
self.weight2)))
out = (output1 + output2) / 2
out = self.dropout(out)
return out
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_div_repeat_sum_0(in_ptr0, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x1 = xindex // 4
x0 = xindex % 4
x3 = xindex // 16
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (x0 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr0 + (4 + x0 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr0 + (8 + x0 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr0 + (12 + x0 + 16 * x3), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp11 = tmp9 + tmp10
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = tmp0 / tmp15
tl.store(out_ptr0 + x4, tmp8, xmask)
tl.store(out_ptr1 + x4, tmp16, xmask)
@triton.jit
def triton_poi_fused_clone_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 64
x2 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_clone_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1 + 4 * x0 + 16 * x2), xmask,
eviction_policy='evict_last')
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_div_relu_threshold_backward_3(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
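    # Fuses out = (relu(output1) + relu(output2)) / 2 and also emits the
    # two boolean (<= 0) masks that the ReLU backward passes will need.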
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp3 = tl.load(in_ptr1 + x0, xmask)
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp1, tmp3)
tmp5 = tmp2 + tmp4
tmp6 = 0.5
tmp7 = tmp5 * tmp6
tmp8 = 0.0
tmp9 = tmp4 <= tmp8
tmp10 = tmp2 <= tmp8
tl.store(out_ptr0 + x0, tmp7, xmask)
tl.store(out_ptr1 + x0, tmp9, xmask)
tl.store(out_ptr2 + x0, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
primals_2, out=buf0)
del primals_2
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4), (16, 1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_repeat_sum_0[grid(64)](primals_1, buf1, buf5,
64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_1
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_1[grid(256)](buf1, buf2, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf1
buf3 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), out=buf3)
buf4 = buf0
del buf0
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
primals_4, out=buf4)
del primals_4
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_clone_2[grid(256)](buf5, buf6, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del buf5
buf7 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf6, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf4, (16, 4, 4), (16, 4, 1), 0), out=buf7)
buf8 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
buf9 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf10 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_add_div_relu_threshold_backward_3[grid(256)](buf3,
buf7, buf8, buf9, buf10, 256, XBLOCK=256, num_warps=4, num_stages=1
)
del buf3
del buf7
return buf8, buf9, reinterpret_tensor(buf6, (16, 4, 4), (16, 1, 4), 0
), reinterpret_tensor(primals_3, (4, 64), (1, 4), 0
), buf10, reinterpret_tensor(buf2, (16, 4, 4), (16, 1, 4), 0)
def normalize_adj(adj):
last_dim = adj.size(-1)
rowsum = adj.sum(2, keepdim=True).repeat(1, 1, last_dim)
return torch.div(adj, rowsum)
class DirectedGraphConvolutionNew(nn.Module):
def __init__(self, in_features, out_features):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.weight1 = nn.Parameter(torch.zeros((in_features, out_features)))
self.weight2 = nn.Parameter(torch.zeros((in_features, out_features)))
self.dropout = nn.Dropout(0.1)
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.weight1.data)
nn.init.xavier_uniform_(self.weight2.data)
def __repr__(self):
return self.__class__.__name__ + ' (' + str(self.in_features
) + ' -> ' + str(self.out_features) + ')'
def forward(self, input_0, input_1):
primals_2 = self.weight1
primals_4 = self.weight2
primals_3 = input_0
primals_1 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| naszilla/naszilla | DirectedGraphConvolution | false | 16,145 | [
"Apache-2.0"
] | 112 | 5575cc8c95e79ce5743e8ea7ef53d6da900f8480 | https://github.com/naszilla/naszilla/tree/5575cc8c95e79ce5743e8ea7ef53d6da900f8480 |
ShapedSineModel | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/mw/cmwwzmirms7a2rs7dvyp4pq4i4u2riy23hpk6rijtgbopj5f7dyd.py
# Topologically Sorted Source Nodes: [mul, sin], Original ATen: [aten.mul, aten.sin]
# Source node to ATen node mapping:
# mul => mul
# sin => sin
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %primals_2), kwargs = {})
# %sin : [num_users=1] = call_function[target=torch.ops.aten.sin.default](args = (%mul,), kwargs = {})
triton_poi_fused_mul_sin_0 = async_compile.triton('triton_poi_fused_mul_sin_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sin_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sin_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (0))
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + (x0), xmask)
tmp3 = tmp1 * tmp2
tmp4 = tl_math.sin(tmp3)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (1, ), (1, ))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, sin], Original ATen: [aten.mul, aten.sin]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_sin_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0)
return (buf0, primals_1, primals_2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.utils.data
class ShapedSineModel(torch.nn.Module):
def __init__(self, theta=None):
super(ShapedSineModel, self).__init__()
if theta is None:
self.freq = torch.nn.Parameter(torch.Tensor([0.1]))
else:
self.freq = torch.nn.Parameter(torch.Tensor([theta]))
self.learning_rate = 1.0
def forward(self, x):
return torch.sin(self.freq * x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
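# Hedged sketch (illustration only; _check_sine_grad is a hypothetical
# helper): since d/dfreq sum(sin(freq * x)) = sum(x * cos(freq * x)), the
# autograd gradient of the scalar freq parameter can be checked directly.
def _check_sine_grad():
    model = ShapedSineModel(theta=0.5)
    x = torch.rand(4, 4, 4, 4)
    model(x).sum().backward()
    expected = (x * torch.cos(model.freq.detach() * x)).sum().reshape(1)
    assert torch.allclose(model.freq.grad, expected, atol=1e-05)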
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_sin_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp3 = tmp1 * tmp2
tmp4 = tl_math.sin(tmp3)
tl.store(out_ptr0 + x0, tmp4, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (1,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sin_0[grid(256)](primals_1, primals_2, buf0,
256, XBLOCK=256, num_warps=4, num_stages=1)
return buf0, primals_1, primals_2
class ShapedSineModelNew(torch.nn.Module):
def __init__(self, theta=None):
super(ShapedSineModelNew, self).__init__()
if theta is None:
self.freq = torch.nn.Parameter(torch.Tensor([0.1]))
else:
self.freq = torch.nn.Parameter(torch.Tensor([theta]))
self.learning_rate = 1.0
def forward(self, input_0):
primals_1 = self.freq
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
| neha191091/LearningToLearn | ShapedSineModel | false | 16,146 | [
"MIT"
] | 76 | 3619d27bb3b7a836d9423dfbdd8da82460d4fa73 | https://github.com/neha191091/LearningToLearn/tree/3619d27bb3b7a836d9423dfbdd8da82460d4fa73 |
VanillaGenerativeAdversarialLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/dj/cdjw23gjsvidmi7xlhfpigbxfqor2vsiltadbvmh3w5h4gtvhk46.py
# Topologically Sorted Source Nodes: [binary_cross_entropy_with_logits], Original ATen: [aten.binary_cross_entropy_with_logits]
# Source node to ATen node mapping:
# binary_cross_entropy_with_logits => abs_1, exp, full_default, full_default_1, log1p, mean, minimum, mul, neg, sub_1, sub_2
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%full_default, %arg0_1), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default_1, %arg0_1), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg0_1,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %sub_1), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_2,), kwargs = {})
triton_per_fused_binary_cross_entropy_with_logits_0 = async_compile.triton('triton_per_fused_binary_cross_entropy_with_logits_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_binary_cross_entropy_with_logits_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_0(in_out_ptr0, in_ptr0, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = 0.0
tmp2 = tmp1 * tmp0
tmp3 = triton_helpers.minimum(tmp1, tmp0)
tmp4 = tl_math.abs(tmp0)
tmp5 = -tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = libdevice.log1p(tmp6)
tmp8 = tmp3 - tmp7
tmp9 = tmp2 - tmp8
tmp10 = tl.broadcast_to(tmp9, [RBLOCK])
tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
tmp13 = 256.0
tmp14 = tmp12 / tmp13
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp14, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [binary_cross_entropy_with_logits], Original ATen: [aten.binary_cross_entropy_with_logits]
stream0 = get_raw_stream(0)
triton_per_fused_binary_cross_entropy_with_logits_0.run(buf1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class VanillaGenerativeAdversarialLoss(nn.Module):
"""
Loss for `Vanilla Generative Adversarial Network <https://arxiv.org/abs/1406.2661>`_
Args:
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Default: ``'mean'``
Inputs:
- prediction (tensor): unnormalized discriminator predictions
        - real (bool): whether the ground truth label is for real images or fake images. Default: ``True``
    .. warning::
        Do not use sigmoid as the last layer of the Discriminator.
"""
def __init__(self, reduction='mean'):
super(VanillaGenerativeAdversarialLoss, self).__init__()
self.bce_loss = nn.BCEWithLogitsLoss(reduction=reduction)
def forward(self, prediction, real=True):
if real:
label = torch.ones_like(prediction)
else:
label = torch.zeros_like(prediction)
return self.bce_loss(prediction, label)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
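# Hedged sketch (illustration only; _check_vanilla_gan_loss is a
# hypothetical helper): with real labels the loss reduces to
# softplus(-x).mean() and with fake labels to softplus(x).mean(), the same
# numerically stable form (1 - y) * x - min(0, x) + log1p(exp(-|x|)) that
# the fused kernel above evaluates with y = 1.
def _check_vanilla_gan_loss():
    import torch.nn.functional as F
    loss_fn = VanillaGenerativeAdversarialLoss()
    x = torch.randn(4, 4, 4, 4)
    assert torch.allclose(loss_fn(x, real=True), F.softplus(-x).mean())
    assert torch.allclose(loss_fn(x, real=False), F.softplus(x).mean())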
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_0(in_out_ptr0,
in_ptr0, xnumel, rnumel):
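    # Evaluates BCE-with-logits against an all-ones label via the stable
    # decomposition (1 - y) * x - (min(0, x) - log1p(exp(-|x|))) with
    # y = 1, then takes the mean over all 256 elements.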
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = 0.0
tmp2 = tmp1 * tmp0
tmp3 = triton_helpers.minimum(tmp1, tmp0)
tmp4 = tl_math.abs(tmp0)
tmp5 = -tmp4
tmp6 = tl_math.exp(tmp5)
tmp7 = libdevice.log1p(tmp6)
tmp8 = tmp3 - tmp7
tmp9 = tmp2 - tmp8
tmp10 = tl.broadcast_to(tmp9, [RBLOCK])
tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
tmp13 = 256.0
tmp14 = tmp12 / tmp13
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp14, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_binary_cross_entropy_with_logits_0[grid(1)](buf1,
arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
return buf1,
class VanillaGenerativeAdversarialLossNew(nn.Module):
"""
Loss for `Vanilla Generative Adversarial Network <https://arxiv.org/abs/1406.2661>`_
Args:
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Default: ``'mean'``
Inputs:
- prediction (tensor): unnormalized discriminator predictions
        - real (bool): whether the ground truth label is for real images or fake images. Default: ``True``
    .. warning::
        Do not use sigmoid as the last layer of the Discriminator.
"""
def __init__(self, reduction='mean'):
super(VanillaGenerativeAdversarialLossNew, self).__init__()
self.bce_loss = nn.BCEWithLogitsLoss(reduction=reduction)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| neka-nat/Transfer-Learning-Library | VanillaGenerativeAdversarialLoss | false | 16,147 | [
"MIT"
] | 1,474 | a3b27b0d7562fa90a02e914140b37ab438469e6c | https://github.com/neka-nat/Transfer-Learning-Library/tree/a3b27b0d7562fa90a02e914140b37ab438469e6c |
LeastSquaresGenerativeAdversarialLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/fn/cfnan4prpp2nynvmknv62m7csvqnx5mzj3pgx6jhbs444xjvww6z.py
# Topologically Sorted Source Nodes: [label, mse_loss], Original ATen: [aten.ones_like, aten.mse_loss]
# Source node to ATen node mapping:
# label => full_default
# mse_loss => mean, pow_1, sub
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %full_default), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
triton_per_fused_mse_loss_ones_like_0 = async_compile.triton('triton_per_fused_mse_loss_ones_like_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mse_loss_ones_like_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mse_loss_ones_like_0(in_out_ptr0, in_ptr0, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = 1.0
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 256.0
tmp8 = tmp6 / tmp7
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp8, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [label, mse_loss], Original ATen: [aten.ones_like, aten.mse_loss]
stream0 = get_raw_stream(0)
triton_per_fused_mse_loss_ones_like_0.run(buf1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class LeastSquaresGenerativeAdversarialLoss(nn.Module):
"""
Loss for `Least Squares Generative Adversarial Network (LSGAN) <https://arxiv.org/abs/1611.04076>`_
Args:
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Default: ``'mean'``
Inputs:
- prediction (tensor): unnormalized discriminator predictions
        - real (bool): whether the ground truth label is for real images or fake images. Default: ``True``
    .. warning::
        Do not use sigmoid as the last layer of the Discriminator.
"""
def __init__(self, reduction='mean'):
super(LeastSquaresGenerativeAdversarialLoss, self).__init__()
self.mse_loss = nn.MSELoss(reduction=reduction)
def forward(self, prediction, real=True):
if real:
label = torch.ones_like(prediction)
else:
label = torch.zeros_like(prediction)
return self.mse_loss(prediction, label)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
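# Hedged sketch (illustration only; _check_lsgan_loss is a hypothetical
# helper): with real labels the LSGAN loss is mean((x - 1)^2), exactly
# what the fused kernel above computes; with fake labels it is mean(x^2).
def _check_lsgan_loss():
    loss_fn = LeastSquaresGenerativeAdversarialLoss()
    x = torch.randn(4, 4, 4, 4)
    assert torch.allclose(loss_fn(x, real=True), ((x - 1) ** 2).mean())
    assert torch.allclose(loss_fn(x, real=False), (x ** 2).mean())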
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_mse_loss_ones_like_0(in_out_ptr0, in_ptr0, xnumel, rnumel
):
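    # One persistent reduction computing mean((x - 1)^2); the ones_like
    # label from the eager code is folded in as the constant 1.0.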
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = 1.0
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 256.0
tmp8 = tmp6 / tmp7
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp8, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mse_loss_ones_like_0[grid(1)](buf1, arg0_1, 1, 256,
num_warps=2, num_stages=1)
del arg0_1
return buf1,
class LeastSquaresGenerativeAdversarialLossNew(nn.Module):
"""
Loss for `Least Squares Generative Adversarial Network (LSGAN) <https://arxiv.org/abs/1611.04076>`_
Args:
reduction (str, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Default: ``'mean'``
Inputs:
- prediction (tensor): unnormalized discriminator predictions
        - real (bool): whether the ground truth label is for real images or fake images. Default: ``True``
    .. warning::
        Do not use sigmoid as the last layer of the Discriminator.
"""
def __init__(self, reduction='mean'):
super(LeastSquaresGenerativeAdversarialLossNew, self).__init__()
self.mse_loss = nn.MSELoss(reduction=reduction)
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| neka-nat/Transfer-Learning-Library | LeastSquaresGenerativeAdversarialLoss | false | 16,148 | [
"MIT"
] | 1,474 | a3b27b0d7562fa90a02e914140b37ab438469e6c | https://github.com/neka-nat/Transfer-Learning-Library/tree/a3b27b0d7562fa90a02e914140b37ab438469e6c |
Theta | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/cv/ccvh5pcjc57oskvep5v2okylauufvnesitt2j6dbyvr6fh3vwuer.py
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# output => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, 1.0), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/7k/c7kfhnpi6ogpuakmzhvlayoy3mb73znoruxx7bdkt5awlqvsi5js.py
# Topologically Sorted Source Nodes: [autograd_function_apply], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# autograd_function_apply => mul_1
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, 1.0), kwargs = {})
triton_poi_fused_mul_1 = async_compile.triton('triton_poi_fused_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [output], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [autograd_function_apply], Original ATen: [aten.mul]
triton_poi_fused_mul_1.run(buf2, primals_3, 256, grid=grid(256), stream=stream0)
del primals_3
return (buf2, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.autograd import Function
import torch
import torch.nn as nn
from typing import Tuple
from typing import Optional
from typing import Any
import torch.utils.data
class GradientReverseFunction(Function):
@staticmethod
def forward(ctx: 'Any', input: 'torch.Tensor', coeff: 'Optional[float]'=1.0
) ->torch.Tensor:
ctx.coeff = coeff
output = input * 1.0
return output
@staticmethod
def backward(ctx: 'Any', grad_output: 'torch.Tensor') ->Tuple[torch.
Tensor, Any]:
return grad_output.neg() * ctx.coeff, None
class GradientReverseLayer(nn.Module):
def __init__(self):
super(GradientReverseLayer, self).__init__()
def forward(self, *input):
return GradientReverseFunction.apply(*input)
class Theta(nn.Module):
"""
    maximize loss with respect to :math:`\theta`
    minimize loss with respect to features
"""
def __init__(self, dim: 'int'):
super(Theta, self).__init__()
self.grl1 = GradientReverseLayer()
self.grl2 = GradientReverseLayer()
self.layer1 = nn.Linear(dim, dim)
nn.init.eye_(self.layer1.weight)
nn.init.zeros_(self.layer1.bias)
def forward(self, features: 'torch.Tensor') ->torch.Tensor:
features = self.grl1(features)
return self.grl2(self.layer1(features))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
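# Hedged sketch (illustration only; _check_gradient_reversal is a
# hypothetical helper): the gradient-reverse function is an identity in
# the forward pass but negates and scales the gradient by coeff in the
# backward pass, which is what lets Theta maximize the loss that the
# features minimize.
def _check_gradient_reversal():
    x = torch.ones(3, requires_grad=True)
    GradientReverseFunction.apply(x, 2.0).sum().backward()
    assert torch.allclose(x.grad, torch.full((3,), -2.0))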
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.autograd import Function
import torch.nn as nn
from typing import Tuple
from typing import Optional
from typing import Any
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
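    # The gradient-reverse layer's forward is just x * 1.0 (an identity);
    # the sign flip lives entirely in its backward, which this forward
    # graph does not contain.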
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused_mul_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 1.0
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](primals_1, buf0, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
triton_poi_fused_mul_1[grid(256)](buf2, primals_3, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_3
return buf2, reinterpret_tensor(buf0, (64, 4), (4, 1), 0)
class GradientReverseFunction(Function):
@staticmethod
def forward(ctx: 'Any', input: 'torch.Tensor', coeff: 'Optional[float]'=1.0
) ->torch.Tensor:
ctx.coeff = coeff
output = input * 1.0
return output
@staticmethod
def backward(ctx: 'Any', grad_output: 'torch.Tensor') ->Tuple[torch.
Tensor, Any]:
return grad_output.neg() * ctx.coeff, None
class GradientReverseLayer(nn.Module):
def __init__(self):
super(GradientReverseLayer, self).__init__()
def forward(self, *input):
return GradientReverseFunction.apply(*input)
class ThetaNew(nn.Module):
"""
    maximize loss with respect to :math:`\theta`
    minimize loss with respect to features
"""
def __init__(self, dim: 'int'):
super(ThetaNew, self).__init__()
self.grl1 = GradientReverseLayer()
self.grl2 = GradientReverseLayer()
self.layer1 = nn.Linear(dim, dim)
nn.init.eye_(self.layer1.weight)
nn.init.zeros_(self.layer1.bias)
def forward(self, input_0):
primals_2 = self.layer1.weight
primals_3 = self.layer1.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
| neka-nat/Transfer-Learning-Library | Theta | false | 16,149 | [
"MIT"
] | 1,474 | a3b27b0d7562fa90a02e914140b37ab438469e6c | https://github.com/neka-nat/Transfer-Learning-Library/tree/a3b27b0d7562fa90a02e914140b37ab438469e6c |
GaussianKernel | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/xe/cxedqk5kxfk4utnfrixbnyqhrlbgj4i6sv6keatvftozhw66gz74.py
# Topologically Sorted Source Nodes: [sub, pow_1, l2_distance_square, neg, mean, mul, mul_1, truediv, exp], Original ATen: [aten.sub, aten.pow, aten.sum, aten.neg, aten.mean, aten.mul, aten.div, aten.exp]
# Source node to ATen node mapping:
# exp => exp
# l2_distance_square => sum_1
# mean => mean
# mul => mul
# mul_1 => mul_1
# neg => neg
# pow_1 => pow_1
# sub => sub
# truediv => div
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%unsqueeze, %unsqueeze_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %sum_1 : [num_users=2] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [2]), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_1,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_1,), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 1.0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, 2), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%neg, %mul_1), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%div,), kwargs = {})
triton_per_fused_div_exp_mean_mul_neg_pow_sub_sum_0 = async_compile.triton('triton_per_fused_div_exp_mean_mul_neg_pow_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_div_exp_mean_mul_neg_pow_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_div_exp_mean_mul_neg_pow_sub_sum_0(in_out_ptr0, in_ptr0, out_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = (rindex // 16) % 4
r2 = (rindex // 64)
r3 = rindex
tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (r0 + (64*r2)), None, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (16 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr0 + (32 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (48 + r0 + (64*r2)), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp19 = tl.broadcast_to(tmp18, [RBLOCK])
tmp21 = triton_helpers.promote_to_tensor(tl.sum(tmp19, 0))
tmp22 = 256.0
tmp23 = tmp21 / tmp22
tmp24 = 1.0
tmp25 = tmp23 * tmp24
tmp26 = -tmp18
tmp27 = 2.0
tmp28 = tmp25 * tmp27
tmp29 = tmp26 / tmp28
tmp30 = tl_math.exp(tmp29)
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp25, None)
tl.store(out_ptr1 + (tl.broadcast_to(r3, [RBLOCK])), tmp30, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1; del buf1 # reuse
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sub, pow_1, l2_distance_square, neg, mean, mul, mul_1, truediv, exp], Original ATen: [aten.sub, aten.pow, aten.sum, aten.neg, aten.mean, aten.mul, aten.div, aten.exp]
stream0 = get_raw_stream(0)
triton_per_fused_div_exp_mean_mul_neg_pow_sub_sum_0.run(buf2, arg0_1, buf3, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
return (buf3, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
from typing import Optional
import torch.utils.data
class GaussianKernel(nn.Module):
"""Gaussian Kernel Matrix
Gaussian Kernel k is defined by
.. math::
k(x_1, x_2) = \\exp \\left( - \\dfrac{\\| x_1 - x_2 \\|^2}{2\\sigma^2} \\right)
where :math:`x_1, x_2 \\in R^d` are 1-d tensors.
Gaussian Kernel Matrix K is defined on input group :math:`X=(x_1, x_2, ..., x_m),`
.. math::
K(X)_{i,j} = k(x_i, x_j)
Also by default, during training this layer keeps running estimates of the
    mean of squared L2 distances, which are then used to set the hyperparameter :math:`\\sigma`.
Mathematically, the estimation is :math:`\\sigma^2 = \\dfrac{\\alpha}{n^2}\\sum_{i,j} \\| x_i - x_j \\|^2`.
If :attr:`track_running_stats` is set to ``False``, this layer then does not
    keep running estimates, and uses a fixed :math:`\\sigma` instead.
Args:
sigma (float, optional): bandwidth :math:`\\sigma`. Default: None
track_running_stats (bool, optional): If ``True``, this module tracks the running mean of :math:`\\sigma^2`.
        Otherwise, it won't track such statistics and always uses a fixed :math:`\\sigma^2`. Default: ``True``
alpha (float, optional): :math:`\\alpha` which decides the magnitude of :math:`\\sigma^2` when track_running_stats is set to ``True``
Inputs:
- X (tensor): input group :math:`X`
Shape:
- Inputs: :math:`(minibatch, F)` where F means the dimension of input features.
- Outputs: :math:`(minibatch, minibatch)`
"""
def __init__(self, sigma: 'Optional[float]'=None, track_running_stats:
'Optional[bool]'=True, alpha: 'Optional[float]'=1.0):
super(GaussianKernel, self).__init__()
assert track_running_stats or sigma is not None
self.sigma_square = torch.tensor(sigma * sigma
) if sigma is not None else None
self.track_running_stats = track_running_stats
self.alpha = alpha
def forward(self, X: 'torch.Tensor') ->torch.Tensor:
l2_distance_square = ((X.unsqueeze(0) - X.unsqueeze(1)) ** 2).sum(2)
if self.track_running_stats:
self.sigma_square = self.alpha * torch.mean(l2_distance_square.
detach())
return torch.exp(-l2_distance_square / (2 * self.sigma_square))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
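# Hedged sketch (illustration only; _check_gaussian_kernel is a
# hypothetical helper): the kernel matrix is 1 on its diagonal (zero
# self-distance) and, with track_running_stats=True, sigma^2 is reset to
# alpha * mean(pairwise squared L2 distance) on every forward call.
def _check_gaussian_kernel():
    kernel = GaussianKernel(track_running_stats=True, alpha=1.0)
    X = torch.rand(8, 16)
    K = kernel(X)
    assert torch.allclose(torch.diagonal(K), torch.ones(8))
    d2 = ((X.unsqueeze(0) - X.unsqueeze(1)) ** 2).sum(2)
    assert torch.allclose(kernel.sigma_square, d2.mean())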
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
from typing import Optional
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_div_exp_mean_mul_neg_pow_sub_sum_0(in_out_ptr0,
in_ptr0, out_ptr1, xnumel, rnumel):
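    # Single persistent-reduction pass: accumulates the mean pairwise
    # squared distance into in_out_ptr0 (the sigma^2 estimate that is
    # also returned) and writes exp(-d^2 / (2 * sigma^2)) into out_ptr1.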
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16 % 4
r2 = rindex // 64
r3 = rindex
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None, eviction_policy='evict_last'
)
tmp1 = tl.load(in_ptr0 + (r0 + 64 * r2), None, eviction_policy='evict_last'
)
tmp4 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (16 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None, eviction_policy=
'evict_last')
tmp10 = tl.load(in_ptr0 + (32 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp14 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr0 + (48 + r0 + 64 * r2), None, eviction_policy=
'evict_last')
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp6 = tmp4 - tmp5
tmp7 = tmp6 * tmp6
tmp8 = tmp3 + tmp7
tmp11 = tmp9 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tmp8 + tmp12
tmp16 = tmp14 - tmp15
tmp17 = tmp16 * tmp16
tmp18 = tmp13 + tmp17
tmp19 = tl.broadcast_to(tmp18, [RBLOCK])
tmp21 = triton_helpers.promote_to_tensor(tl.sum(tmp19, 0))
tmp22 = 256.0
tmp23 = tmp21 / tmp22
tmp24 = 1.0
tmp25 = tmp23 * tmp24
tmp26 = -tmp18
tmp27 = 2.0
tmp28 = tmp25 * tmp27
tmp29 = tmp26 / tmp28
tmp30 = tl_math.exp(tmp29)
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp25, None)
tl.store(out_ptr1 + tl.broadcast_to(r3, [RBLOCK]), tmp30, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused_div_exp_mean_mul_neg_pow_sub_sum_0[grid(1)](buf2,
arg0_1, buf3, 1, 256, num_warps=2, num_stages=1)
del arg0_1
return buf3, buf2
class GaussianKernelNew(nn.Module):
"""Gaussian Kernel Matrix
Gaussian Kernel k is defined by
.. math::
k(x_1, x_2) = \\exp \\left( - \\dfrac{\\| x_1 - x_2 \\|^2}{2\\sigma^2} \\right)
where :math:`x_1, x_2 \\in R^d` are 1-d tensors.
Gaussian Kernel Matrix K is defined on input group :math:`X=(x_1, x_2, ..., x_m),`
.. math::
K(X)_{i,j} = k(x_i, x_j)
Also by default, during training this layer keeps running estimates of the
    mean of squared L2 distances, which are then used to set the hyperparameter :math:`\\sigma`.
Mathematically, the estimation is :math:`\\sigma^2 = \\dfrac{\\alpha}{n^2}\\sum_{i,j} \\| x_i - x_j \\|^2`.
If :attr:`track_running_stats` is set to ``False``, this layer then does not
    keep running estimates, and uses a fixed :math:`\\sigma` instead.
Args:
sigma (float, optional): bandwidth :math:`\\sigma`. Default: None
track_running_stats (bool, optional): If ``True``, this module tracks the running mean of :math:`\\sigma^2`.
            Otherwise, it won't track such statistics and always uses a fixed :math:`\\sigma^2`. Default: ``True``
        alpha (float, optional): :math:`\\alpha`, which decides the magnitude of :math:`\\sigma^2` when track_running_stats is set to ``True``. Default: 1.0
Inputs:
- X (tensor): input group :math:`X`
Shape:
- Inputs: :math:`(minibatch, F)` where F means the dimension of input features.
- Outputs: :math:`(minibatch, minibatch)`
"""
def __init__(self, sigma: 'Optional[float]'=None, track_running_stats:
'Optional[bool]'=True, alpha: 'Optional[float]'=1.0):
super(GaussianKernelNew, self).__init__()
assert track_running_stats or sigma is not None
        self.sigma_square = torch.tensor(sigma * sigma) if sigma is not None else None
self.track_running_stats = track_running_stats
self.alpha = alpha
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
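# Illustrative eager-mode sketch of what the fused kernel above computes,
# assuming alpha = 1.0 and the running sigma^2 estimate described in the
# docstring. `gaussian_kernel_reference` is an illustrative name, not an
# API exported by the library.
def gaussian_kernel_reference(x, alpha=1.0):
    # Pairwise squared L2 distances between elements of the input group.
    l2_distance_square = ((x.unsqueeze(0) - x.unsqueeze(1)) ** 2).sum(2)
    # sigma^2 estimated as alpha times the mean pairwise squared distance,
    # matching the `tmp25 = tmp23 * tmp24` scaling in the kernel (tmp24 = alpha).
    sigma_square = alpha * l2_distance_square.mean()
    return torch.exp(-l2_distance_square / (2 * sigma_square))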
| neka-nat/Transfer-Learning-Library | GaussianKernel | false | 16,150 | ["MIT"] | 1,474 | a3b27b0d7562fa90a02e914140b37ab438469e6c | https://github.com/neka-nat/Transfer-Learning-Library/tree/a3b27b0d7562fa90a02e914140b37ab438469e6c |
DivLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/tc/ctc25ciiogeyj3ememo4qh6yvwcievqofewsgojc4deqsei44rn4.py
# Topologically Sorted Source Nodes: [mu, sub, pow_1, mean_1, clamp, std, sum_1, loss_std], Original ATen: [aten.mean, aten.sub, aten.pow, aten.clamp, aten.sqrt, aten.sum, aten.neg]
# Source node to ATen node mapping:
# clamp => clamp_min
# loss_std => neg
# mean_1 => mean_1
# mu => mean
# pow_1 => pow_1
# std => sqrt
# sub => sub
# sum_1 => sum_1
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [0]), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %mean), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%pow_1, [0], True), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%mean_1, 1e-12), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%clamp_min,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sqrt,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_1,), kwargs = {})
triton_per_fused_clamp_mean_neg_pow_sqrt_sub_sum_0 = async_compile.triton('triton_per_fused_clamp_mean_neg_pow_sqrt_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_clamp_mean_neg_pow_sqrt_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_clamp_mean_neg_pow_sqrt_sub_sum_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr0 + (64 + r0), None)
tmp3 = tl.load(in_ptr0 + (128 + r0), None)
tmp5 = tl.load(in_ptr0 + (192 + r0), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-12
tmp22 = triton_helpers.maximum(tmp20, tmp21)
tmp23 = libdevice.sqrt(tmp22)
tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK])
tmp26 = tl.sum(tmp24, 1)[:, None]
tmp27 = -tmp26
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp27, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mu, sub, pow_1, mean_1, clamp, std, sum_1, loss_std], Original ATen: [aten.mean, aten.sub, aten.pow, aten.clamp, aten.sqrt, aten.sum, aten.neg]
stream0 = get_raw_stream(0)
triton_per_fused_clamp_mean_neg_pow_sqrt_sub_sum_0.run(buf1, arg0_1, 1, 64, grid=grid(1), stream=stream0)
del arg0_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class DivLoss(nn.Module):
def __init__(self):
super(DivLoss, self).__init__()
def forward(self, lam):
mu = lam.mean(0)
std = ((lam - mu) ** 2).mean(0, keepdim=True).clamp(min=1e-12).sqrt()
loss_std = -std.sum()
return loss_std
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
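# Sanity-check sketch: up to the 1e-12 clamp, this loss should match the
# negative sum of biased (divide-by-N) standard deviations over dim 0.
if __name__ == '__main__':
    lam = torch.rand(4, 4, 4, 4)
    expected = -lam.std(dim=0, unbiased=False).sum()
    assert torch.allclose(DivLoss()(lam), expected, atol=1e-06)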
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_clamp_mean_neg_pow_sqrt_sub_sum_0(in_out_ptr0, in_ptr0,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr0 + (64 + r0), None)
tmp3 = tl.load(in_ptr0 + (128 + r0), None)
tmp5 = tl.load(in_ptr0 + (192 + r0), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-12
tmp22 = triton_helpers.maximum(tmp20, tmp21)
tmp23 = libdevice.sqrt(tmp22)
tmp24 = tl.broadcast_to(tmp23, [XBLOCK, RBLOCK])
tmp26 = tl.sum(tmp24, 1)[:, None]
tmp27 = -tmp26
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp27, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_clamp_mean_neg_pow_sqrt_sub_sum_0[grid(1)](buf1,
arg0_1, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf1,
class DivLossNew(nn.Module):
def __init__(self):
super(DivLossNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
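# Usage sketch: the compiled wrapper is specialized to the exact (4, 4, 4, 4)
# contiguous layout checked by assert_size_stride in call(); other shapes or
# strides fail that guard, and CUDA is required.
if __name__ == '__main__' and torch.cuda.is_available():
    lam = torch.rand(4, 4, 4, 4, device='cuda')
    print(DivLossNew()(lam))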
| neka-nat/Transfer-Learning-Library | DivLoss | false | 16,151 | ["MIT"] | 1,474 | a3b27b0d7562fa90a02e914140b37ab438469e6c | https://github.com/neka-nat/Transfer-Learning-Library/tree/a3b27b0d7562fa90a02e914140b37ab438469e6c |
CorrelationAlignmentLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/i2/ci2cwyw4jxpalwmyllccuq6a5yjb26kh7daxxjmn3axgrehmtnb3.py
# Topologically Sorted Source Nodes: [mean_s, mean_t, sub_2, pow_1, mean_diff], Original ATen: [aten.mean, aten.sub, aten.pow]
# Source node to ATen node mapping:
# mean_diff => mean_2
# mean_s => mean
# mean_t => mean_1
# pow_1 => pow_1
# sub_2 => sub_2
# Graph fragment:
# %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [0], True), kwargs = {})
# %mean_1 : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%arg1_1, [0], True), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mean, %mean_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 2), kwargs = {})
# %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
triton_per_fused_mean_pow_sub_0 = async_compile.triton('triton_per_fused_mean_pow_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_pow_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_pow_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr0 + (4 + r0), None)
tmp3 = tl.load(in_ptr0 + (8 + r0), None)
tmp5 = tl.load(in_ptr0 + (12 + r0), None)
tmp9 = tl.load(in_ptr1 + (r0), None)
tmp10 = tl.load(in_ptr1 + (4 + r0), None)
tmp12 = tl.load(in_ptr1 + (8 + r0), None)
tmp14 = tl.load(in_ptr1 + (12 + r0), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp11 = tmp9 + tmp10
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = tmp15 / tmp7
tmp17 = tmp8 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK])
tmp21 = tl.sum(tmp19, 1)[:, None]
tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp21, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/xu/cxuzmvlqkyjljbyjwrwylrfxhfwrjpbsfgbjary2lrutdlnkenqz.py
# Topologically Sorted Source Nodes: [mean_s, cent_s], Original ATen: [aten.mean, aten.sub]
# Source node to ATen node mapping:
# cent_s => sub
# mean_s => mean
# Graph fragment:
# %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [0], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %mean), kwargs = {})
triton_poi_fused_mean_sub_1 = async_compile.triton('triton_poi_fused_mean_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_sub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_sub_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/7y/c7y3bczxpszkpgidavo5bb3dnvtxg5td2yb7lhaxaq6gi7vwtkbu.py
# Topologically Sorted Source Nodes: [mean_s, mean_t, sub_2, pow_1, mean_diff, cov_s, cov_t, sub_3, pow_2, cov_diff, add], Original ATen: [aten.mean, aten.sub, aten.pow, aten.div, aten.add]
# Source node to ATen node mapping:
# add => add
# cov_diff => mean_3
# cov_s => div
# cov_t => div_1
# mean_diff => mean_2
# mean_s => mean
# mean_t => mean_1
# pow_1 => pow_1
# pow_2 => pow_2
# sub_2 => sub_2
# sub_3 => sub_3
# Graph fragment:
# %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [0], True), kwargs = {})
# %mean_1 : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%arg1_1, [0], True), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mean, %mean_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 2), kwargs = {})
# %mean_2 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_1,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mm, 3), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mm_1, 3), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%div, %div_1), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_3, 2), kwargs = {})
# %mean_3 : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%pow_2,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean_2, %mean_3), kwargs = {})
triton_per_fused_add_div_mean_pow_sub_2 = async_compile.triton('triton_per_fused_add_div_mean_pow_sub_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mean_pow_sub_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mean_pow_sub_2(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp3 = tl.load(in_ptr1 + (r0), None)
tmp10 = tl.load(in_out_ptr0 + (0))
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, 1])
tmp1 = 0.3333333333333333
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp5 = tmp2 - tmp4
tmp6 = tmp5 * tmp5
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.sum(tmp7, 1)[:, None]
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = 16.0
tmp15 = tmp9 / tmp14
tmp16 = tmp13 + tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp16, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [mean_s, mean_t, sub_2, pow_1, mean_diff], Original ATen: [aten.mean, aten.sub, aten.pow]
stream0 = get_raw_stream(0)
triton_per_fused_mean_pow_sub_0.run(arg0_1, arg1_1, buf0, 1, 4, grid=grid(1), stream=stream0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean_s, cent_s], Original ATen: [aten.mean, aten.sub]
triton_poi_fused_mean_sub_1.run(arg0_1, buf1, 16, grid=grid(16), stream=stream0)
del arg0_1
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mm], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf1, (4, 4), (1, 4), 0), buf1, out=buf2)
buf3 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [mean_t, cent_t], Original ATen: [aten.mean, aten.sub]
triton_poi_fused_mean_sub_1.run(arg1_1, buf3, 16, grid=grid(16), stream=stream0)
del arg1_1
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mm_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf3, (4, 4), (1, 4), 0), buf3, out=buf4)
del buf3
buf6 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mean_s, mean_t, sub_2, pow_1, mean_diff, cov_s, cov_t, sub_3, pow_2, cov_diff, add], Original ATen: [aten.mean, aten.sub, aten.pow, aten.div, aten.add]
triton_per_fused_add_div_mean_pow_sub_2.run(buf6, buf2, buf4, 1, 16, grid=grid(1), stream=stream0)
del buf2
del buf4
return (buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.utils.data
class CorrelationAlignmentLoss(nn.Module):
"""The `Correlation Alignment Loss` in
`Deep CORAL: Correlation Alignment for Deep Domain Adaptation (ECCV 2016) <https://arxiv.org/pdf/1607.01719.pdf>`_.
Given source features :math:`f_S` and target features :math:`f_T`, the covariance matrices are given by
.. math::
C_S = \\frac{1}{n_S-1}(f_S^Tf_S-\\frac{1}{n_S}(\\textbf{1}^Tf_S)^T(\\textbf{1}^Tf_S))
.. math::
C_T = \\frac{1}{n_T-1}(f_T^Tf_T-\\frac{1}{n_T}(\\textbf{1}^Tf_T)^T(\\textbf{1}^Tf_T))
    where :math:`\\textbf{1}` denotes a column vector with all elements equal to 1, and :math:`n_S, n_T` denote the
    numbers of source and target samples, respectively. We use :math:`d` to denote the feature dimension and
:math:`{\\Vert\\cdot\\Vert}^2_F` to denote the squared matrix `Frobenius norm`. The correlation alignment loss is
given by
.. math::
l_{CORAL} = \\frac{1}{4d^2}\\Vert C_S-C_T \\Vert^2_F
Inputs:
- f_s (tensor): feature representations on source domain, :math:`f^s`
- f_t (tensor): feature representations on target domain, :math:`f^t`
Shape:
- f_s, f_t: :math:`(N, d)` where d means the dimension of input features, :math:`N=n_S=n_T` is mini-batch size.
- Outputs: scalar.
"""
def __init__(self):
super(CorrelationAlignmentLoss, self).__init__()
def forward(self, f_s: 'torch.Tensor', f_t: 'torch.Tensor') ->torch.Tensor:
mean_s = f_s.mean(0, keepdim=True)
mean_t = f_t.mean(0, keepdim=True)
cent_s = f_s - mean_s
cent_t = f_t - mean_t
cov_s = torch.mm(cent_s.t(), cent_s) / (len(f_s) - 1)
cov_t = torch.mm(cent_t.t(), cent_t) / (len(f_t) - 1)
mean_diff = (mean_s - mean_t).pow(2).mean()
cov_diff = (cov_s - cov_t).pow(2).mean()
return mean_diff + cov_diff
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
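# Usage sketch: for N = n_S = n_T = 4 and d = 4, the loss is the mean squared
# difference of the two feature means plus the mean squared difference of the
# two divide-by-(N-1) covariance matrices.
if __name__ == '__main__':
    f_s, f_t = torch.rand(4, 4), torch.rand(4, 4)
    print(CorrelationAlignmentLoss()(f_s, f_t))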
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_pow_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr0 + (4 + r0), None)
tmp3 = tl.load(in_ptr0 + (8 + r0), None)
tmp5 = tl.load(in_ptr0 + (12 + r0), None)
tmp9 = tl.load(in_ptr1 + r0, None)
tmp10 = tl.load(in_ptr1 + (4 + r0), None)
tmp12 = tl.load(in_ptr1 + (8 + r0), None)
tmp14 = tl.load(in_ptr1 + (12 + r0), None)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp11 = tmp9 + tmp10
tmp13 = tmp11 + tmp12
tmp15 = tmp13 + tmp14
tmp16 = tmp15 / tmp7
tmp17 = tmp8 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tl.broadcast_to(tmp18, [XBLOCK, RBLOCK])
tmp21 = tl.sum(tmp19, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp21, None)
@triton.jit
def triton_poi_fused_mean_sub_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_per_fused_add_div_mean_pow_sub_2(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp10 = tl.load(in_out_ptr0 + 0)
tmp11 = tl.broadcast_to(tmp10, [XBLOCK, 1])
tmp1 = 0.3333333333333333
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp5 = tmp2 - tmp4
tmp6 = tmp5 * tmp5
tmp7 = tl.broadcast_to(tmp6, [XBLOCK, RBLOCK])
tmp9 = tl.sum(tmp7, 1)[:, None]
tmp12 = 4.0
tmp13 = tmp11 / tmp12
tmp14 = 16.0
tmp15 = tmp9 / tmp14
tmp16 = tmp13 + tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp16, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_mean_pow_sub_0[grid(1)](arg0_1, arg1_1, buf0, 1, 4,
XBLOCK=1, num_warps=2, num_stages=1)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_mean_sub_1[grid(16)](arg0_1, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del arg0_1
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (4, 4), (1, 4), 0), buf1,
out=buf2)
buf3 = buf1
del buf1
triton_poi_fused_mean_sub_1[grid(16)](arg1_1, buf3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del arg1_1
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf3, (4, 4), (1, 4), 0), buf3,
out=buf4)
del buf3
buf6 = buf0
del buf0
triton_per_fused_add_div_mean_pow_sub_2[grid(1)](buf6, buf2, buf4,
1, 16, XBLOCK=1, num_warps=2, num_stages=1)
del buf2
del buf4
return buf6,
class CorrelationAlignmentLossNew(nn.Module):
"""The `Correlation Alignment Loss` in
`Deep CORAL: Correlation Alignment for Deep Domain Adaptation (ECCV 2016) <https://arxiv.org/pdf/1607.01719.pdf>`_.
Given source features :math:`f_S` and target features :math:`f_T`, the covariance matrices are given by
.. math::
C_S = \\frac{1}{n_S-1}(f_S^Tf_S-\\frac{1}{n_S}(\\textbf{1}^Tf_S)^T(\\textbf{1}^Tf_S))
.. math::
C_T = \\frac{1}{n_T-1}(f_T^Tf_T-\\frac{1}{n_T}(\\textbf{1}^Tf_T)^T(\\textbf{1}^Tf_T))
    where :math:`\\textbf{1}` denotes a column vector with all elements equal to 1, and :math:`n_S, n_T` denote the
    numbers of source and target samples, respectively. We use :math:`d` to denote the feature dimension and
:math:`{\\Vert\\cdot\\Vert}^2_F` to denote the squared matrix `Frobenius norm`. The correlation alignment loss is
given by
.. math::
l_{CORAL} = \\frac{1}{4d^2}\\Vert C_S-C_T \\Vert^2_F
Inputs:
- f_s (tensor): feature representations on source domain, :math:`f^s`
- f_t (tensor): feature representations on target domain, :math:`f^t`
Shape:
- f_s, f_t: :math:`(N, d)` where d means the dimension of input features, :math:`N=n_S=n_T` is mini-batch size.
- Outputs: scalar.
"""
def __init__(self):
super(CorrelationAlignmentLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
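# Equivalence sketch: on CUDA, the compiled wrapper should agree with the
# eager formula (mean difference plus covariance difference) for the fixed
# 4x4 inputs it is specialized to; the tolerance is an assumption.
if __name__ == '__main__' and torch.cuda.is_available():
    f_s = torch.rand(4, 4, device='cuda')
    f_t = torch.rand(4, 4, device='cuda')
    compiled = CorrelationAlignmentLossNew()(f_s, f_t)
    cent_s, cent_t = f_s - f_s.mean(0), f_t - f_t.mean(0)
    mean_diff = (f_s.mean(0) - f_t.mean(0)).pow(2).mean()
    cov_diff = (cent_s.t() @ cent_s / 3 - cent_t.t() @ cent_t / 3).pow(2).mean()
    assert torch.allclose(compiled, mean_diff + cov_diff, atol=1e-05)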
| neka-nat/Transfer-Learning-Library | CorrelationAlignmentLoss | false | 16,152 | ["MIT"] | 1,474 | a3b27b0d7562fa90a02e914140b37ab438469e6c | https://github.com/neka-nat/Transfer-Learning-Library/tree/a3b27b0d7562fa90a02e914140b37ab438469e6c |
Accuracy | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/bt/cbt3ifwhfqxk6gfux2vr2qvcztv4fen56aagzwb2jnxtnysnfw37.py
# Topologically Sorted Source Nodes: [pred, lab, eq, correct, float_1, truediv, acc], Original ATen: [aten.ge, aten.eq, aten.sum, aten._to_copy, aten.div, aten.mul]
# Source node to ATen node mapping:
# acc => mul
# correct => sum_1
# eq => eq
# float_1 => convert_element_type
# lab => ge_1
# pred => ge
# truediv => div
# Graph fragment:
# %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%arg0_1, 0.5), kwargs = {})
# %ge_1 : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%arg1_1, 0.5), kwargs = {})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%ge, %ge_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%eq,), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%sum_1, torch.float32), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%convert_element_type, 256), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, 100.0), kwargs = {})
triton_per_fused__to_copy_div_eq_ge_mul_sum_0 = async_compile.triton('triton_per_fused__to_copy_div_eq_ge_mul_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_div_eq_ge_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__to_copy_div_eq_ge_mul_sum_0(in_ptr0, in_ptr1, out_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp3 = tl.load(in_ptr1 + (r0), None)
tmp1 = 0.5
tmp2 = tmp0 >= tmp1
tmp4 = tmp3 >= tmp1
tmp5 = tmp2 == tmp4
tmp6 = tmp5.to(tl.int64)
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = tmp9.to(tl.float32)
tmp11 = 0.00390625
tmp12 = tmp10 * tmp11
tmp13 = 100.0
tmp14 = tmp12 * tmp13
tl.store(out_ptr1 + (tl.full([1], 0, tl.int32)), tmp14, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [pred, lab, eq, correct, float_1, truediv, acc], Original ATen: [aten.ge, aten.eq, aten.sum, aten._to_copy, aten.div, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused__to_copy_div_eq_ge_mul_sum_0.run(arg0_1, arg1_1, buf1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| from torch.nn import Module
import torch
from torch import Tensor
class Accuracy(Module):
"""
    Class for calculating the accuracy of a prediction against the given labels.
    Expects the inputs to be in the range 0 to 1 and applies a decision
    threshold at 0.5; the labels are thresholded the same way.
"""
def forward(self, pred: 'Tensor', lab: 'Tensor') ->Tensor:
"""
        :param pred: the model's prediction to compare with
:param lab: the labels for the data to compare to
:return: the calculated accuracy
"""
return Accuracy.calculate(pred, lab)
@staticmethod
def calculate(pred: 'Tensor', lab: 'Tensor'):
"""
        :param pred: the model's prediction to compare with
:param lab: the labels for the data to compare to
:return: the calculated accuracy
"""
pred = pred >= 0.5
lab = lab >= 0.5
correct = (pred == lab).sum()
total = lab.numel()
acc = correct.float() / total * 100.0
return acc
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
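# Usage sketch: thresholding both tensors at 0.5 turns this into an
# elementwise agreement rate; for independent uniform random inputs the
# expected accuracy is about 50%.
if __name__ == '__main__':
    pred, lab = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
    print(Accuracy()(pred, lab))  # percentage in [0, 100]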
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch.nn import Module
from torch import Tensor
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused__to_copy_div_eq_ge_mul_sum_0(in_ptr0, in_ptr1,
out_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 0.5
tmp2 = tmp0 >= tmp1
tmp4 = tmp3 >= tmp1
tmp5 = tmp2 == tmp4
tmp6 = tmp5.to(tl.int64)
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = tmp9.to(tl.float32)
tmp11 = 0.00390625
tmp12 = tmp10 * tmp11
tmp13 = 100.0
tmp14 = tmp12 * tmp13
tl.store(out_ptr1 + tl.full([1], 0, tl.int32), tmp14, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused__to_copy_div_eq_ge_mul_sum_0[grid(1)](arg0_1,
arg1_1, buf1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class AccuracyNew(Module):
"""
    Class for calculating the accuracy of a prediction against the given labels.
    Expects the inputs to be in the range 0 to 1 and applies a decision
    threshold at 0.5; the labels are thresholded the same way.
"""
@staticmethod
def calculate(pred: 'Tensor', lab: 'Tensor'):
"""
        :param pred: the model's prediction to compare with
:param lab: the labels for the data to compare to
:return: the calculated accuracy
"""
pred = pred >= 0.5
lab = lab >= 0.5
correct = (pred == lab).sum()
total = lab.numel()
acc = correct.float() / total * 100.0
return acc
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
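# Note on the kernel constant: 0.00390625 is 1 / 256, the reciprocal of
# lab.numel() for the fixed (4, 4, 4, 4) shape, so the eager division by
# `total` is folded into a compile-time multiply. Usage sketch (CUDA only):
if __name__ == '__main__' and torch.cuda.is_available():
    pred = torch.rand(4, 4, 4, 4, device='cuda')
    lab = torch.rand(4, 4, 4, 4, device='cuda')
    print(AccuracyNew()(pred, lab))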
| neuralmagic/sparseml | Accuracy | false | 16,153 | ["Apache-2.0"] | 922 | 3fe5f4d75796eba43508401de4070cb494370683 | https://github.com/neuralmagic/sparseml/tree/3fe5f4d75796eba43508401de4070cb494370683 |
DiceLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/33/c33u5ltxjw6rn4li2j53yyzvr5xmqgu5ezjzwlr4xkh266pnrokm.py
# Topologically Sorted Source Nodes: [mul, sum_1, mul_1, add, sum_2, sum_3, add_1, add_2, add_3, truediv, sub], Original ATen: [aten.mul, aten.sum, aten.add, aten.div, aten.rsub]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# add_3 => add_3
# mul => mul
# mul_1 => mul_1
# sub => sub
# sum_1 => sum_1
# sum_2 => sum_2
# sum_3 => sum_3
# truediv => div
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, 0), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%arg0_1,), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%arg1_1,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_3), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, 0), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, 1e-07), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, %add_3), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div), kwargs = {})
triton_per_fused_add_div_mul_rsub_sum_0 = async_compile.triton('triton_per_fused_add_div_mul_rsub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mul_rsub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.broadcast_to(tmp0, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = tl.broadcast_to(tmp1, [RBLOCK])
tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0))
tmp12 = 2.0
tmp13 = tmp5 * tmp12
tmp14 = 0.0
tmp15 = tmp13 + tmp14
tmp16 = tmp8 + tmp11
tmp17 = tmp16 + tmp14
tmp18 = 1e-07
tmp19 = tmp17 + tmp18
tmp20 = tmp15 / tmp19
tmp21 = 1.0
tmp22 = tmp21 - tmp20
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp22, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mul, sum_1, mul_1, add, sum_2, sum_3, add_1, add_2, add_3, truediv, sub], Original ATen: [aten.mul, aten.sum, aten.add, aten.div, aten.rsub]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_mul_rsub_sum_0.run(buf3, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class DiceLoss(nn.Module):
def __init__(self, smooth=0, eps=1e-07):
super(DiceLoss, self).__init__()
self.smooth = smooth
self.eps = eps
def forward(self, output, target):
        return 1 - (2 * torch.sum(output * target) + self.smooth) / (
            torch.sum(output) + torch.sum(target) + self.smooth + self.eps)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
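# Usage sketch: with smooth = 0 this is the soft Dice loss,
# 1 - 2*sum(X*Y) / (sum(X) + sum(Y) + eps), where the intersection is the
# elementwise product; for inputs in [0, 1] the result lies in [0, 1].
if __name__ == '__main__':
    output, target = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
    print(DiceLoss()(output, target))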
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.broadcast_to(tmp0, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = tl.broadcast_to(tmp1, [RBLOCK])
tmp11 = triton_helpers.promote_to_tensor(tl.sum(tmp9, 0))
tmp12 = 2.0
tmp13 = tmp5 * tmp12
tmp14 = 0.0
tmp15 = tmp13 + tmp14
tmp16 = tmp8 + tmp11
tmp17 = tmp16 + tmp14
tmp18 = 1e-07
tmp19 = tmp17 + tmp18
tmp20 = tmp15 / tmp19
tmp21 = 1.0
tmp22 = tmp21 - tmp20
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp22, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_mul_rsub_sum_0[grid(1)](buf3, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf3,
class DiceLossNew(nn.Module):
def __init__(self, smooth=0, eps=1e-07):
super(DiceLossNew, self).__init__()
self.smooth = smooth
self.eps = eps
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
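# Usage sketch: the fused kernel computes all three reductions
# (sum(output * target), sum(output), sum(target)) in a single 256-lane
# persistent-reduction pass before forming the Dice ratio. CUDA is required.
if __name__ == '__main__' and torch.cuda.is_available():
    output = torch.rand(4, 4, 4, 4, device='cuda')
    target = torch.rand(4, 4, 4, 4, device='cuda')
    print(DiceLossNew()(output, target))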
| neptune-ml/data-science-bowl-2018 | DiceLoss | false | 16,154 | ["MIT"] | 92 | 2f76f2fc9836e53ada16d1e084afa0108b119011 | https://github.com/neptune-ml/data-science-bowl-2018/tree/2f76f2fc9836e53ada16d1e084afa0108b119011 |
FocalLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/mn/cmnoexayfinw3rrbjkaydy3rpcc2wfjk3illd3jankzc4n2p5omt.py
# Topologically Sorted Source Nodes: [prediction_probabilities, mul, sub, sub_1, mul_1, p_t, sub_2, modulating_factor, mul_2, sub_3, mul_3, alpha_weight_factor, mul_4, per_entry_cross_ent, focal_cross_entropy_loss, mean], Original ATen: [aten.sigmoid, aten.mul, aten.rsub, aten.add, aten.pow, aten.binary_cross_entropy_with_logits, aten.mean]
# Source node to ATen node mapping:
# alpha_weight_factor => add_1
# focal_cross_entropy_loss => mul_6
# mean => mean
# modulating_factor => pow_1
# mul => mul_1
# mul_1 => mul_2
# mul_2 => mul_3
# mul_3 => mul_4
# mul_4 => mul_5
# p_t => add
# per_entry_cross_ent => abs_1, exp, full_default, log1p, minimum, mul, neg, sub, sub_1, sub_2
# prediction_probabilities => sigmoid
# sub => sub_3
# sub_1 => sub_4
# sub_2 => sub_5
# sub_3 => sub_6
# Graph fragment:
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg1_1,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %sigmoid), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %sub_4), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %mul_2), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %add), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_5, 2.0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.25), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, 0.75), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %mul_4), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, %add_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg1_1), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %arg1_1), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg1_1,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %sub_1), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_5, %sub_2), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%mul_6,), kwargs = {})
triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0 = async_compile.triton('triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tmp4 = 1.0
tmp5 = tmp4 - tmp0
tmp6 = tmp4 - tmp2
tmp7 = tmp5 * tmp6
tmp8 = tmp3 + tmp7
tmp9 = tmp4 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = 0.25
tmp12 = tmp0 * tmp11
tmp13 = 0.75
tmp14 = tmp5 * tmp13
tmp15 = tmp12 + tmp14
tmp16 = tmp10 * tmp15
tmp17 = tmp5 * tmp1
tmp18 = 0.0
tmp19 = triton_helpers.minimum(tmp18, tmp1)
tmp20 = tl_math.abs(tmp1)
tmp21 = -tmp20
tmp22 = tl_math.exp(tmp21)
tmp23 = libdevice.log1p(tmp22)
tmp24 = tmp19 - tmp23
tmp25 = tmp17 - tmp24
tmp26 = tmp16 * tmp25
tmp27 = tl.broadcast_to(tmp26, [RBLOCK])
tmp29 = triton_helpers.promote_to_tensor(tl.sum(tmp27, 0))
tmp30 = 256.0
tmp31 = tmp29 / tmp30
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp31, None)
''', device_str='cuda')
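# Note: the fused kernel above evaluates BCEWithLogits in its numerically
# stable form. With logits x (tmp1) and targets z (tmp0) it computes
#     (1 - z) * x - (min(0, x) - log1p(exp(-|x|)))
# which is algebraically equal to -z*log(sigmoid(x)) - (1-z)*log(1-sigmoid(x))
# but never overflows exp() for large |x|.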
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [prediction_probabilities, mul, sub, sub_1, mul_1, p_t, sub_2, modulating_factor, mul_2, sub_3, mul_3, alpha_weight_factor, mul_4, per_entry_cross_ent, focal_cross_entropy_loss, mean], Original ATen: [aten.sigmoid, aten.mul, aten.rsub, aten.add, aten.pow, aten.binary_cross_entropy_with_logits, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0.run(buf1, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.optim
class FocalLoss(torch.nn.Module):
"""Sigmoid focal cross entropy loss.
Focal loss down-weights well-classified examples and focuses on the hard
examples. See https://arxiv.org/pdf/1708.02002.pdf for the loss definition.
"""
def __init__(self, gamma=2.0, alpha=0.25):
"""Constructor.
Args:
gamma: exponent of the modulating factor (1 - p_t)^gamma.
alpha: optional alpha weighting factor to balance positives vs negatives,
with alpha in [0, 1] for class 1 and 1-alpha for class 0.
In practice alpha may be set by inverse class frequency,
so that when positives are rare, their weight is high.
"""
super(FocalLoss, self).__init__()
self._alpha = alpha
self._gamma = gamma
self.BCEWithLogits = nn.BCEWithLogitsLoss(reduction='none')
def forward(self, prediction_tensor, target_tensor):
"""Compute loss function.
Args:
prediction_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing the predicted logits for each class
target_tensor: A float tensor of shape [batch_size, num_anchors,
num_classes] representing one-hot encoded classification targets.
Returns:
loss: a float tensor of shape [batch_size, num_anchors, num_classes]
representing the value of the loss function.
"""
per_entry_cross_ent = self.BCEWithLogits(prediction_tensor,
target_tensor)
prediction_probabilities = torch.sigmoid(prediction_tensor)
p_t = target_tensor * prediction_probabilities + (1 - target_tensor
) * (1 - prediction_probabilities)
modulating_factor = 1.0
if self._gamma:
modulating_factor = torch.pow(1.0 - p_t, self._gamma)
alpha_weight_factor = 1.0
if self._alpha is not None:
alpha_weight_factor = target_tensor * self._alpha + (1 -
target_tensor) * (1 - self._alpha)
focal_cross_entropy_loss = (modulating_factor * alpha_weight_factor *
per_entry_cross_ent)
return torch.mean(focal_cross_entropy_loss)
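# A minimal sanity-check sketch (illustrative, not part of the original
# module): with gamma=0 and alpha=None the focal loss must reduce to plain
# mean BCE-with-logits. Shapes and tolerance here are arbitrary choices.
def _focal_loss_sanity_check():
    logits = torch.randn(2, 3, 5)
    targets = torch.randint(0, 2, (2, 3, 5)).float()
    focal = FocalLoss(gamma=0.0, alpha=None)(logits, targets)
    ref = torch.nn.functional.binary_cross_entropy_with_logits(logits, targets)
    assert torch.allclose(focal, ref, atol=1e-6)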
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tmp4 = 1.0
tmp5 = tmp4 - tmp0
tmp6 = tmp4 - tmp2
tmp7 = tmp5 * tmp6
tmp8 = tmp3 + tmp7
tmp9 = tmp4 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = 0.25
tmp12 = tmp0 * tmp11
tmp13 = 0.75
tmp14 = tmp5 * tmp13
tmp15 = tmp12 + tmp14
tmp16 = tmp10 * tmp15
tmp17 = tmp5 * tmp1
tmp18 = 0.0
tmp19 = triton_helpers.minimum(tmp18, tmp1)
tmp20 = tl_math.abs(tmp1)
tmp21 = -tmp20
tmp22 = tl_math.exp(tmp21)
tmp23 = libdevice.log1p(tmp22)
tmp24 = tmp19 - tmp23
tmp25 = tmp17 - tmp24
tmp26 = tmp16 * tmp25
tmp27 = tl.broadcast_to(tmp26, [RBLOCK])
tmp29 = triton_helpers.promote_to_tensor(tl.sum(tmp27, 0))
tmp30 = 256.0
tmp31 = tmp29 / tmp30
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp31, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_binary_cross_entropy_with_logits_mean_mul_pow_rsub_sigmoid_0[
grid(1)](buf1, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class FocalLossNew(torch.nn.Module):
"""Sigmoid focal cross entropy loss.
Focal loss down-weights well-classified examples and focuses on the hard
examples. See https://arxiv.org/pdf/1708.02002.pdf for the loss definition.
"""
def __init__(self, gamma=2.0, alpha=0.25):
"""Constructor.
Args:
gamma: exponent of the modulating factor (1 - p_t)^gamma.
alpha: optional alpha weighting factor to balance positives vs negatives,
with alpha in [0, 1] for class 1 and 1-alpha for class 0.
In practice alpha may be set by inverse class frequency,
so that when positives are rare, their weight is high.
"""
super(FocalLossNew, self).__init__()
self._alpha = alpha
self._gamma = gamma
self.BCEWithLogits = nn.BCEWithLogitsLoss(reduction='none')
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
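# Hypothetical usage sketch (assumes a CUDA device, since `call` launches the
# Triton kernel on cuda:0, and inputs matching the (4, 4, 4, 4) guards):
#     loss_fn = FocalLossNew()
#     a = torch.rand(4, 4, 4, 4, device='cuda')  # feeds arg0_1 (target/alpha path)
#     b = torch.rand(4, 4, 4, 4, device='cuda')  # feeds arg1_1 (logit/sigmoid path)
#     loss = loss_fn(a, b)  # 0-dim float32 tensor on cuda:0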
| neuralsyn/self-supervised-relational-reasoning | FocalLoss | false | 16,155 | [
"MIT"
] | 130 | 6ecfafcf4a36c2eacef7ddd5bd1b23c28fbb14c8 | https://github.com/neuralsyn/self-supervised-relational-reasoning/tree/6ecfafcf4a36c2eacef7ddd5bd1b23c28fbb14c8 |
StackTime | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ao/caoovxtqrx42gvkmjirowqmmbh6kppvfh5ebrzzv4kzkgwm2umii.py
# Topologically Sorted Source Nodes: [r_2], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# r_2 => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1)), xmask)
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
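# Note: for the traced (4, 4, 4) input the zero padding is empty (-4 % 4 == 0),
# so the transpose + cat + reshape chain in StackTime collapses into this one
# permuted copy: out[x0 + 4*x1 + 16*x2] = in[x0 + 4*x2 + 16*x1].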
# kernel path: runs/run_shard_0/inductor_cache/vc/cvchxawa6ugiimgj4cxrq33nbcizxy4qbfaisgrluclblyp36bzs.py
# Topologically Sorted Source Nodes: [truediv, ceil, x_lens], Original ATen: [aten.div, aten.ceil, aten._to_copy]
# Source node to ATen node mapping:
# ceil => ceil
# truediv => div
# x_lens => convert_element_type
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg1_1, 4), kwargs = {})
# %ceil : [num_users=1] = call_function[target=torch.ops.aten.ceil.default](args = (%div,), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%ceil, torch.int32), kwargs = {})
triton_poi_fused__to_copy_ceil_div_1 = async_compile.triton('triton_poi_fused__to_copy_ceil_div_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_ceil_div_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_ceil_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.25
tmp2 = tmp0 * tmp1
tmp3 = libdevice.ceil(tmp2)
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [r_2], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int32)
# Topologically Sorted Source Nodes: [truediv, ceil, x_lens], Original ATen: [aten.div, aten.ceil, aten._to_copy]
triton_poi_fused__to_copy_ceil_div_1.run(arg1_1, buf1, 256, grid=grid(256), stream=stream0)
del arg1_1
return (reinterpret_tensor(buf0, (1, 4, 16), (16, 16, 1), 0), buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
from torchvision.models.quantization import *
class StackTime(torch.nn.Module):
__constants__ = ['factor']
def __init__(self, factor):
super().__init__()
self.factor = int(factor)
def forward(self, x, x_lens):
r = torch.transpose(x, 0, 1)
s = r.shape
zeros = torch.zeros(s[0], -s[1] % self.factor, s[2], dtype=r.dtype,
device=r.device)
r = torch.cat([r, zeros], 1)
s = r.shape
rs = [s[0], s[1] // self.factor, s[2] * self.factor]
r = torch.reshape(r, rs)
rt = torch.transpose(r, 0, 1)
x_lens = torch.ceil(x_lens.float() / self.factor).int()
return rt, x_lens
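# Worked shape example (illustrative): with factor=4 and x of shape
# (T=10, B=2, F=8), the transpose gives (2, 10, 8); -10 % 4 == 2 zero rows pad
# time to 12; the reshape yields (2, 12 // 4, 8 * 4) = (2, 3, 32); the final
# transpose returns rt of shape (3, 2, 32), and x_lens becomes ceil(x_lens / 4).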
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'factor': 4}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torchvision.models.quantization import *
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask)
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused__to_copy_ceil_div_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.25
tmp2 = tmp0 * tmp1
tmp3 = libdevice.ceil(tmp2)
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
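# Note: the kernel above computes ceil(x / 4) as ceil(x * 0.25) (division by
# the constant factor folded into a multiply by its reciprocal), then casts to
# int32, matching x_lens = torch.ceil(x_lens.float() / 4).int().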
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.int32)
triton_poi_fused__to_copy_ceil_div_1[grid(256)](arg1_1, buf1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg1_1
return reinterpret_tensor(buf0, (1, 4, 16), (16, 16, 1), 0), buf1
class StackTimeNew(torch.nn.Module):
__constants__ = ['factor']
def __init__(self, factor):
super().__init__()
self.factor = int(factor)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0], output[1]
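# Hypothetical usage sketch (assumes CUDA; the guards in `call` fix x to shape
# (4, 4, 4) and x_lens to shape (4, 4, 4, 4)):
#     m = StackTimeNew(factor=4)
#     stacked, lens = m(torch.rand(4, 4, 4, device='cuda'),
#                       torch.rand(4, 4, 4, 4, device='cuda'))
#     # stacked.shape == (1, 4, 16); lens.dtype == torch.int32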
| mlperf/inference | StackTime | false | 16,156 | [
"Apache-2.0"
] | 388 | b284212941bcce7a558813c31ba2d356bac71885 | https://github.com/mlperf/inference/tree/b284212941bcce7a558813c31ba2d356bac71885 |
SVHN_Net | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/qp/cqpf3ql535lobs6mc6l5mep65wu2cj446ohcw5e75cpwggiavfgv.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 115200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 900) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
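# Note: this kernel only adds the per-channel bias and applies relu in place;
# x1 = xindex // 900 % 32 recovers the channel index (30*30 spatial positions
# per channel). The convolution itself runs in the extern kernel that produced
# in_out_ptr0 (see extern_kernels.convolution in call()).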
# kernel path: runs/run_shard_0/inductor_cache/cd/ccdxp4wpoqezdz56jia2e5dc2s5bah44armuzkhg5bovjbi5cqd7.py
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 100352
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 784) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/gs/cgsibmeka3xteijkq6vqbjw3vgnmfpj46iitkptham3s6ipt67j5.py
# Topologically Sorted Source Nodes: [max_pool2d, x_1], Original ATen: [aten.max_pool2d_with_indices, aten.relu]
# Source node to ATen node mapping:
# max_pool2d => _low_memory_max_pool2d_with_offsets, getitem_1
# x_1 => relu_1
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%convolution_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%getitem,), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_relu_2 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 25088
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 14
x1 = (xindex // 14)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (56*x1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (56*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (28 + (2*x0) + (56*x1)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (29 + (2*x0) + (56*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tl.store(out_ptr0 + (x2), tmp15, xmask)
tl.store(out_ptr1 + (x2), tmp18, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/7p/c7plbf6kssbjkalccawbmgjpe5rrcjycysrqw3wip5zje6prwufi.py
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# Graph fragment:
# %convolution_2 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_3 = async_compile.triton('triton_poi_fused_convolution_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 18432
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 144) % 32
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/kq/ckqtc6evznpvt66auwjtyzsdvy37svyd3o5vqd6366jxzzhwigc4.py
# Topologically Sorted Source Nodes: [max_pool2d_1, x_2], Original ATen: [aten.max_pool2d_with_indices, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# max_pool2d_1 => _low_memory_max_pool2d_with_offsets_1, getitem_3
# x_2 => relu_2
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%convolution_2, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%getitem_2,), kwargs = {})
# %le_2 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_2, 0), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_4 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: '*i1', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_4(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 4608
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = (xindex // 6)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (24*x1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (24*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (12 + (2*x0) + (24*x1)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (13 + (2*x0) + (24*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = 0.0
tmp20 = tmp18 <= tmp19
tl.store(out_ptr0 + (x2), tmp15, xmask)
tl.store(out_ptr1 + (x2), tmp18, xmask)
tl.store(out_ptr2 + (x2), tmp20, xmask)
''', device_str='cuda')
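# Note: out_ptr0 holds an int8 code (0..3) identifying which element of each
# 2x2 pooling window was the maximum; autograd uses it in max_pool2d's
# backward. out_ptr2 stores the relu mask (output <= 0) that
# threshold_backward reuses, and out_ptr1 the pooled-then-relu'd activations.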
# kernel path: runs/run_shard_0/inductor_cache/s6/cs6uixuclz3iflcd2ut5zzg5bihuhoj7aytcc4zsqzbqf4jcmzz5.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# x_4 => relu_3
# Graph fragment:
# %add_tensor_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_9), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor_1,), kwargs = {})
triton_poi_fused_relu_5 = async_compile.triton('triton_poi_fused_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 400
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/yf/cyfo4sqhnsbahrlhepn6smzctzxkgb4savu5zwogcrmtvus52gk3.py
# Topologically Sorted Source Nodes: [e1], Original ATen: [aten.relu]
# Source node to ATen node mapping:
# e1 => relu_4
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_11), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
triton_poi_fused_relu_6 = async_compile.triton('triton_poi_fused_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 50
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args
args.clear()
assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1))
assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (32, ), (1, ))
assert_size_stride(primals_6, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_7, (32, ), (1, ))
assert_size_stride(primals_8, (400, 1152), (1152, 1))
assert_size_stride(primals_9, (400, ), (1, ))
assert_size_stride(primals_10, (50, 400), (400, 1))
assert_size_stride(primals_11, (50, ), (1, ))
assert_size_stride(primals_12, (10, 50), (50, 1))
assert_size_stride(primals_13, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 30, 30), (28800, 900, 30, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_2, 115200, grid=grid(115200), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 28, 28), (25088, 784, 28, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf3, primals_5, 100352, grid=grid(100352), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((4, 32, 14, 14), (6272, 196, 14, 1), torch.int8)
buf5 = empty_strided_cuda((4, 32, 14, 14), (6272, 196, 14, 1), torch.float32)
# Topologically Sorted Source Nodes: [max_pool2d, x_1], Original ATen: [aten.max_pool2d_with_indices, aten.relu]
triton_poi_fused_max_pool2d_with_indices_relu_2.run(buf3, buf4, buf5, 25088, grid=grid(25088), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 32, 12, 12), (4608, 144, 12, 1))
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
triton_poi_fused_convolution_3.run(buf7, primals_7, 18432, grid=grid(18432), stream=stream0)
del primals_7
buf8 = empty_strided_cuda((4, 32, 6, 6), (1152, 36, 6, 1), torch.int8)
buf9 = empty_strided_cuda((4, 32, 6, 6), (1152, 36, 6, 1), torch.float32)
buf15 = empty_strided_cuda((4, 32, 6, 6), (1152, 36, 6, 1), torch.bool)
# Topologically Sorted Source Nodes: [max_pool2d_1, x_2], Original ATen: [aten.max_pool2d_with_indices, aten.relu, aten.threshold_backward]
triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_4.run(buf7, buf8, buf9, buf15, 4608, grid=grid(4608), stream=stream0)
buf10 = empty_strided_cuda((4, 400), (400, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf9, (4, 1152), (1152, 1), 0), reinterpret_tensor(primals_8, (1152, 400), (1, 1152), 0), out=buf10)
buf11 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.relu]
triton_poi_fused_relu_5.run(buf11, primals_9, 1600, grid=grid(1600), stream=stream0)
del primals_9
buf12 = empty_strided_cuda((4, 50), (50, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf11, reinterpret_tensor(primals_10, (400, 50), (1, 400), 0), out=buf12)
buf13 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [e1], Original ATen: [aten.relu]
triton_poi_fused_relu_6.run(buf13, primals_11, 200, grid=grid(200), stream=stream0)
del primals_11
buf14 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_13, buf13, reinterpret_tensor(primals_12, (50, 10), (1, 50), 0), alpha=1, beta=1, out=buf14)
del primals_13
return (buf14, buf13, primals_1, primals_3, primals_4, primals_6, buf1, buf3, buf4, buf5, buf7, buf8, reinterpret_tensor(buf9, (4, 1152), (1152, 1), 0), buf11, buf13, primals_12, primals_10, primals_8, buf15, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 3, 3, 3), (27, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 32, 32), (3072, 1024, 32, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((32, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((400, 1152), (1152, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((400, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((50, 400), (400, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((50, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((10, 50), (50, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
class SVHN_Net(nn.Module):
def __init__(self):
super(SVHN_Net, self).__init__()
self.conv1 = nn.Conv2d(3, 32, kernel_size=3)
self.conv2 = nn.Conv2d(32, 32, kernel_size=3)
self.conv3 = nn.Conv2d(32, 32, kernel_size=3)
self.conv3_drop = nn.Dropout2d()
self.fc1 = nn.Linear(1152, 400)
self.fc2 = nn.Linear(400, 50)
self.fc3 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(F.max_pool2d(self.conv2(x), 2))
x = F.relu(F.max_pool2d(self.conv3_drop(self.conv3(x)), 2))
x = x.view(-1, 1152)
x = F.relu(self.fc1(x))
e1 = F.relu(self.fc2(x))
x = F.dropout(e1, training=self.training)
x = self.fc3(x)
return x, e1
def get_embedding_dim(self):
return 50
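# Shape trace for an (N, 3, 32, 32) input (illustrative):
#   conv1 (3x3, no padding) -> (N, 32, 30, 30)
#   conv2 -> (N, 32, 28, 28); maxpool 2x2 -> (N, 32, 14, 14)
#   conv3 -> (N, 32, 12, 12); maxpool 2x2 -> (N, 32, 6, 6)
#   flatten -> (N, 1152)  # 32 * 6 * 6
#   fc1 -> (N, 400); fc2 -> (N, 50) (the returned embedding e1); fc3 -> (N, 10)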
def get_inputs():
return [torch.rand([4, 3, 32, 32])]
def get_init_inputs():
return [[], {}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 115200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 900 % 32
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 784 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_2(in_ptr0, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 25088
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 14
x1 = xindex // 14
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 56 * x1), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 56 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (28 + 2 * x0 + 56 * x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (29 + 2 * x0 + 56 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tl.store(out_ptr0 + x2, tmp15, xmask)
tl.store(out_ptr1 + x2, tmp18, xmask)
@triton.jit
def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 144 % 32
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_4(in_ptr0,
out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 4608
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 24 * x1), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 24 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (12 + 2 * x0 + 24 * x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (13 + 2 * x0 + 24 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tmp17 = tl.full([1], 0, tl.int32)
tmp18 = triton_helpers.maximum(tmp17, tmp16)
tmp19 = 0.0
tmp20 = tmp18 <= tmp19
tl.store(out_ptr0 + x2, tmp15, xmask)
tl.store(out_ptr1 + x2, tmp18, xmask)
tl.store(out_ptr2 + x2, tmp20, xmask)
@triton.jit
def triton_poi_fused_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 400
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 50
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (32, 3, 3, 3), (27, 9, 3, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1))
assert_size_stride(primals_4, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (32, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_7, (32,), (1,))
assert_size_stride(primals_8, (400, 1152), (1152, 1))
assert_size_stride(primals_9, (400,), (1,))
assert_size_stride(primals_10, (50, 400), (400, 1))
assert_size_stride(primals_11, (50,), (1,))
assert_size_stride(primals_12, (10, 50), (50, 1))
assert_size_stride(primals_13, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 30, 30), (28800, 900, 30, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(115200)](buf1, primals_2,
115200, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 32, 28, 28), (25088, 784, 28, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_1[grid(100352)](buf3, primals_5,
100352, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((4, 32, 14, 14), (6272, 196, 14, 1),
torch.int8)
buf5 = empty_strided_cuda((4, 32, 14, 14), (6272, 196, 14, 1),
torch.float32)
triton_poi_fused_max_pool2d_with_indices_relu_2[grid(25088)](buf3,
buf4, buf5, 25088, XBLOCK=128, num_warps=4, num_stages=1)
buf6 = extern_kernels.convolution(buf5, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 32, 12, 12), (4608, 144, 12, 1))
buf7 = buf6
del buf6
triton_poi_fused_convolution_3[grid(18432)](buf7, primals_7, 18432,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf8 = empty_strided_cuda((4, 32, 6, 6), (1152, 36, 6, 1), torch.int8)
buf9 = empty_strided_cuda((4, 32, 6, 6), (1152, 36, 6, 1), torch.float32)
buf15 = empty_strided_cuda((4, 32, 6, 6), (1152, 36, 6, 1), torch.bool)
triton_poi_fused_max_pool2d_with_indices_relu_threshold_backward_4[grid(4608)](
    buf7, buf8, buf9, buf15, 4608, XBLOCK=128, num_warps=4, num_stages=1)
buf10 = empty_strided_cuda((4, 400), (400, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf9, (4, 1152), (1152, 1), 0),
    reinterpret_tensor(primals_8, (1152, 400), (1, 1152), 0), out=buf10)
buf11 = buf10
del buf10
triton_poi_fused_relu_5[grid(1600)](buf11, primals_9, 1600, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_9
buf12 = empty_strided_cuda((4, 50), (50, 1), torch.float32)
extern_kernels.mm(buf11, reinterpret_tensor(primals_10, (400, 50),
(1, 400), 0), out=buf12)
buf13 = buf12
del buf12
triton_poi_fused_relu_6[grid(200)](buf13, primals_11, 200, XBLOCK=
256, num_warps=4, num_stages=1)
del primals_11
buf14 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_13, buf13, reinterpret_tensor(
primals_12, (50, 10), (1, 50), 0), alpha=1, beta=1, out=buf14)
del primals_13
return (buf14, buf13, primals_1, primals_3, primals_4, primals_6, buf1,
buf3, buf4, buf5, buf7, buf8, reinterpret_tensor(buf9, (4, 1152), (
1152, 1), 0), buf11, buf13, primals_12, primals_10, primals_8, buf15)
class SVHN_NetNew(nn.Module):
def __init__(self):
super(SVHN_NetNew, self).__init__()
self.conv1 = nn.Conv2d(3, 32, kernel_size=3)
self.conv2 = nn.Conv2d(32, 32, kernel_size=3)
self.conv3 = nn.Conv2d(32, 32, kernel_size=3)
self.conv3_drop = nn.Dropout2d()
self.fc1 = nn.Linear(1152, 400)
self.fc2 = nn.Linear(400, 50)
self.fc3 = nn.Linear(50, 10)
def get_embedding_dim(self):
return 50
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.fc1.weight
primals_9 = self.fc1.bias
primals_10 = self.fc2.weight
primals_11 = self.fc2.bias
primals_12 = self.fc3.weight
primals_13 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0], output[1]
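# Editor's note: a hedged sketch, not code from the original repository. It
# reconstructs the eager-mode forward that the compiled graph above appears
# to implement, inferred from the kernel sequence (conv+ReLU, conv,
# pool+ReLU, conv, pool+ReLU, two Linear+ReLU, final addmm). Dropout is
# omitted because no dropout kernel appears in the traced graph.
import torch.nn.functional as F

def svhn_forward_reference(net, x):
    x = F.relu(net.conv1(x))                    # (N, 32, 30, 30)
    x = F.relu(F.max_pool2d(net.conv2(x), 2))   # (N, 32, 14, 14)
    x = F.relu(F.max_pool2d(net.conv3(x), 2))   # (N, 32, 6, 6)
    x = x.view(-1, 1152)                        # 32 * 6 * 6 = 1152
    x = F.relu(net.fc1(x))
    e1 = F.relu(net.fc2(x))
    return net.fc3(e1), e1                      # mirrors (buf14, buf13)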
| nannullna/deep-active-learning | SVHN_Net | false | 16,157 | ["MIT"] | 465 | c54a995640c63ba4679129c5a1fd5cec9a2858e6 | https://github.com/nannullna/deep-active-learning/tree/c54a995640c63ba4679129c5a1fd5cec9a2858e6 |
TripletLoss | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/pn/cpna4n3g6gpu2paowpo7xvvdimeicnhmebfegjcmy4wk3wodlry4.py
# Topologically Sorted Source Nodes: [add, mul, dist_mat, clamp, dist_mat_1, eq, identity_mat, sub_1, mul_1, add_1, sort, mul_2, add_2, sort_1], Original ATen: [aten.add, aten.mul, aten.sub, aten.clamp, aten.sqrt, aten.eq, aten._to_copy, aten.rsub, aten.sort]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# clamp => clamp_min
# dist_mat => sub
# dist_mat_1 => sqrt
# eq => eq
# identity_mat => convert_element_type
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# sort => sort
# sort_1 => sort_1
# sub_1 => sub_1
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%expand, %permute), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm, 2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %mul), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 1e-12), kwargs = {})
# %sqrt : [num_users=2] = call_function[target=torch.ops.aten.sqrt.default](args = (%clamp_min,), kwargs = {})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Tensor](args = (%expand_2, %permute_2), kwargs = {})
# %convert_element_type : [num_users=2] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%eq, torch.float32), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %convert_element_type), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, -10000000.0), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, %mul_1), kwargs = {})
# %sort : [num_users=1] = call_function[target=torch.ops.aten.sort.default](args = (%add_1, 1, True), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type, 10000000.0), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, %mul_2), kwargs = {})
# %sort_1 : [num_users=1] = call_function[target=torch.ops.aten.sort.default](args = (%add_2, 1), kwargs = {})
triton_per_fused__to_copy_add_clamp_eq_mul_rsub_sort_sqrt_sub_0 = async_compile.triton('triton_per_fused__to_copy_add_clamp_eq_mul_rsub_sort_sqrt_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 4],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_add_clamp_eq_mul_rsub_sort_sqrt_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 11, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__to_copy_add_clamp_eq_mul_rsub_sort_sqrt_sub_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
x0 = xindex
r1 = rindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (4*r1), None, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (1 + (4*r1)), None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (2 + (4*r1)), None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (3 + (4*r1)), None, eviction_policy='evict_last')
tmp23 = tl.load(in_out_ptr0 + (r1 + (4*x0)), xmask, other=0.0)
tmp30 = tl.load(in_ptr1 + (r1 + (4*x0)), xmask, other=0.0)
tmp31 = tl.load(in_ptr1 + (x0 + (4*r1)), xmask, other=0.0)
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp12 = tmp11 * tmp11
tmp14 = tmp13 * tmp13
tmp15 = tmp12 + tmp14
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp10 + tmp21
tmp24 = 2.0
tmp25 = tmp23 * tmp24
tmp26 = tmp22 - tmp25
tmp27 = 1e-12
tmp28 = triton_helpers.maximum(tmp26, tmp27)
tmp29 = libdevice.sqrt(tmp28)
tmp32 = tmp30 == tmp31
tmp33 = tmp32.to(tl.float32)
tmp34 = 1.0
tmp35 = tmp34 - tmp33
tmp36 = -10000000.0
tmp37 = tmp35 * tmp36
tmp38 = tmp29 + tmp37
tmp39 = r1
tmp40 = tmp39.to(tl.int16)
tmp41 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK])
tmp42 = tl.broadcast_to(tmp40, [XBLOCK, RBLOCK])
tmp43, tmp44, = triton_helpers.sort_with_index(tmp41, tmp42, None, 1, stable=False, descending=True)
tmp45 = 10000000.0
tmp46 = tmp33 * tmp45
tmp47 = tmp29 + tmp46
tmp48 = tl.broadcast_to(tmp47, [XBLOCK, RBLOCK])
tmp49, tmp50, = triton_helpers.sort_with_index(tmp48, tmp42, None, 1, stable=False, descending=False)
tl.store(in_out_ptr0 + (r1 + (4*x0)), tmp26, xmask)
tl.store(out_ptr0 + (r1 + (4*x0)), tmp43, xmask)
tl.store(out_ptr1 + (r1 + (4*x0)), tmp49, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/e3/ce32pujiahet4pqxynqfc4b7tbaczqldo5uzescozpvluutc4mbw.py
# Topologically Sorted Source Nodes: [loss], Original ATen: [aten.neg, aten.sub, aten.mul, aten.add, aten.clamp_min, aten.mean]
# Source node to ATen node mapping:
# loss => add_3, clamp_min_1, full_default, mean, mul_3, sub_2
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4], -1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%select_1, %select), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%full_default, %sub_2), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Scalar](args = (%mul_3, 4.0), kwargs = {})
# %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add_3, 0), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%clamp_min_1,), kwargs = {})
triton_per_fused_add_clamp_min_mean_mul_neg_sub_1 = async_compile.triton('triton_per_fused_add_clamp_min_mean_mul_neg_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 4],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_clamp_min_mean_mul_neg_sub_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_clamp_min_mean_mul_neg_sub_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (4*r0), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*r0), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp3 = -1.0
tmp4 = tmp3 * tmp2
tmp5 = 4.0
tmp6 = tmp4 + tmp5
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.sum(tmp9, 1)[:, None]
tmp12 = tmp11 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp12, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.mm]
extern_kernels.mm(arg0_1, reinterpret_tensor(arg0_1, (4, 4), (1, 4), 0), out=buf0)
buf1 = buf0; del buf0 # reuse
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, mul, dist_mat, clamp, dist_mat_1, eq, identity_mat, sub_1, mul_1, add_1, sort, mul_2, add_2, sort_1], Original ATen: [aten.add, aten.mul, aten.sub, aten.clamp, aten.sqrt, aten.eq, aten._to_copy, aten.rsub, aten.sort]
stream0 = get_raw_stream(0)
triton_per_fused__to_copy_add_clamp_eq_mul_rsub_sort_sqrt_sub_0.run(buf1, arg0_1, arg1_1, buf2, buf4, 4, 4, grid=grid(4), stream=stream0)
del arg0_1
del arg1_1
del buf1
buf6 = empty_strided_cuda((), (), torch.float32)
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [loss], Original ATen: [aten.neg, aten.sub, aten.mul, aten.add, aten.clamp_min, aten.mean]
triton_per_fused_add_clamp_min_mean_mul_neg_sub_1.run(buf7, buf4, buf2, 1, 4, grid=grid(1), stream=stream0)
del buf2
del buf4
return (buf7, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
def hard_examples_mining(dist_mat, identity_mat, return_idxes=False):
"""Select hard positives and hard negatives according to `In defense of the Triplet Loss for Person
Re-Identification (ICCV 2017) <https://arxiv.org/pdf/1703.07737v2.pdf>`_
Args:
dist_mat (tensor): pairwise distance matrix between two sets of features
identity_mat (tensor): a matrix of shape :math:`(N, M)`. If two images :math:`P[i]` of set :math:`P` and
:math:`Q[j]` of set :math:`Q` come from the same person, then :math:`identity\\_mat[i, j] = 1`,
otherwise :math:`identity\\_mat[i, j] = 0`
return_idxes (bool, optional): if True, also return indexes of hard examples. Default: False
"""
sorted_dist_mat, sorted_idxes = torch.sort(dist_mat + -10000000.0 * (1 -
identity_mat), dim=1, descending=True)
dist_ap = sorted_dist_mat[:, 0]
hard_positive_idxes = sorted_idxes[:, 0]
sorted_dist_mat, sorted_idxes = torch.sort(dist_mat + 10000000.0 *
identity_mat, dim=1, descending=False)
dist_an = sorted_dist_mat[:, 0]
hard_negative_idxes = sorted_idxes[:, 0]
if return_idxes:
return dist_ap, dist_an, hard_positive_idxes, hard_negative_idxes
return dist_ap, dist_an
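# Editor's note: a hedged worked example of the masking trick above, not part
# of the original repository. Subtracting 1e7 from negative pairs sinks them
# in the descending sort, so row i's first entry is its hardest (farthest)
# positive; adding 1e7 to positive pairs does the converse in the ascending
# sort, yielding the hardest (closest) negative.
_demo_dist = torch.tensor([[0.0, 1.0, 5.0],
                           [1.0, 0.0, 2.0],
                           [5.0, 2.0, 0.0]])
_demo_ident = torch.tensor([[1.0, 1.0, 0.0],
                            [1.0, 1.0, 0.0],
                            [0.0, 0.0, 1.0]])
_demo_ap, _demo_an = hard_examples_mining(_demo_dist, _demo_ident)
# _demo_ap -> tensor([1., 1., 0.]); _demo_an -> tensor([5., 2., 2.])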
def pairwise_euclidean_distance(x, y):
"""Compute pairwise euclidean distance between two sets of features"""
m, n = x.size(0), y.size(0)
dist_mat = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n) + torch.pow(y,
2).sum(1, keepdim=True).expand(n, m).t() - 2 * torch.matmul(x, y.t())
dist_mat = dist_mat.clamp(min=1e-12).sqrt()
return dist_mat
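# Editor's note: a hedged sanity check, not part of the original repository.
# The function above uses the expansion ||x_i - y_j||^2 = ||x_i||^2 +
# ||y_j||^2 - 2<x_i, y_j>; torch.cdist computes the same distances directly,
# up to the 1e-12 clamp that guards the sqrt.
_x_chk, _y_chk = torch.randn(5, 8), torch.randn(3, 8)
assert torch.allclose(pairwise_euclidean_distance(_x_chk, _y_chk),
                      torch.cdist(_x_chk, _y_chk), atol=1e-4)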
class TripletLoss(nn.Module):
"""Triplet loss augmented with batch hard from `In defense of the Triplet Loss for Person Re-Identification
(ICCV 2017) <https://arxiv.org/pdf/1703.07737v2.pdf>`_.
Args:
margin (float): margin of triplet loss
normalize_feature (bool, optional): if True, normalize features into unit norm first before computing loss.
Default: False.
"""
def __init__(self, margin, normalize_feature=False):
super(TripletLoss, self).__init__()
self.margin = margin
self.normalize_feature = normalize_feature
self.margin_loss = nn.MarginRankingLoss(margin=margin)
def forward(self, f, labels):
if self.normalize_feature:
f = F.normalize(f)
dist_mat = pairwise_euclidean_distance(f, f)
n = dist_mat.size(0)
identity_mat = labels.expand(n, n).eq(labels.expand(n, n).t()).float()
dist_ap, dist_an = hard_examples_mining(dist_mat, identity_mat)
y = torch.ones_like(dist_ap)
loss = self.margin_loss(dist_an, dist_ap, y)
return loss
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'margin': 4}]
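# Editor's note: a hedged usage sketch, not part of the original repository.
# The harness feeds two random (4, 4) tensors and the second stands in for
# labels, so identity_mat is 1 only on the diagonal and the loss is driven
# by each row's closest negative.
if __name__ == '__main__':
    _args, _kwargs = get_init_inputs()
    _feats, _labels = get_inputs()
    print(float(TripletLoss(*_args, **_kwargs)(_feats, _labels)))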
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__to_copy_add_clamp_eq_mul_rsub_sort_sqrt_sub_0(
        in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, rnumel,
        XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
x0 = xindex
r1 = rindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + 4 * r1, None, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (1 + 4 * r1), None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (2 + 4 * r1), None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (3 + 4 * r1), None, eviction_policy='evict_last')
tmp23 = tl.load(in_out_ptr0 + (r1 + 4 * x0), xmask, other=0.0)
tmp30 = tl.load(in_ptr1 + (r1 + 4 * x0), xmask, other=0.0)
tmp31 = tl.load(in_ptr1 + (x0 + 4 * r1), xmask, other=0.0)
tmp1 = tmp0 * tmp0
tmp3 = tmp2 * tmp2
tmp4 = tmp1 + tmp3
tmp6 = tmp5 * tmp5
tmp7 = tmp4 + tmp6
tmp9 = tmp8 * tmp8
tmp10 = tmp7 + tmp9
tmp12 = tmp11 * tmp11
tmp14 = tmp13 * tmp13
tmp15 = tmp12 + tmp14
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp10 + tmp21
tmp24 = 2.0
tmp25 = tmp23 * tmp24
tmp26 = tmp22 - tmp25
tmp27 = 1e-12
tmp28 = triton_helpers.maximum(tmp26, tmp27)
tmp29 = libdevice.sqrt(tmp28)
tmp32 = tmp30 == tmp31
tmp33 = tmp32.to(tl.float32)
tmp34 = 1.0
tmp35 = tmp34 - tmp33
tmp36 = -10000000.0
tmp37 = tmp35 * tmp36
tmp38 = tmp29 + tmp37
tmp39 = r1
tmp40 = tmp39.to(tl.int16)
tmp41 = tl.broadcast_to(tmp38, [XBLOCK, RBLOCK])
tmp42 = tl.broadcast_to(tmp40, [XBLOCK, RBLOCK])
tmp43, _tmp44 = triton_helpers.sort_with_index(tmp41, tmp42, None, 1,
stable=False, descending=True)
tmp45 = 10000000.0
tmp46 = tmp33 * tmp45
tmp47 = tmp29 + tmp46
tmp48 = tl.broadcast_to(tmp47, [XBLOCK, RBLOCK])
tmp49, _tmp50 = triton_helpers.sort_with_index(tmp48, tmp42, None, 1,
stable=False, descending=False)
tl.store(in_out_ptr0 + (r1 + 4 * x0), tmp26, xmask)
tl.store(out_ptr0 + (r1 + 4 * x0), tmp43, xmask)
tl.store(out_ptr1 + (r1 + 4 * x0), tmp49, xmask)
@triton.jit
def triton_per_fused_add_clamp_min_mean_mul_neg_sub_1(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 4
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + 4 * r0, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * r0, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp3 = -1.0
tmp4 = tmp3 * tmp2
tmp5 = 4.0
tmp6 = tmp4 + tmp5
tmp7 = 0.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tl.broadcast_to(tmp8, [XBLOCK, RBLOCK])
tmp11 = tl.sum(tmp9, 1)[:, None]
tmp12 = tmp11 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp12, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(arg0_1, reinterpret_tensor(arg0_1, (4, 4), (1, 4),
0), out=buf0)
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_per_fused__to_copy_add_clamp_eq_mul_rsub_sort_sqrt_sub_0[grid(4)
](buf1, arg0_1, arg1_1, buf2, buf4, 4, 4, XBLOCK=1, num_warps=2,
num_stages=1)
del arg0_1
del arg1_1
del buf1
buf6 = empty_strided_cuda((), (), torch.float32)
buf7 = buf6
del buf6
triton_per_fused_add_clamp_min_mean_mul_neg_sub_1[grid(1)](buf7,
buf4, buf2, 1, 4, XBLOCK=1, num_warps=2, num_stages=1)
del buf2
del buf4
return buf7,
def hard_examples_mining(dist_mat, identity_mat, return_idxes=False):
"""Select hard positives and hard negatives according to `In defense of the Triplet Loss for Person
Re-Identification (ICCV 2017) <https://arxiv.org/pdf/1703.07737v2.pdf>`_
Args:
dist_mat (tensor): pairwise distance matrix between two sets of features
identity_mat (tensor): a matrix of shape :math:`(N, M)`. If two images :math:`P[i]` of set :math:`P` and
:math:`Q[j]` of set :math:`Q` come from the same person, then :math:`identity\\_mat[i, j] = 1`,
otherwise :math:`identity\\_mat[i, j] = 0`
return_idxes (bool, optional): if True, also return indexes of hard examples. Default: False
"""
sorted_dist_mat, sorted_idxes = torch.sort(dist_mat + -10000000.0 * (1 -
identity_mat), dim=1, descending=True)
dist_ap = sorted_dist_mat[:, 0]
hard_positive_idxes = sorted_idxes[:, 0]
sorted_dist_mat, sorted_idxes = torch.sort(dist_mat + 10000000.0 *
identity_mat, dim=1, descending=False)
dist_an = sorted_dist_mat[:, 0]
hard_negative_idxes = sorted_idxes[:, 0]
if return_idxes:
return dist_ap, dist_an, hard_positive_idxes, hard_negative_idxes
return dist_ap, dist_an
def pairwise_euclidean_distance(x, y):
"""Compute pairwise euclidean distance between two sets of features"""
m, n = x.size(0), y.size(0)
dist_mat = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n) + torch.pow(y,
2).sum(1, keepdim=True).expand(n, m).t() - 2 * torch.matmul(x, y.t())
dist_mat = dist_mat.clamp(min=1e-12).sqrt()
return dist_mat
class TripletLossNew(nn.Module):
"""Triplet loss augmented with batch hard from `In defense of the Triplet Loss for Person Re-Identification
(ICCV 2017) <https://arxiv.org/pdf/1703.07737v2.pdf>`_.
Args:
margin (float): margin of triplet loss
normalize_feature (bool, optional): if True, normalize features into unit norm first before computing loss.
Default: False.
"""
def __init__(self, margin, normalize_feature=False):
super(TripletLossNew, self).__init__()
self.margin = margin
self.normalize_feature = normalize_feature
self.margin_loss = nn.MarginRankingLoss(margin=margin)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
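# Editor's note: a hedged equivalence check, not part of the original
# repository. The margin 4.0 is baked into the fused kernel (tmp5), so the
# compiled wrapper should match an eager computation only for margin=4;
# a CUDA device is required, as call() asserts.
if torch.cuda.is_available():
    _f = torch.rand(4, 4, device='cuda')
    _lbl = torch.rand(4, 4, device='cuda')
    _dist = pairwise_euclidean_distance(_f, _f)
    _ident = _lbl.expand(4, 4).eq(_lbl.expand(4, 4).t()).float()
    _ap, _an = hard_examples_mining(_dist, _ident)
    _ref = nn.MarginRankingLoss(margin=4)(_an, _ap, torch.ones_like(_ap))
    assert torch.allclose(TripletLossNew(margin=4)(_f, _lbl), _ref, atol=1e-4)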
| neka-nat/Transfer-Learning-Library | TripletLoss | false | 16,158 | ["MIT"] | 1,474 | a3b27b0d7562fa90a02e914140b37ab438469e6c | https://github.com/neka-nat/Transfer-Learning-Library/tree/a3b27b0d7562fa90a02e914140b37ab438469e6c |
MockOpticalFlowModel | # AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ie/ciettq2a3562jfpgfe75iig4ki2hbm6pmbwujlvp6mw26i2odufm.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], -3), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 16) % 8
x0 = xindex % 16
x2 = (xindex // 128)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x2)), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (x0 + (16*((-4) + x1)) + (64*x2)), tmp6 & xmask, other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ux/cuxl4w5ahe22wcdzvtqt4gqw3uhif73qjwuhcjounr3wj3h6igzt.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_3, %primals_4, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 2
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (2, 8, 1, 1), (8, 1, 1, 1))
assert_size_stride(primals_4, (2, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 512, grid=grid(512), stream=stream0)
del primals_1
del primals_2
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 2, 4, 4), (32, 16, 4, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf2, primals_4, 128, grid=grid(128), stream=stream0)
del primals_4
return (buf2, primals_3, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((2, 8, 1, 1), (8, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.nn as nn
class MockOpticalFlowModel(nn.Module):
def __init__(self, img_channels):
super().__init__()
self.model = nn.Conv2d(img_channels * 2, 2, kernel_size=1)
def forward(self, img1, img2):
x = torch.cat([img1, img2], dim=-3)
return self.model(x)
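# Editor's note: a hedged shape check, not part of the original repository.
# torch.cat along dim=-3 stacks the two frames on the channel axis, so the
# 1x1 convolution sees img_channels * 2 input channels and emits a
# 2-channel flow-like map at the input resolution.
_mock = MockOpticalFlowModel(img_channels=4)
_frame1, _frame2 = torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
assert _mock(_frame1, _frame2).shape == (4, 2, 4, 4)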
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'img_channels': 4}]
| import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 16 % 8
x0 = xindex % 16
x2 = xindex // 128
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x2), tmp4 & xmask, other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (x0 + 16 * (-4 + x1) + 64 * x2), tmp6 & xmask,
other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 2
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (2, 8, 1, 1), (8, 1, 1, 1))
assert_size_stride(primals_4, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](primals_1, primals_2, buf0, 512,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
del primals_2
buf1 = extern_kernels.convolution(buf0, primals_3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 2, 4, 4), (32, 16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(128)](buf2, primals_4, 128,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_4
return buf2, primals_3, buf0
class MockOpticalFlowModelNew(nn.Module):
def __init__(self, img_channels):
super().__init__()
self.model = nn.Conv2d(img_channels * 2, 2, kernel_size=1)
def forward(self, input_0, input_1):
primals_3 = self.model.weight
primals_4 = self.model.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
| neu-vig/ezflow | MockOpticalFlowModel | false | 16,159 | ["MIT"] | 94 | 1eb6f675e72b1de6db7b35d61ca4ef0082bae890 | https://github.com/neu-vig/ezflow/tree/1eb6f675e72b1de6db7b35d61ca4ef0082bae890 |
Softplus | # AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/sa/csap5cthikzuwdmqu2ehow73ilvezq4ym46wfjbhn3cebigfe7xy.py
# Topologically Sorted Source Nodes: [softplus], Original ATen: [aten.softplus]
# Source node to ATen node mapping:
# softplus => div, exp, gt, log1p, mul, where
# Graph fragment:
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 1.0), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%mul, 20.0), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%mul,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%log1p, 1.0), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %arg0_1, %div), kwargs = {})
triton_poi_fused_softplus_0 = async_compile.triton('triton_poi_fused_softplus_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_softplus_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_softplus_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = 20.0
tmp4 = tmp2 > tmp3
tmp5 = tl_math.exp(tmp2)
tmp6 = libdevice.log1p(tmp5)
tmp7 = tmp6 * tmp1
tmp8 = tl.where(tmp4, tmp0, tmp7)
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softplus], Original ATen: [aten.softplus]
stream0 = get_raw_stream(0)
triton_poi_fused_softplus_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
| import torch
import torch.onnx
import torch.nn as nn
class Softplus(nn.Module):
def forward(self, x):
return torch.nn.Softplus()(x)
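# Editor's note: a hedged numeric check, not part of the original repository.
# nn.Softplus defaults to beta=1, threshold=20: for beta * x > 20 it returns
# x unchanged (log1p(exp(x)) equals x to float precision there, and exp
# would overflow), which is exactly the branch in the fused Triton kernel
# later in this entry.
_sp_x = torch.tensor([-1.0, 0.0, 25.0])
assert torch.allclose(torch.nn.Softplus()(_sp_x),
                      torch.tensor([0.3133, 0.6931, 25.0]), atol=1e-4)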
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
| import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.onnx
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_softplus_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
    xnumel = 256
    xoffset = tl.program_id(0) * XBLOCK
    xindex = xoffset + tl.arange(0, XBLOCK)[:]
    xmask = xindex < xnumel
    x0 = xindex
    tmp0 = tl.load(in_ptr0 + x0, xmask)
    tmp1 = 1.0  # beta
    tmp2 = tmp0 * tmp1  # beta * x
    tmp3 = 20.0  # threshold
    tmp4 = tmp2 > tmp3  # linear region: avoids overflow in exp
    tmp5 = tl_math.exp(tmp2)
    tmp6 = libdevice.log1p(tmp5)  # log(1 + exp(beta * x))
    tmp7 = tmp6 * tmp1  # scale by 1/beta (a multiply, since beta == 1)
    tmp8 = tl.where(tmp4, tmp0, tmp7)  # pass x through above the threshold
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_softplus_0[grid(256)](arg0_1, buf0, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class SoftplusNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
| mil-tokyo/webdnn | Softplus | false | 16,160 | ["MIT"] | 1,967 | 38a60fd3e1a4e72bc01108189a3aa51e0752aecd | https://github.com/mil-tokyo/webdnn/tree/38a60fd3e1a4e72bc01108189a3aa51e0752aecd |