| entry_point (string, 1-65 chars) | original_triton_code (string, 4.5k-619k chars) | python_code (string, 208-60.9k chars) | triton_code (string, 1.15k-275k chars) | repo_name (string, 7-115 chars) | module_name (string, 1-65 chars) | synthetic (bool, 1 class) | uuid (int64, 0-18.5k) | licenses (list, 1-6 items) | stars (int64, 0-19.8k) | sha (string, 40 chars) | repo_link (string, 72-180 chars) |
---|---|---|---|---|---|---|---|---|---|---|---|
SAGEAggregator
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/hh/chh6c5w5qa6uf7vojzls7kg4by5riqn4sgtlt67ukhrqv4nd6zcl.py
# Topologically Sorted Source Nodes: [neigh_x], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# neigh_x => mean
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_2, [1]), kwargs = {})
triton_poi_fused_mean_0 = async_compile.triton('triton_poi_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mean_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/4v/c4vqcwddthr6wbxsjocrtk3nomisu65g2sfrwscprcbyovu47fev.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.add]
# Source node to ATen node mapping:
# out => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %view_1), kwargs = {})
triton_poi_fused_add_1 = async_compile.triton('triton_poi_fused_add_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [neigh_x], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_poi_fused_mean_0.run(primals_2, buf0, 64, grid=grid(64), stream=stream0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [neigh_x_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
del primals_4
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.add]
triton_poi_fused_add_1.run(buf3, buf1, 256, grid=grid(256), stream=stream0)
del buf1
return (buf3, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class SAGEAggregator(nn.Module):
def __init__(self, in_features, out_features, agg_method='mean', concat=False, bias=False):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.concat = concat
self.agg_method = agg_method
self.aggregator = {'mean': torch.mean, 'sum': torch.sum}[agg_method]
self.lin_l = nn.Linear(in_features, out_features, bias=bias)
self.lin_r = nn.Linear(in_features, out_features, bias=bias)
def reset_parameters(self):
self.lin_l.reset_parameters()
self.lin_r.reset_parameters()
def forward(self, x, neigh_x):
if not isinstance(x, torch.Tensor):
x = torch.cat(x, dim=0)
if not isinstance(neigh_x, torch.Tensor):
neigh_x = torch.cat([self.aggregator(h, dim=1) for h in neigh_x], dim=0)
else:
neigh_x = self.aggregator(neigh_x, dim=1)
neigh_x = self.lin_r(neigh_x)
x = self.lin_l(x)
out = torch.cat([x, neigh_x], dim=1) if self.concat else x + neigh_x
return out
def __repr__(self):
return (
f'{self.__class__.__name__}({self.in_features}, {self.out_features})'
)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
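# Editor's note: a minimal smoke test, not part of the original file. It uses
# only the definitions above and checks that the default configuration
# (agg_method='mean', concat=False) matches the hand-written formula
# lin_l(x) + lin_r(mean(neigh_x, dim=1)), broadcast over dim 0.
def _smoke_test_sage_aggregator():
    model = SAGEAggregator(*get_init_inputs()[0], **get_init_inputs()[1])
    x, neigh_x = get_inputs()
    out = model(x, neigh_x)
    expected = model.lin_l(x) + model.lin_r(neigh_x.mean(dim=1))
    assert torch.allclose(out, expected)
    return out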
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mean_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mean_0[grid(64)](primals_2, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
del primals_4
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused_add_1[grid(256)](buf3, buf1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf1
return buf3, reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0)
class SAGEAggregatorNew(nn.Module):
def __init__(self, in_features, out_features, agg_method='mean', concat=False, bias=False):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.concat = concat
self.agg_method = agg_method
self.aggregator = {'mean': torch.mean, 'sum': torch.sum}[agg_method]
self.lin_l = nn.Linear(in_features, out_features, bias=bias)
self.lin_r = nn.Linear(in_features, out_features, bias=bias)
def reset_parameters(self):
self.lin_l.reset_parameters()
self.lin_r.reset_parameters()
def __repr__(self):
return (
f'{self.__class__.__name__}({self.in_features}, {self.out_features})'
)
def forward(self, input_0, input_1):
primals_3 = self.lin_l.weight
primals_4 = self.lin_r.weight
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
repo_name: EdisonLeeeee/GraphGallery | module_name: SAGEAggregator | synthetic: false | uuid: 13,634 | licenses: ["MIT"] | stars: 300 | sha: 4eec9c5136bda14809bd22584b26cc346cdb633b | repo_link: https://github.com/EdisonLeeeee/GraphGallery/tree/4eec9c5136bda14809bd22584b26cc346cdb633b
|
HingeLoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/kg/ckgjqsaicrgf6lzcklj5dopswm4cuvv23skrpdqbl2wjnu2wrlru.py
# Topologically Sorted Source Nodes: [mul, output, setitem, mean], Original ATen: [aten.mul, aten.rsub, aten.lift_fresh, aten.index_put, aten.mean]
# Source node to ATen node mapping:
# mean => mean
# mul => mul
# output => sub
# setitem => full_default, index_put
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %mul), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cpu, pin_memory: False})
# %index_put : [num_users=1] = call_function[target=torch.ops.aten.index_put_.default](args = (%sub, [%le], %full_default), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%index_put,), kwargs = {})
triton_per_fused_index_put_lift_fresh_mean_mul_rsub_0 = async_compile.triton('triton_per_fused_index_put_lift_fresh_mean_mul_rsub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_index_put_lift_fresh_mean_mul_rsub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_index_put_lift_fresh_mean_mul_rsub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp2
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tmp7 = tl.where(tmp6, tmp5, tmp4)
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp11 = 256.0
tmp12 = tmp10 / tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp12, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [mul, output, setitem, mean], Original ATen: [aten.mul, aten.rsub, aten.lift_fresh, aten.index_put, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_index_put_lift_fresh_mean_mul_rsub_0.run(buf2, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.utils.data
class HingeLoss(nn.Module):
def __init__(self):
super(HingeLoss, self).__init__()
self.margin = 1.0
def hinge_loss(self, input, target):
output = self.margin - input.mul(target)
output[output.le(0)] = 0
return output.mean()
def forward(self, input, target):
return self.hinge_loss(input, target)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
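# Editor's note: an equivalent closed form, added for clarity and not part of
# the original file. Zeroing the entries where output <= 0 before taking the
# mean is the same as clamping at zero, so the module computes
# mean(max(0, margin - input * target)).
def _hinge_loss_reference(input, target, margin=1.0):
    return torch.clamp(margin - input * target, min=0).mean()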
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_index_put_lift_fresh_mean_mul_rsub_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp2
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tmp7 = tl.where(tmp6, tmp5, tmp4)
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp11 = 256.0
tmp12 = tmp10 / tmp11
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp12, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
buf2 = buf1
del buf1
get_raw_stream(0)
triton_per_fused_index_put_lift_fresh_mean_mul_rsub_0[grid(1)](buf2,
arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf2,
class HingeLossNew(nn.Module):
def __init__(self):
super(HingeLossNew, self).__init__()
self.margin = 1.0
def hinge_loss(self, input, target):
output = self.margin - input.mul(target)
output[output.le(0)] = 0
return output.mean()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
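# Editor's note: a hedged parity check, not generated code. It assumes a CUDA
# device is available and compares the fused kernel against the eager
# hinge_loss() defined above on random inputs of the traced shape.
def _check_hinge_parity():
    a = torch.rand(4, 4, 4, 4, device='cuda')
    b = torch.rand(4, 4, 4, 4, device='cuda')
    eager = HingeLossNew().hinge_loss(a, b)
    fused = call([a, b])[0]
    return torch.allclose(eager, fused)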
|
repo_name: Enderdead/BinaryConnect_PyTorch | module_name: HingeLoss | synthetic: false | uuid: 13,635 | licenses: ["MIT"] | stars: 75 | sha: 990e970b1fbd299ff88200db21a9cc3fe44706d3 | repo_link: https://github.com/Enderdead/BinaryConnect_PyTorch/tree/990e970b1fbd299ff88200db21a9cc3fe44706d3
|
GaussionConvD
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/tc/ctcljh7q36b3xg5ofxo7xvjxbezjdewz3orlw4zhiiekkfy33ssc.py
# Topologically Sorted Source Nodes: [mean, var, mul, attention, mul_1, mul_2, mul_3], Original ATen: [aten.elu, aten.relu, aten.mul, aten.exp]
# Source node to ATen node mapping:
# attention => exp
# mean => expm1, gt, mul, mul_2, where
# mul => mul_3
# mul_1 => mul_4
# mul_2 => mul_5
# mul_3 => mul_6
# var => relu
# Graph fragment:
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%mm, 0), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm, 1.0), kwargs = {})
# %expm1 : [num_users=1] = call_function[target=torch.ops.aten.expm1.default](args = (%mul,), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%expm1, 1.0), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %mul, %mul_2), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%mm_1,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%relu, -1.0), kwargs = {})
# %exp : [num_users=3] = call_function[target=torch.ops.aten.exp.default](args = (%mul_3,), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%where, %exp), kwargs = {})
# %mul_5 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%relu, %exp), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_5, %exp), kwargs = {})
triton_poi_fused_elu_exp_mul_relu_0 = async_compile.triton('triton_poi_fused_elu_exp_mul_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_elu_exp_mul_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_elu_exp_mul_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp8 = tl.load(in_ptr1 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 1.0
tmp4 = tmp0 * tmp3
tmp5 = libdevice.expm1(tmp4)
tmp6 = tmp5 * tmp3
tmp7 = tl.where(tmp2, tmp4, tmp6)
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp11 = -1.0
tmp12 = tmp10 * tmp11
tmp13 = tl_math.exp(tmp12)
tmp14 = tmp7 * tmp13
tmp15 = tmp10 * tmp13
tmp16 = tmp15 * tmp13
tl.store(out_ptr0 + (x0), tmp14, xmask)
tl.store(out_ptr1 + (x0), tmp15, xmask)
tl.store(out_ptr2 + (x0), tmp16, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(primals_2, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(primals_4, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean, var, mul, attention, mul_1, mul_2, mul_3], Original ATen: [aten.elu, aten.relu, aten.mul, aten.exp]
stream0 = get_raw_stream(0)
triton_poi_fused_elu_exp_mul_relu_0.run(buf0, buf1, buf2, buf4, buf5, 16, grid=grid(16), stream=stream0)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean, var, mul, attention, mul_1, mean_1], Original ATen: [aten.elu, aten.relu, aten.mul, aten.exp, aten.mm]
extern_kernels.mm(primals_5, buf2, out=buf3)
buf6 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [var, mul, attention, mul_3, var_1], Original ATen: [aten.relu, aten.mul, aten.exp, aten.mm]
extern_kernels.mm(primals_6, buf5, out=buf6)
del buf5
return (buf3, buf6, primals_2, primals_4, buf0, buf1, buf4, reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class GaussionConvD(nn.Module):
"""The subsequent layer in `RobustGCN` that takes node distribution (mean, var) as input"""
def __init__(self, in_features, out_features, bias=False, gamma=1.0):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.w_mean = nn.Linear(in_features, out_features, bias=bias)
self.w_var = nn.Linear(in_features, out_features, bias=bias)
self.gamma = gamma
def reset_parameters(self):
    self.w_mean.reset_parameters()
    self.w_var.reset_parameters()
def forward(self, mean, var, adj_mean, adj_var):
mean = F.elu(self.w_mean(mean))
var = F.relu(self.w_var(var))
attention = torch.exp(-self.gamma * var)
mean = adj_mean.mm(mean * attention)
var = adj_var.mm(var * attention * attention)
return mean, var
def __repr__(self):
return (
f'{self.__class__.__name__}({self.in_features}, {self.out_features})'
)
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
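# Editor's note: a hedged eager restatement of forward(), added for clarity
# and not part of the original file. It shows the role of the attention term:
# high-variance nodes are exponentially down-weighted by exp(-gamma * var) in
# the mean propagation, and doubly so in the variance propagation.
def _gaussian_conv_reference(layer, mean, var, adj_mean, adj_var):
    mu = F.elu(layer.w_mean(mean))
    sigma = F.relu(layer.w_var(var))
    attention = torch.exp(-layer.gamma * sigma)
    return adj_mean.mm(mu * attention), adj_var.mm(sigma * attention * attention)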
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_elu_exp_mul_relu_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp8 = tl.load(in_ptr1 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 1.0
tmp4 = tmp0 * tmp3
tmp5 = libdevice.expm1(tmp4)
tmp6 = tmp5 * tmp3
tmp7 = tl.where(tmp2, tmp4, tmp6)
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tmp11 = -1.0
tmp12 = tmp10 * tmp11
tmp13 = tl_math.exp(tmp12)
tmp14 = tmp7 * tmp13
tmp15 = tmp10 * tmp13
tmp16 = tmp15 * tmp13
tl.store(out_ptr0 + x0, tmp14, xmask)
tl.store(out_ptr1 + x0, tmp15, xmask)
tl.store(out_ptr2 + x0, tmp16, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
assert_size_stride(primals_6, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_2, reinterpret_tensor(primals_1, (4, 4),
(1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_4, reinterpret_tensor(primals_3, (4, 4),
(1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_elu_exp_mul_relu_0[grid(16)](buf0, buf1, buf2,
buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_5, buf2, out=buf3)
buf6 = buf2
del buf2
extern_kernels.mm(primals_6, buf5, out=buf6)
del buf5
return (buf3, buf6, primals_2, primals_4, buf0, buf1, buf4,
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
reinterpret_tensor(primals_5, (4, 4), (1, 4), 0))
class GaussionConvDNew(nn.Module):
"""The subsequent layer in `RobustGCN` that takes node distribution (mean, var) as input"""
def __init__(self, in_features, out_features, bias=False, gamma=1.0):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.w_mean = nn.Linear(in_features, out_features, bias=bias)
self.w_var = nn.Linear(in_features, out_features, bias=bias)
self.gamma = gamma
def reset_parameters(self):
    self.w_mean.reset_parameters()
    self.w_var.reset_parameters()
def __repr__(self):
return (
f'{self.__class__.__name__}({self.in_features}, {self.out_features})'
)
def forward(self, input_0, input_1, input_2, input_3):
primals_1 = self.w_mean.weight
primals_2 = self.w_var.weight
primals_3 = input_0
primals_4 = input_1
primals_5 = input_2
primals_6 = input_3
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0], output[1]
|
repo_name: EdisonLeeeee/GraphGallery | module_name: GaussionConvD | synthetic: false | uuid: 13,636 | licenses: ["MIT"] | stars: 300 | sha: 4eec9c5136bda14809bd22584b26cc346cdb633b | repo_link: https://github.com/EdisonLeeeee/GraphGallery/tree/4eec9c5136bda14809bd22584b26cc346cdb633b
|
APPNProp
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/q5/cq5rthx627l3kn64h6pwzbnpc3ofak65jjjfg5pcra7sqcqgjfe5.py
# Topologically Sorted Source Nodes: [mul, mul_1, h], Original ATen: [aten.mul, aten.add]
# Source node to ATen node mapping:
# h => add
# mul => mul
# mul_1 => mul_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm, 0.9), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
triton_poi_fused_add_mul_0 = async_compile.triton('triton_poi_fused_add_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp3 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.9
tmp2 = tmp0 * tmp1
tmp4 = 0.1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mm], Original ATen: [aten.mm]
extern_kernels.mm(arg1_1, arg0_1, out=buf0)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mul, mul_1, h], Original ATen: [aten.mul, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_0.run(buf1, arg0_1, 16, grid=grid(16), stream=stream0)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, mul_1, h, mm_1], Original ATen: [aten.mul, aten.add, aten.mm]
extern_kernels.mm(arg1_1, buf1, out=buf2)
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [mul_2, mul_3, h_1], Original ATen: [aten.mul, aten.add]
triton_poi_fused_add_mul_0.run(buf3, arg0_1, 16, grid=grid(16), stream=stream0)
buf4 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [mul_2, mul_3, h_1, mm_2], Original ATen: [aten.mul, aten.add, aten.mm]
extern_kernels.mm(arg1_1, buf3, out=buf4)
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [mul_4, mul_5, h_2], Original ATen: [aten.mul, aten.add]
triton_poi_fused_add_mul_0.run(buf5, arg0_1, 16, grid=grid(16), stream=stream0)
buf6 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [mul_4, mul_5, h_2, mm_3], Original ATen: [aten.mul, aten.add, aten.mm]
extern_kernels.mm(arg1_1, buf5, out=buf6)
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [mul_6, mul_7, h_3], Original ATen: [aten.mul, aten.add]
triton_poi_fused_add_mul_0.run(buf7, arg0_1, 16, grid=grid(16), stream=stream0)
buf8 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [mul_6, mul_7, h_3, mm_4], Original ATen: [aten.mul, aten.add, aten.mm]
extern_kernels.mm(arg1_1, buf7, out=buf8)
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [mul_8, mul_9, h_4], Original ATen: [aten.mul, aten.add]
triton_poi_fused_add_mul_0.run(buf9, arg0_1, 16, grid=grid(16), stream=stream0)
buf10 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [mul_8, mul_9, h_4, mm_5], Original ATen: [aten.mul, aten.add, aten.mm]
extern_kernels.mm(arg1_1, buf9, out=buf10)
buf11 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [mul_10, mul_11, h_5], Original ATen: [aten.mul, aten.add]
triton_poi_fused_add_mul_0.run(buf11, arg0_1, 16, grid=grid(16), stream=stream0)
buf12 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [mul_10, mul_11, h_5, mm_6], Original ATen: [aten.mul, aten.add, aten.mm]
extern_kernels.mm(arg1_1, buf11, out=buf12)
buf13 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [mul_12, mul_13, h_6], Original ATen: [aten.mul, aten.add]
triton_poi_fused_add_mul_0.run(buf13, arg0_1, 16, grid=grid(16), stream=stream0)
buf14 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [mul_12, mul_13, h_6, mm_7], Original ATen: [aten.mul, aten.add, aten.mm]
extern_kernels.mm(arg1_1, buf13, out=buf14)
buf15 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [mul_14, mul_15, h_7], Original ATen: [aten.mul, aten.add]
triton_poi_fused_add_mul_0.run(buf15, arg0_1, 16, grid=grid(16), stream=stream0)
buf16 = buf13; del buf13 # reuse
# Topologically Sorted Source Nodes: [mul_14, mul_15, h_7, mm_8], Original ATen: [aten.mul, aten.add, aten.mm]
extern_kernels.mm(arg1_1, buf15, out=buf16)
buf17 = buf16; del buf16 # reuse
# Topologically Sorted Source Nodes: [mul_16, mul_17, h_8], Original ATen: [aten.mul, aten.add]
triton_poi_fused_add_mul_0.run(buf17, arg0_1, 16, grid=grid(16), stream=stream0)
buf18 = buf15; del buf15 # reuse
# Topologically Sorted Source Nodes: [mul_16, mul_17, h_8, mm_9], Original ATen: [aten.mul, aten.add, aten.mm]
extern_kernels.mm(arg1_1, buf17, out=buf18)
del arg1_1
del buf17
buf19 = buf18; del buf18 # reuse
# Topologically Sorted Source Nodes: [mul_18, mul_19, h_9], Original ATen: [aten.mul, aten.add]
triton_poi_fused_add_mul_0.run(buf19, arg0_1, 16, grid=grid(16), stream=stream0)
del arg0_1
return (buf19, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class SparseDropout(nn.Module):
def __init__(self, p=0.5):
super().__init__()
self.p = p
def forward(self, x):
x_coal = x.coalesce()
drop_val = F.dropout(x_coal._values(), self.p, self.training)
return torch.sparse.FloatTensor(x_coal._indices(), drop_val, x.shape)
class MixedDropout(nn.Module):
def __init__(self, p=0.5):
super().__init__()
self.dense_dropout = nn.Dropout(p)
self.sparse_dropout = SparseDropout(p)
def forward(self, x):
if x.is_sparse:
return self.sparse_dropout(x)
else:
return self.dense_dropout(x)
class APPNProp(nn.Module):
def __init__(self, alpha: 'float'=0.1, K: 'int'=10, dropout: 'float'=0.0):
super().__init__()
self.alpha = alpha
self.K = K
if not dropout:
self.dropout = lambda x: x
else:
self.dropout = MixedDropout(dropout)
def forward(self, x, adj):
h = x
for _ in range(self.K):
A_drop = self.dropout(adj)
h = (1 - self.alpha) * A_drop.mm(h) + self.alpha * x
return h
def __repr__(self):
return (
f'{self.__class__.__name__}(alpha={self.alpha}, K={self.K}, dropout={self.dropout})'
)
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
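# Editor's note: a hedged reference implementation for the dropout-free case,
# added for clarity and not part of the original file. Unrolling
# h_{k+1} = (1 - alpha) * adj @ h_k + alpha * x for K steps yields the
# truncated personalized-PageRank propagation that APPNP approximates.
def _appnp_reference(x, adj, alpha=0.1, K=10):
    h = x
    for _ in range(K):
        h = (1 - alpha) * adj.mm(h) + alpha * x
    return h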
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp3 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.9
tmp2 = tmp0 * tmp1
tmp4 = 0.1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tl.store(in_out_ptr0 + x0, tmp6, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(arg1_1, arg0_1, out=buf0)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_add_mul_0[grid(16)](buf1, arg0_1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(arg1_1, buf1, out=buf2)
buf3 = buf2
del buf2
triton_poi_fused_add_mul_0[grid(16)](buf3, arg0_1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf4 = buf1
del buf1
extern_kernels.mm(arg1_1, buf3, out=buf4)
buf5 = buf4
del buf4
triton_poi_fused_add_mul_0[grid(16)](buf5, arg0_1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf6 = buf3
del buf3
extern_kernels.mm(arg1_1, buf5, out=buf6)
buf7 = buf6
del buf6
triton_poi_fused_add_mul_0[grid(16)](buf7, arg0_1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf8 = buf5
del buf5
extern_kernels.mm(arg1_1, buf7, out=buf8)
buf9 = buf8
del buf8
triton_poi_fused_add_mul_0[grid(16)](buf9, arg0_1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf10 = buf7
del buf7
extern_kernels.mm(arg1_1, buf9, out=buf10)
buf11 = buf10
del buf10
triton_poi_fused_add_mul_0[grid(16)](buf11, arg0_1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf12 = buf9
del buf9
extern_kernels.mm(arg1_1, buf11, out=buf12)
buf13 = buf12
del buf12
triton_poi_fused_add_mul_0[grid(16)](buf13, arg0_1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf14 = buf11
del buf11
extern_kernels.mm(arg1_1, buf13, out=buf14)
buf15 = buf14
del buf14
triton_poi_fused_add_mul_0[grid(16)](buf15, arg0_1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf16 = buf13
del buf13
extern_kernels.mm(arg1_1, buf15, out=buf16)
buf17 = buf16
del buf16
triton_poi_fused_add_mul_0[grid(16)](buf17, arg0_1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf18 = buf15
del buf15
extern_kernels.mm(arg1_1, buf17, out=buf18)
del arg1_1
del buf17
buf19 = buf18
del buf18
triton_poi_fused_add_mul_0[grid(16)](buf19, arg0_1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del arg0_1
return buf19,
class SparseDropout(nn.Module):
def __init__(self, p=0.5):
super().__init__()
self.p = p
def forward(self, x):
x_coal = x.coalesce()
drop_val = F.dropout(x_coal._values(), self.p, self.training)
return torch.sparse.FloatTensor(x_coal._indices(), drop_val, x.shape)
class MixedDropout(nn.Module):
def __init__(self, p=0.5):
super().__init__()
self.dense_dropout = nn.Dropout(p)
self.sparse_dropout = SparseDropout(p)
def forward(self, x):
if x.is_sparse:
return self.sparse_dropout(x)
else:
return self.dense_dropout(x)
class APPNPropNew(nn.Module):
def __init__(self, alpha: 'float'=0.1, K: 'int'=10, dropout: 'float'=0.0):
super().__init__()
self.alpha = alpha
self.K = K
if not dropout:
self.dropout = lambda x: x
else:
self.dropout = MixedDropout(dropout)
def __repr__(self):
return (
f'{self.__class__.__name__}(alpha={self.alpha}, K={self.K}, dropout={self.dropout})'
)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
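# Editor's note: a hedged parity check, not generated code. It assumes a CUDA
# device and compares the fully unrolled compiled graph above (alpha=0.1 and
# K=10 baked in as the 0.9 / 0.1 constants) against ten eager steps.
def _check_appnp_parity():
    x = torch.rand(4, 4, device='cuda')
    adj = torch.rand(4, 4, device='cuda')
    h = x
    for _ in range(10):
        h = 0.9 * adj.mm(h) + 0.1 * x
    return torch.allclose(APPNPropNew()(x, adj), h, atol=1e-5)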
|
repo_name: EdisonLeeeee/GraphGallery | module_name: APPNProp | synthetic: false | uuid: 13,637 | licenses: ["MIT"] | stars: 300 | sha: 4eec9c5136bda14809bd22584b26cc346cdb633b | repo_link: https://github.com/EdisonLeeeee/GraphGallery/tree/4eec9c5136bda14809bd22584b26cc346cdb633b
|
CumulativeLinkLoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/qo/cqof2fy7nmp4btgspyo45t275x6uewwjbudp2ilglmlog753zdfy.py
# Topologically Sorted Source Nodes: [gather, likelihoods, log, neg_log_likelihood, loss], Original ATen: [aten.gather, aten.clamp, aten.log, aten.neg, aten.mean]
# Source node to ATen node mapping:
# gather => gather
# likelihoods => clamp_max, clamp_min, convert_element_type
# log => log
# loss => mean
# neg_log_likelihood => neg
# Graph fragment:
# %gather : [num_users=1] = call_function[target=torch.ops.aten.gather.default](args = (%arg0_1, 1, %arg1_1), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%gather, 1e-15), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 0.999999999999999), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%clamp_max, torch.int64), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%convert_element_type,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%log,), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%neg,), kwargs = {})
triton_per_fused_clamp_gather_log_mean_neg_0 = async_compile.triton('triton_per_fused_clamp_gather_log_mean_neg_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_clamp_gather_log_mean_neg_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_clamp_gather_log_mean_neg_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
r1 = (rindex // 4)
tmp0 = tl.load(in_ptr0 + (r2), None)
tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 4), "index out of bounds: 0 <= tmp4 < 4")
tmp6 = tl.load(in_ptr1 + (tmp4 + (4*r1)), None, eviction_policy='evict_last')
tmp7 = tmp6.to(tl.float32)
tmp8 = 1e-15
tmp9 = triton_helpers.maximum(tmp7, tmp8)
tmp10 = 0.999999999999999
tmp11 = triton_helpers.minimum(tmp9, tmp10)
tmp12 = tmp11.to(tl.int64)
tmp13 = tmp12.to(tl.float32)
tmp14 = tl_math.log(tmp13)
tmp15 = -tmp14
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.sum(tmp16, 1)[:, None]
tmp19 = 16.0
tmp20 = tmp18 / tmp19
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp20, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [gather, likelihoods, log, neg_log_likelihood, loss], Original ATen: [aten.gather, aten.clamp, aten.log, aten.neg, aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_clamp_gather_log_mean_neg_0.run(buf1, arg1_1, arg0_1, 1, 16, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.int64)
arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.int64)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import numpy as np
from torch import nn
from typing import Optional
def _reduction(loss: 'torch.Tensor', reduction: 'str') ->torch.Tensor:
"""
Reduce loss
Parameters
----------
loss : torch.Tensor, [batch_size, num_classes]
Batch losses.
reduction : str
Method for reducing the loss. Options include 'elementwise_mean',
'none', and 'sum'.
Returns
-------
loss : torch.Tensor
Reduced loss.
"""
if reduction == 'elementwise_mean':
return loss.mean()
elif reduction == 'none':
return loss
elif reduction == 'sum':
return loss.sum()
else:
raise ValueError(f'{reduction} is not a valid reduction')
def cumulative_link_loss(y_pred: 'torch.Tensor', y_true: 'torch.Tensor', reduction: 'str'='elementwise_mean', class_weights: 'Optional[np.ndarray]'=None) -> torch.Tensor:
"""
Calculates the negative log likelihood using the logistic cumulative link
function.
See "On the consistency of ordinal regression methods", Pedregosa et. al.
for more details. While this paper is not the first to introduce this, it
is the only one that I could find that was easily readable outside of
paywalls.
Parameters
----------
y_pred : torch.Tensor, [batch_size, num_classes]
Predicted target class probabilities. float dtype.
y_true : torch.Tensor, [batch_size, 1]
True target classes. long dtype.
reduction : str
Method for reducing the loss. Options include 'elementwise_mean',
'none', and 'sum'.
class_weights : np.ndarray, [num_classes] optional (default=None)
An array of weights for each class. If included, then for each sample,
look up the true class and multiply that sample's loss by the weight in
this array.
Returns
-------
loss: torch.Tensor
"""
eps = 1e-15
likelihoods = torch.clamp(torch.gather(y_pred, 1, y_true), eps, 1 - eps)
neg_log_likelihood = -torch.log(likelihoods)
if class_weights is not None:
        class_weights = torch.as_tensor(class_weights,
            dtype=neg_log_likelihood.dtype, device=neg_log_likelihood.device)
neg_log_likelihood *= class_weights[y_true]
loss = _reduction(neg_log_likelihood, reduction)
return loss
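# Hedged usage sketch (added for illustration; not in the original source).
# gather picks each row's predicted probability at its true class index,
# so with a single sample the loss is simply -log(p_true).
def _demo_cumulative_link_loss():
    y_pred = torch.tensor([[0.7, 0.2, 0.1]])
    y_true = torch.tensor([[0]])
    return cumulative_link_loss(y_pred, y_true)  # -log(0.7) ~= 0.3567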
class CumulativeLinkLoss(nn.Module):
"""
Module form of cumulative_link_loss() loss function
Parameters
----------
reduction : str
Method for reducing the loss. Options include 'elementwise_mean',
'none', and 'sum'.
class_weights : np.ndarray, [num_classes] optional (default=None)
An array of weights for each class. If included, then for each sample,
look up the true class and multiply that sample's loss by the weight in
this array.
"""
def __init__(self, reduction: 'str'='elementwise_mean', class_weights:
'Optional[torch.Tensor]'=None) ->None:
super().__init__()
self.class_weights = class_weights
self.reduction = reduction
def forward(self, y_pred: 'torch.Tensor', y_true: 'torch.Tensor'
) ->torch.Tensor:
        return cumulative_link_loss(y_pred, y_true,
            reduction=self.reduction, class_weights=self.class_weights)
def get_inputs():
    return [torch.ones([4, 4], dtype=torch.int64),
        torch.ones([4, 4], dtype=torch.int64)]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import numpy as np
from torch import nn
from typing import Optional
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_clamp_gather_log_mean_neg_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
r1 = rindex // 4
tmp0 = tl.load(in_ptr0 + r2, None)
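    # tmp1..tmp4 normalize possibly-negative indices by adding the dim size
    # (4) before the bounds assert, mirroring how Inductor guards gather.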
tmp1 = tl.full([XBLOCK, RBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tl.device_assert((0 <= tmp4) & (tmp4 < 4),
'index out of bounds: 0 <= tmp4 < 4')
tmp6 = tl.load(in_ptr1 + (tmp4 + 4 * r1), None, eviction_policy=
'evict_last')
tmp7 = tmp6.to(tl.float32)
tmp8 = 1e-15
tmp9 = triton_helpers.maximum(tmp7, tmp8)
tmp10 = 0.999999999999999
tmp11 = triton_helpers.minimum(tmp9, tmp10)
tmp12 = tmp11.to(tl.int64)
tmp13 = tmp12.to(tl.float32)
tmp14 = tl_math.log(tmp13)
tmp15 = -tmp14
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.sum(tmp16, 1)[:, None]
tmp19 = 16.0
tmp20 = tmp18 / tmp19
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp20, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_clamp_gather_log_mean_neg_0[grid(1)](buf1, arg1_1,
arg0_1, 1, 16, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
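# Hedged invocation sketch (not part of the generated file): `call` expects
# two contiguous (4, 4) CUDA tensors; arg1_1 supplies the gather indices, so
# its values must lie in [0, 4). Dtypes mirror get_inputs() in the eager
# reference, which traces both tensors as int64.
def _demo_call():
    y_pred = torch.ones([4, 4], dtype=torch.int64, device='cuda')
    y_true = torch.ones([4, 4], dtype=torch.int64, device='cuda')
    loss, = call([y_pred, y_true])
    return loss  # 0-dim float32 tensor on CUDA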
def _reduction(loss: 'torch.Tensor', reduction: 'str') ->torch.Tensor:
"""
Reduce loss
Parameters
----------
loss : torch.Tensor, [batch_size, num_classes]
Batch losses.
reduction : str
Method for reducing the loss. Options include 'elementwise_mean',
'none', and 'sum'.
Returns
-------
loss : torch.Tensor
Reduced loss.
"""
if reduction == 'elementwise_mean':
return loss.mean()
elif reduction == 'none':
return loss
elif reduction == 'sum':
return loss.sum()
else:
raise ValueError(f'{reduction} is not a valid reduction')
def cumulative_link_loss(y_pred: 'torch.Tensor', y_true: 'torch.Tensor',
reduction: 'str'='elementwise_mean', class_weights:
'Optional[np.ndarray]'=None) ->torch.Tensor:
"""
Calculates the negative log likelihood using the logistic cumulative link
function.
See "On the consistency of ordinal regression methods", Pedregosa et. al.
for more details. While this paper is not the first to introduce this, it
is the only one that I could find that was easily readable outside of
paywalls.
Parameters
----------
y_pred : torch.Tensor, [batch_size, num_classes]
Predicted target class probabilities. float dtype.
y_true : torch.Tensor, [batch_size, 1]
True target classes. long dtype.
reduction : str
Method for reducing the loss. Options include 'elementwise_mean',
'none', and 'sum'.
class_weights : np.ndarray, [num_classes] optional (default=None)
An array of weights for each class. If included, then for each sample,
look up the true class and multiply that sample's loss by the weight in
this array.
Returns
-------
loss: torch.Tensor
"""
eps = 1e-15
likelihoods = torch.clamp(torch.gather(y_pred, 1, y_true), eps, 1 - eps)
neg_log_likelihood = -torch.log(likelihoods)
if class_weights is not None:
        class_weights = torch.as_tensor(class_weights,
            dtype=neg_log_likelihood.dtype, device=neg_log_likelihood.device)
neg_log_likelihood *= class_weights[y_true]
loss = _reduction(neg_log_likelihood, reduction)
return loss
class CumulativeLinkLossNew(nn.Module):
"""
Module form of cumulative_link_loss() loss function
Parameters
----------
reduction : str
Method for reducing the loss. Options include 'elementwise_mean',
'none', and 'sum'.
class_weights : np.ndarray, [num_classes] optional (default=None)
An array of weights for each class. If included, then for each sample,
look up the true class and multiply that sample's loss by the weight in
this array.
"""
def __init__(self, reduction: 'str'='elementwise_mean', class_weights:
'Optional[torch.Tensor]'=None) ->None:
super().__init__()
self.class_weights = class_weights
self.reduction = reduction
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
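# Hedged usage sketch (illustrative; not in the original source):
# CumulativeLinkLossNew is the compiled drop-in for CumulativeLinkLoss; its
# forward routes through `call` above instead of the eager ops. Note that
# class_weights is stored but never consulted by the traced graph.
def _demo_compiled_module():
    module = CumulativeLinkLossNew()
    y_pred = torch.ones([4, 4], dtype=torch.int64, device='cuda')
    y_true = torch.ones([4, 4], dtype=torch.int64, device='cuda')
    return module(y_pred, y_true)  # 0-dim float32 tensor on CUDA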
|
EthanRosenthal/spacecutter
|
CumulativeLinkLoss
| false | 13,638 |
[
"MIT"
] | 74 |
37a6f7367905b50e7886dc1ef2bfe1d63220347a
|
https://github.com/EthanRosenthal/spacecutter/tree/37a6f7367905b50e7886dc1ef2bfe1d63220347a
|
SinkhornKnopp
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/3g/c3gd5ipjfbufzu6hvujxufg6z3emufd62cvcqjyy3muqew2xvzbd.py
# Topologically Sorted Source Nodes: [sum_Q], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_Q => sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%permute,), kwargs = {})
triton_per_fused_sum_0 = async_compile.triton('triton_per_fused_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_sum_0(in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.sum(tmp4, 1)[:, None]
tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/pb/cpb5rsib7fg2llnv7dg2enzvdhrqqrlufmxvryadr6zckhtmyysm.py
# Topologically Sorted Source Nodes: [sum_of_rows], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_of_rows => sum_2
# Graph fragment:
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%permute_2, [1], True), kwargs = {})
triton_poi_fused_sum_1 = async_compile.triton('triton_poi_fused_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp4 = tl.load(in_ptr1 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr0 + (4 + x0), xmask)
tmp12 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp17 = tl.load(in_ptr0 + (12 + x0), xmask)
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp7 * tmp1
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp9 / tmp5
tmp11 = tmp6 + tmp10
tmp13 = tmp12 * tmp1
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp16 = tmp11 + tmp15
tmp18 = tmp17 * tmp1
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp19 / tmp5
tmp21 = tmp16 + tmp20
tl.store(out_ptr0 + (x0), tmp21, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ay/cayipr5tzejzfvzl7ba3w7cqhf2axor2ag2cqam22ambv3zsqfqn.py
# Topologically Sorted Source Nodes: [sum_3], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_3 => sum_3
# Graph fragment:
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%permute_6, [0], True), kwargs = {})
triton_poi_fused_sum_2 = async_compile.triton('triton_poi_fused_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 9, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sum_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + (0))
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
tmp12 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr2 + (1))
tmp17 = tl.broadcast_to(tmp16, [XBLOCK])
tmp21 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr2 + (2))
tmp26 = tl.broadcast_to(tmp25, [XBLOCK])
tmp30 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr2 + (3))
tmp35 = tl.broadcast_to(tmp34, [XBLOCK])
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp9 = tmp6 / tmp8
tmp10 = 0.25
tmp11 = tmp9 * tmp10
tmp13 = tmp12 * tmp1
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp18 = tmp15 / tmp17
tmp19 = tmp18 * tmp10
tmp20 = tmp11 + tmp19
tmp22 = tmp21 * tmp1
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp23 / tmp5
tmp27 = tmp24 / tmp26
tmp28 = tmp27 * tmp10
tmp29 = tmp20 + tmp28
tmp31 = tmp30 * tmp1
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp32 / tmp5
tmp36 = tmp33 / tmp35
tmp37 = tmp36 * tmp10
tmp38 = tmp29 + tmp37
tl.store(out_ptr0 + (x0), tmp38, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/gf/cgfazley3l5rdd6ypj2skdwbxoltt6cdo3d3mpt4z3kqywdlsil5.py
# Topologically Sorted Source Nodes: [sum_of_rows_1], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_of_rows_1 => sum_4
# Graph fragment:
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%permute_10, [1], True), kwargs = {})
triton_poi_fused_sum_3 = async_compile.triton('triton_poi_fused_sum_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 10, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sum_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp4 = tl.load(in_ptr1 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + (x0), xmask)
tmp11 = tl.load(in_ptr3 + (0))
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp15 = tl.load(in_ptr0 + (4 + x0), xmask)
tmp21 = tl.load(in_ptr3 + (1))
tmp22 = tl.broadcast_to(tmp21, [XBLOCK])
tmp26 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp32 = tl.load(in_ptr3 + (2))
tmp33 = tl.broadcast_to(tmp32, [XBLOCK])
tmp37 = tl.load(in_ptr0 + (12 + x0), xmask)
tmp43 = tl.load(in_ptr3 + (3))
tmp44 = tl.broadcast_to(tmp43, [XBLOCK])
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp6 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp13 = tmp10 / tmp12
tmp14 = tmp13 * tmp9
tmp16 = tmp15 * tmp1
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp17 / tmp5
tmp19 = tmp18 / tmp7
tmp20 = tmp19 * tmp9
tmp23 = tmp20 / tmp22
tmp24 = tmp23 * tmp9
tmp25 = tmp14 + tmp24
tmp27 = tmp26 * tmp1
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp28 / tmp5
tmp30 = tmp29 / tmp7
tmp31 = tmp30 * tmp9
tmp34 = tmp31 / tmp33
tmp35 = tmp34 * tmp9
tmp36 = tmp25 + tmp35
tmp38 = tmp37 * tmp1
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp39 / tmp5
tmp41 = tmp40 / tmp7
tmp42 = tmp41 * tmp9
tmp45 = tmp42 / tmp44
tmp46 = tmp45 * tmp9
tmp47 = tmp36 + tmp46
tl.store(out_ptr0 + (x0), tmp47, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/im/cimyq4ncn2lt7wfmnkrejd4t7hctagm26a6wsjpob7y2e6gdebuy.py
# Topologically Sorted Source Nodes: [Q_7], Original ATen: [aten.div]
# Source node to ATen node mapping:
# Q_7 => div_7
# Graph fragment:
# %div_7 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%permute_12, 4), kwargs = {})
triton_poi_fused_div_4 = async_compile.triton('triton_poi_fused_div_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp4 = tl.load(in_ptr1 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp6 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp12 = tmp10 / tmp11
tmp13 = tmp12 * tmp9
tmp15 = tmp13 / tmp14
tmp16 = tmp15 * tmp9
tl.store(out_ptr0 + (x2), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/f4/cf4q6mh4hv4abtcbirw4vkce7au36jgqitbjdciddckwumhdqyiq.py
# Topologically Sorted Source Nodes: [Q_9], Original ATen: [aten.div]
# Source node to ATen node mapping:
# Q_9 => div_9
# Graph fragment:
# %div_9 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%permute_16, 4), kwargs = {})
triton_poi_fused_div_5 = async_compile.triton('triton_poi_fused_div_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/zl/czlhaiakbynkvvwkmvsahwmxzw7xdccr5ho7bxsxvn4m725ewzpe.py
# Topologically Sorted Source Nodes: [Q_11], Original ATen: [aten.div]
# Source node to ATen node mapping:
# Q_11 => div_11
# Graph fragment:
# %div_11 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%permute_20, 4), kwargs = {})
triton_poi_fused_div_6 = async_compile.triton('triton_poi_fused_div_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_6(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/36/c36xk4rs42yzbwetmlob65txyg64pwcsheobuwb6xw3pwyopqghz.py
# Topologically Sorted Source Nodes: [Q_14], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# Q_14 => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%permute_26, 4), kwargs = {})
triton_poi_fused_mul_7 = async_compile.triton('triton_poi_fused_mul_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp11 = 4.0
tmp12 = tmp10 * tmp11
tl.store(out_ptr0 + (x1 + (4*y0)), tmp12, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/dt/cdt2v5rbgu26tdvjkb5jbjk3tqziqwvnlr3boatkbhu2j3hsthua.py
# Topologically Sorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
# Graph fragment:
# %permute_31 : [num_users=1] = call_function[target=torch.ops.aten.permute.default](args = (%permute_30, [1, 0]), kwargs = {})
triton_poi_fused_8 = async_compile.triton('triton_poi_fused_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x1)), xmask & ymask)
tl.store(out_ptr0 + (x1 + (4*y0)), tmp0, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [sum_Q], Original ATen: [aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_sum_0.run(arg0_1, buf0, 1, 16, grid=grid(1), stream=stream0)
buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [sum_of_rows], Original ATen: [aten.sum]
triton_poi_fused_sum_1.run(arg0_1, buf0, buf1, 4, grid=grid(4), stream=stream0)
buf2 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sum_3], Original ATen: [aten.sum]
triton_poi_fused_sum_2.run(arg0_1, buf0, buf1, buf2, 4, grid=grid(4), stream=stream0)
buf3 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [sum_of_rows_1], Original ATen: [aten.sum]
triton_poi_fused_sum_3.run(arg0_1, buf0, buf1, buf2, buf3, 4, grid=grid(4), stream=stream0)
buf4 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [Q_7], Original ATen: [aten.div]
triton_poi_fused_div_4.run(arg0_1, buf0, buf1, buf2, buf3, buf4, 16, grid=grid(16), stream=stream0)
del arg0_1
del buf0
del buf1
del buf2
del buf3
buf5 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [Q_9], Original ATen: [aten.div]
triton_poi_fused_div_5.run(buf4, buf5, 16, grid=grid(16), stream=stream0)
buf6 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [Q_11], Original ATen: [aten.div]
triton_poi_fused_div_6.run(buf5, buf6, 16, grid=grid(16), stream=stream0)
buf7 = reinterpret_tensor(buf5, (4, 4), (4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [Q_14], Original ATen: [aten.mul]
triton_poi_fused_mul_7.run(buf6, buf7, 4, 4, grid=grid(4, 4), stream=stream0)
buf8 = reinterpret_tensor(buf6, (4, 4), (4, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
triton_poi_fused_8.run(buf7, buf8, 4, 4, grid=grid(4, 4), stream=stream0)
del buf7
return (buf8, )
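# Added commentary: the kernels above unroll the eager SinkhornKnopp graph
# for epsilon=0.05 (the 20.0 folded into each exp is 1/epsilon) and
# B = K = 4 (the 0.25 factors). buf0 holds sum_Q; buf1-buf3 and the div
# kernels replay the alternating row/column normalizations of the three
# iterations; mul_7 folds in the final `Q *= B` rescale and
# triton_poi_fused_8 transposes the result back to the input layout.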
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
class SinkhornKnopp(torch.nn.Module):
def __init__(self, num_iters=3, epsilon=0.05):
super().__init__()
self.num_iters = num_iters
self.epsilon = epsilon
@torch.no_grad()
def forward(self, logits):
Q = torch.exp(logits / self.epsilon).t()
B = Q.shape[1]
K = Q.shape[0]
sum_Q = torch.sum(Q)
Q /= sum_Q
for it in range(self.num_iters):
sum_of_rows = torch.sum(Q, dim=1, keepdim=True)
Q /= sum_of_rows
Q /= K
Q /= torch.sum(Q, dim=0, keepdim=True)
Q /= B
Q *= B
return Q.t()
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
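# Hedged verification sketch (illustrative; not in the original source):
def _check_marginals():
    q = SinkhornKnopp(num_iters=3, epsilon=0.05)(torch.rand(4, 4))
    # The loop ends on a column normalization of Q, which the final
    # transpose turns into rows, so q's rows sum to 1 (up to float error);
    # columns are only approximately uniform after three iterations.
    return q.sum(dim=1), q.sum(dim=0)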
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_sum_0(in_ptr0, out_ptr0, xnumel, rnumel,
    XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.sum(tmp4, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp6, None)
@triton.jit
def triton_poi_fused_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr0 + (4 + x0), xmask)
tmp12 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp17 = tl.load(in_ptr0 + (12 + x0), xmask)
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp7 * tmp1
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp9 / tmp5
tmp11 = tmp6 + tmp10
tmp13 = tmp12 * tmp1
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp16 = tmp11 + tmp15
tmp18 = tmp17 * tmp1
tmp19 = tl_math.exp(tmp18)
tmp20 = tmp19 / tmp5
tmp21 = tmp16 + tmp20
tl.store(out_ptr0 + x0, tmp21, xmask)
@triton.jit
def triton_poi_fused_sum_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + 0)
tmp8 = tl.broadcast_to(tmp7, [XBLOCK])
    tmp12 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr2 + 1)
    tmp17 = tl.broadcast_to(tmp16, [XBLOCK])
    tmp21 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp25 = tl.load(in_ptr2 + 2)
    tmp26 = tl.broadcast_to(tmp25, [XBLOCK])
    tmp30 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp34 = tl.load(in_ptr2 + 3)
    tmp35 = tl.broadcast_to(tmp34, [XBLOCK])
tmp34 = tl.load(in_ptr2 + 3)
tmp35 = tl.broadcast_to(tmp34, [XBLOCK])
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp9 = tmp6 / tmp8
tmp10 = 0.25
tmp11 = tmp9 * tmp10
tmp13 = tmp12 * tmp1
tmp14 = tl_math.exp(tmp13)
tmp15 = tmp14 / tmp5
tmp18 = tmp15 / tmp17
tmp19 = tmp18 * tmp10
tmp20 = tmp11 + tmp19
tmp22 = tmp21 * tmp1
tmp23 = tl_math.exp(tmp22)
tmp24 = tmp23 / tmp5
tmp27 = tmp24 / tmp26
tmp28 = tmp27 * tmp10
tmp29 = tmp20 + tmp28
tmp31 = tmp30 * tmp1
tmp32 = tl_math.exp(tmp31)
tmp33 = tmp32 / tmp5
tmp36 = tmp33 / tmp35
tmp37 = tmp36 * tmp10
tmp38 = tmp29 + tmp37
tl.store(out_ptr0 + x0, tmp38, xmask)
@triton.jit
def triton_poi_fused_sum_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + x0, xmask)
tmp11 = tl.load(in_ptr3 + 0)
tmp12 = tl.broadcast_to(tmp11, [XBLOCK])
tmp15 = tl.load(in_ptr0 + (4 + x0), xmask)
tmp21 = tl.load(in_ptr3 + 1)
tmp22 = tl.broadcast_to(tmp21, [XBLOCK])
tmp26 = tl.load(in_ptr0 + (8 + x0), xmask)
tmp32 = tl.load(in_ptr3 + 2)
tmp33 = tl.broadcast_to(tmp32, [XBLOCK])
tmp37 = tl.load(in_ptr0 + (12 + x0), xmask)
tmp43 = tl.load(in_ptr3 + 3)
tmp44 = tl.broadcast_to(tmp43, [XBLOCK])
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp6 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp13 = tmp10 / tmp12
tmp14 = tmp13 * tmp9
tmp16 = tmp15 * tmp1
tmp17 = tl_math.exp(tmp16)
tmp18 = tmp17 / tmp5
tmp19 = tmp18 / tmp7
tmp20 = tmp19 * tmp9
tmp23 = tmp20 / tmp22
tmp24 = tmp23 * tmp9
tmp25 = tmp14 + tmp24
tmp27 = tmp26 * tmp1
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp28 / tmp5
tmp30 = tmp29 / tmp7
tmp31 = tmp30 * tmp9
tmp34 = tmp31 / tmp33
tmp35 = tmp34 * tmp9
tmp36 = tmp25 + tmp35
tmp38 = tmp37 * tmp1
tmp39 = tl_math.exp(tmp38)
tmp40 = tmp39 / tmp5
tmp41 = tmp40 / tmp7
tmp42 = tmp41 * tmp9
tmp45 = tmp42 / tmp44
tmp46 = tmp45 * tmp9
tmp47 = tmp36 + tmp46
tl.store(out_ptr0 + x0, tmp47, xmask)
@triton.jit
def triton_poi_fused_div_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp4 = tl.load(in_ptr1 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp7 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp1 = 20.0
tmp2 = tmp0 * tmp1
tmp3 = tl_math.exp(tmp2)
tmp6 = tmp3 / tmp5
tmp8 = tmp6 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp12 = tmp10 / tmp11
tmp13 = tmp12 * tmp9
tmp15 = tmp13 / tmp14
tmp16 = tmp15 * tmp9
tl.store(out_ptr0 + x2, tmp16, xmask)
@triton.jit
def triton_poi_fused_div_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_div_6(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_mul_7(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tmp9 = 0.25
tmp10 = tmp8 * tmp9
tmp11 = 4.0
tmp12 = tmp10 * tmp11
tl.store(out_ptr0 + (x1 + 4 * y0), tmp12, xmask & ymask)
@triton.jit
def triton_poi_fused_8(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 4
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x1 = xindex
y0 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x1), xmask & ymask)
tl.store(out_ptr0 + (x1 + 4 * y0), tmp0, xmask & ymask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_sum_0[grid(1)](arg0_1, buf0, 1, 16, XBLOCK=1,
num_warps=2, num_stages=1)
buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
triton_poi_fused_sum_1[grid(4)](arg0_1, buf0, buf1, 4, XBLOCK=4,
num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((1, 4), (4, 1), torch.float32)
triton_poi_fused_sum_2[grid(4)](arg0_1, buf0, buf1, buf2, 4, XBLOCK
=4, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
triton_poi_fused_sum_3[grid(4)](arg0_1, buf0, buf1, buf2, buf3, 4,
XBLOCK=4, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
triton_poi_fused_div_4[grid(16)](arg0_1, buf0, buf1, buf2, buf3,
buf4, 16, XBLOCK=16, num_warps=1, num_stages=1)
del arg0_1
del buf0
del buf1
del buf2
del buf3
buf5 = empty_strided_cuda((4, 4), (1, 4), torch.float32)
triton_poi_fused_div_5[grid(16)](buf4, buf5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf6 = buf4
del buf4
triton_poi_fused_div_6[grid(16)](buf5, buf6, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf7 = reinterpret_tensor(buf5, (4, 4), (4, 1), 0)
del buf5
triton_poi_fused_mul_7[grid(4, 4)](buf6, buf7, 4, 4, XBLOCK=4,
YBLOCK=4, num_warps=1, num_stages=1)
buf8 = reinterpret_tensor(buf6, (4, 4), (4, 1), 0)
del buf6
triton_poi_fused_8[grid(4, 4)](buf7, buf8, 4, 4, XBLOCK=4, YBLOCK=4,
num_warps=1, num_stages=1)
del buf7
return buf8,
class SinkhornKnoppNew(torch.nn.Module):
def __init__(self, num_iters=3, epsilon=0.05):
super().__init__()
self.num_iters = num_iters
self.epsilon = epsilon
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
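# Hedged check (illustrative; not in the original source): the compiled
# wrapper should reproduce the eager algorithm's invariant that output rows
# sum to ~1 on the (4, 4) CUDA inputs it was traced for.
def _check_compiled_marginals():
    q = SinkhornKnoppNew()(torch.rand(4, 4, device='cuda'))
    return q.sum(dim=1)  # expected ~torch.ones(4)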
|
DonkeyShot21/UNO
|
SinkhornKnopp
| false | 13,639 |
[
"MIT"
] | 87 |
7613d3f8c58e5f16ee0d68fdd803ef442d819af4
|
https://github.com/DonkeyShot21/UNO/tree/7613d3f8c58e5f16ee0d68fdd803ef442d819af4
|
DAGNNConv
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/jj/cjjiphzf24trw5axa2pmjeyaxi43ixlzlidljcludzymowb4xjf4.py
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# h => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_2, %mm, %mm_1, %mm_2, %mm_3, %mm_4, %mm_5, %mm_6, %mm_7, %mm_8, %mm_9], 1), kwargs = {})
triton_poi_fused_stack_0 = async_compile.triton('triton_poi_fused_stack_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tl.store(out_ptr0 + (x0 + (44*x1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/vz/cvz2b43gw7evjx7y4go5psaewdyw7vxpnuzbnsgioqvsaqvketru.py
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# h => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_2, %mm, %mm_1, %mm_2, %mm_3, %mm_4, %mm_5, %mm_6, %mm_7, %mm_8, %mm_9], 1), kwargs = {})
triton_poi_fused_stack_1 = async_compile.triton('triton_poi_fused_stack_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tl.store(out_ptr0 + (x0 + (44*x1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/2v/c2v75im7azf6ydjugomrvbgk2qfdv2227zl2y3hb44a525i6c3ql.py
# Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid]
# Source node to ATen node mapping:
# sigmoid => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%view_2,), kwargs = {})
triton_poi_fused_sigmoid_2 = async_compile.triton('triton_poi_fused_sigmoid_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sigmoid_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sigmoid_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 44
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (1, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.mm]
extern_kernels.mm(primals_1, primals_2, out=buf0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.mm]
extern_kernels.mm(primals_1, buf0, out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.mm]
extern_kernels.mm(primals_1, buf1, out=buf2)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.mm]
extern_kernels.mm(primals_1, buf2, out=buf3)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.mm]
extern_kernels.mm(primals_1, buf3, out=buf4)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.mm]
extern_kernels.mm(primals_1, buf4, out=buf5)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.mm]
extern_kernels.mm(primals_1, buf5, out=buf6)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.mm]
extern_kernels.mm(primals_1, buf6, out=buf7)
buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.mm]
extern_kernels.mm(primals_1, buf7, out=buf8)
buf20 = empty_strided_cuda((4, 44), (44, 1), torch.float32)
buf9 = reinterpret_tensor(buf20, (4, 4), (44, 1), 40) # alias
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.mm]
extern_kernels.mm(primals_1, buf8, out=buf9)
del primals_1
buf10 = reinterpret_tensor(buf20, (4, 4), (44, 1), 0) # alias
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.stack]
stream0 = get_raw_stream(0)
triton_poi_fused_stack_0.run(primals_2, buf10, 16, grid=grid(16), stream=stream0)
del primals_2
buf11 = reinterpret_tensor(buf20, (4, 4), (44, 1), 4) # alias
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.stack]
triton_poi_fused_stack_1.run(buf0, buf11, 16, grid=grid(16), stream=stream0)
del buf0
buf12 = reinterpret_tensor(buf20, (4, 4), (44, 1), 8) # alias
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.stack]
triton_poi_fused_stack_1.run(buf1, buf12, 16, grid=grid(16), stream=stream0)
del buf1
buf13 = reinterpret_tensor(buf20, (4, 4), (44, 1), 12) # alias
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.stack]
triton_poi_fused_stack_1.run(buf2, buf13, 16, grid=grid(16), stream=stream0)
del buf2
buf14 = reinterpret_tensor(buf20, (4, 4), (44, 1), 16) # alias
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.stack]
triton_poi_fused_stack_0.run(buf3, buf14, 16, grid=grid(16), stream=stream0)
del buf3
buf15 = reinterpret_tensor(buf20, (4, 4), (44, 1), 20) # alias
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.stack]
triton_poi_fused_stack_1.run(buf4, buf15, 16, grid=grid(16), stream=stream0)
del buf4
buf16 = reinterpret_tensor(buf20, (4, 4), (44, 1), 24) # alias
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.stack]
triton_poi_fused_stack_1.run(buf5, buf16, 16, grid=grid(16), stream=stream0)
del buf5
buf17 = reinterpret_tensor(buf20, (4, 4), (44, 1), 28) # alias
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.stack]
triton_poi_fused_stack_1.run(buf6, buf17, 16, grid=grid(16), stream=stream0)
del buf6
buf18 = reinterpret_tensor(buf20, (4, 4), (44, 1), 32) # alias
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.stack]
triton_poi_fused_stack_0.run(buf7, buf18, 16, grid=grid(16), stream=stream0)
del buf7
buf19 = reinterpret_tensor(buf20, (4, 4), (44, 1), 36) # alias
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.stack]
triton_poi_fused_stack_1.run(buf8, buf19, 16, grid=grid(16), stream=stream0)
buf21 = empty_strided_cuda((44, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf20, (44, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 1), (1, 4), 0), out=buf21)
del primals_3
buf22 = empty_strided_cuda((4, 11, 1), (11, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [sigmoid], Original ATen: [aten.sigmoid]
triton_poi_fused_sigmoid_2.run(buf21, buf22, 44, grid=grid(44), stream=stream0)
buf23 = reinterpret_tensor(buf8, (4, 1, 4), (4, 4, 1), 0); del buf8 # reuse
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf22, (4, 1, 11), (11, 0, 1), 0), reinterpret_tensor(buf20, (4, 11, 4), (44, 4, 1), 0), out=buf23)
del buf22
return (reinterpret_tensor(buf23, (4, 4), (4, 1), 0), buf20, buf21, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class DAGNNConv(nn.Module):
def __init__(self, in_features, out_features=1, K=10, bias=False):
super().__init__()
assert out_features == 1, "'out_features' must be 1"
self.in_features = in_features
self.out_features = out_features
self.lin = nn.Linear(in_features, out_features, bias=bias)
self.K = K
self.act = nn.Sigmoid()
def reset_parameters(self):
self.lin.reset_parameters()
def forward(self, x, adj):
propagations = [x]
for _ in range(self.K):
x = adj.mm(x)
propagations.append(x)
h = torch.stack(propagations, dim=1)
retain_score = self.act(self.lin(h)).permute(0, 2, 1).contiguous()
out = (retain_score @ h).squeeze(1)
return out
def __repr__(self):
return (
f'{self.__class__.__name__}({self.in_features}, {self.out_features}, K={self.K})'
)
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_features': 4}]
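# A minimal usage sketch (not from the original source; row-normalizing `adj`
# is an assumption so that adj.mm acts as neighbourhood averaging): it checks
# that K+1 rounds of propagation plus the sigmoid retain-score gating preserve
# the (num_nodes, in_features) shape.
if __name__ == '__main__':
    torch.manual_seed(0)
    x, adj = get_inputs()
    adj = adj / adj.sum(dim=1, keepdim=True)
    conv = DAGNNConv(in_features=4)
    out = conv(x, adj)
    assert out.shape == x.shape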
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_stack_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tl.store(out_ptr0 + (x0 + 44 * x1), tmp0, xmask)
@triton.jit
def triton_poi_fused_stack_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tl.store(out_ptr0 + (x0 + 44 * x1), tmp0, xmask)
@triton.jit
def triton_poi_fused_sigmoid_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
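    # xnumel = 44 covers 4 nodes x (K + 1) = 11 stacked propagations: one
    # retain score per (node, hop) pair.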
xnumel = 44
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.sigmoid(tmp0)
tl.store(out_ptr0 + x0, tmp1, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (1, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, primals_2, out=buf0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, buf0, out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, buf1, out=buf2)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, buf2, out=buf3)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, buf3, out=buf4)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, buf4, out=buf5)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, buf5, out=buf6)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, buf6, out=buf7)
buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_1, buf7, out=buf8)
buf20 = empty_strided_cuda((4, 44), (44, 1), torch.float32)
buf9 = reinterpret_tensor(buf20, (4, 4), (44, 1), 40)
extern_kernels.mm(primals_1, buf8, out=buf9)
del primals_1
buf10 = reinterpret_tensor(buf20, (4, 4), (44, 1), 0)
get_raw_stream(0)
triton_poi_fused_stack_0[grid(16)](primals_2, buf10, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_2
buf11 = reinterpret_tensor(buf20, (4, 4), (44, 1), 4)
triton_poi_fused_stack_1[grid(16)](buf0, buf11, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf0
buf12 = reinterpret_tensor(buf20, (4, 4), (44, 1), 8)
triton_poi_fused_stack_1[grid(16)](buf1, buf12, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf1
buf13 = reinterpret_tensor(buf20, (4, 4), (44, 1), 12)
triton_poi_fused_stack_1[grid(16)](buf2, buf13, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf2
buf14 = reinterpret_tensor(buf20, (4, 4), (44, 1), 16)
triton_poi_fused_stack_0[grid(16)](buf3, buf14, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf3
buf15 = reinterpret_tensor(buf20, (4, 4), (44, 1), 20)
triton_poi_fused_stack_1[grid(16)](buf4, buf15, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf4
buf16 = reinterpret_tensor(buf20, (4, 4), (44, 1), 24)
triton_poi_fused_stack_1[grid(16)](buf5, buf16, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf5
buf17 = reinterpret_tensor(buf20, (4, 4), (44, 1), 28)
triton_poi_fused_stack_1[grid(16)](buf6, buf17, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf6
buf18 = reinterpret_tensor(buf20, (4, 4), (44, 1), 32)
triton_poi_fused_stack_0[grid(16)](buf7, buf18, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf7
buf19 = reinterpret_tensor(buf20, (4, 4), (44, 1), 36)
triton_poi_fused_stack_1[grid(16)](buf8, buf19, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf21 = empty_strided_cuda((44, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf20, (44, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 1), (1, 4), 0), out=buf21)
del primals_3
buf22 = empty_strided_cuda((4, 11, 1), (11, 1, 1), torch.float32)
triton_poi_fused_sigmoid_2[grid(44)](buf21, buf22, 44, XBLOCK=64,
num_warps=1, num_stages=1)
buf23 = reinterpret_tensor(buf8, (4, 1, 4), (4, 4, 1), 0)
del buf8
extern_kernels.bmm(reinterpret_tensor(buf22, (4, 1, 11), (11, 0, 1),
0), reinterpret_tensor(buf20, (4, 11, 4), (44, 4, 1), 0), out=buf23
)
del buf22
return reinterpret_tensor(buf23, (4, 4), (4, 1), 0), buf20, buf21
class DAGNNConvNew(nn.Module):
def __init__(self, in_features, out_features=1, K=10, bias=False):
super().__init__()
assert out_features == 1, "'out_features' must be 1"
self.in_features = in_features
self.out_features = out_features
self.lin = nn.Linear(in_features, out_features, bias=bias)
self.K = K
self.act = nn.Sigmoid()
def reset_parameters(self):
self.lin.reset_parameters()
def __repr__(self):
return (
f'{self.__class__.__name__}({self.in_features}, {self.out_features}, K={self.K})'
)
def forward(self, input_0, input_1):
primals_3 = self.lin.weight
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
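# A hedged smoke test (assumes a CUDA device; shapes are fixed at 4 x 4 by the
# size guards in call()). Besides the gated output, call() also returns the
# stacked propagations (buf20) and the pre-sigmoid scores (buf21), which the
# AOT backward graph reuses.
if __name__ == '__main__':
    model = DAGNNConvNew(in_features=4).cuda()
    a = torch.rand(4, 4, device='cuda')
    b = torch.rand(4, 4, device='cuda')
    assert model(a, b).shape == (4, 4)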
|
EdisonLeeeee/GraphGallery
|
DAGNNConv
| false | 13,640 |
[
"MIT"
] | 300 |
4eec9c5136bda14809bd22584b26cc346cdb633b
|
https://github.com/EdisonLeeeee/GraphGallery/tree/4eec9c5136bda14809bd22584b26cc346cdb633b
|
TransitionUp
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/re/creleqpucbzvzrso3whbekvyjzfafblr33ygekztpuugjz5zfqbd.py
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# out_2 => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%slice_4, %primals_4], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 16) % 8
x0 = xindex % 4
x1 = (xindex // 4) % 4
x3 = (xindex // 128)
x4 = xindex % 16
x5 = xindex
tmp0 = x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (20 + x0 + (9*x1) + (81*x2) + (324*x3)), tmp4 & xmask, other=0.0)
tmp6 = tl.load(in_ptr1 + (x2), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tmp11 = tl.full([1], 8, tl.int64)
tmp12 = tmp0 < tmp11
tmp13 = tl.load(in_ptr2 + (x4 + (16*((-4) + x2)) + (64*x3)), tmp10 & xmask, other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tl.store(out_ptr0 + (x5), tmp14, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 9, 9), (324, 81, 9, 1))
buf1 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(buf0, primals_2, primals_4, buf1, 512, grid=grid(512), stream=stream0)
del buf0
del primals_2
del primals_4
return (buf1, primals_1, primals_3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch import nn
import torch.utils.data
def center_crop(layer, max_height, max_width):
_, _, h, w = layer.size()
xy1 = (w - max_width) // 2
xy2 = (h - max_height) // 2
return layer[:, :, xy2:xy2 + max_height, xy1:xy1 + max_width]
class TransitionUp(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.convTrans = nn.ConvTranspose2d(in_channels=in_channels,
out_channels=out_channels, kernel_size=3, stride=2, padding=0,
bias=True)
def forward(self, x, skip):
out = self.convTrans(x)
out = center_crop(out, skip.size(2), skip.size(3))
out = torch.cat([out, skip], 1)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
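# A small shape walkthrough (a sketch, not part of the original source): with
# a (4, 4, 4, 4) input, ConvTranspose2d(kernel_size=3, stride=2) produces a
# (4, 4, 9, 9) map ((H - 1) * 2 + 3 = 9), center_crop trims it back to the
# skip's 4 x 4 spatial size, and the channel concat yields 8 channels.
if __name__ == '__main__':
    up = TransitionUp(4, 4)
    x, skip = get_inputs()
    assert up.convTrans(x).shape == (4, 4, 9, 9)
    assert up(x, skip).shape == (4, 8, 4, 4)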
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 16 % 8
x0 = xindex % 4
x1 = xindex // 4 % 4
x3 = xindex // 128
x4 = xindex % 16
x5 = xindex
tmp0 = x2
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
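    # The load below reads the centre 4 x 4 window of the (4, 4, 9, 9) deconv
    # output: offset 20 = 2 * 9 + 2 is exactly center_crop's xy2 * W + xy1
    # with xy1 = xy2 = 2.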
tmp5 = tl.load(in_ptr0 + (20 + x0 + 9 * x1 + 81 * x2 + 324 * x3), tmp4 &
xmask, other=0.0)
tmp6 = tl.load(in_ptr1 + x2, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp7 = tmp5 + tmp6
tmp8 = tl.full(tmp7.shape, 0.0, tmp7.dtype)
tmp9 = tl.where(tmp4, tmp7, tmp8)
tmp10 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp13 = tl.load(in_ptr2 + (x4 + 16 * (-4 + x2) + 64 * x3), tmp10 &
xmask, other=0.0)
tmp14 = tl.where(tmp4, tmp9, tmp13)
tl.store(out_ptr0 + x5, tmp14, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(2,
2), padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 9, 9), (324, 81, 9, 1))
buf1 = empty_strided_cuda((4, 8, 4, 4), (128, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(512)](buf0, primals_2, primals_4, buf1,
512, XBLOCK=256, num_warps=4, num_stages=1)
del buf0
del primals_2
del primals_4
return buf1, primals_1, primals_3
def center_crop(layer, max_height, max_width):
_, _, h, w = layer.size()
xy1 = (w - max_width) // 2
xy2 = (h - max_height) // 2
return layer[:, :, xy2:xy2 + max_height, xy1:xy1 + max_width]
class TransitionUpNew(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.convTrans = nn.ConvTranspose2d(in_channels=in_channels,
out_channels=out_channels, kernel_size=3, stride=2, padding=0,
bias=True)
def forward(self, input_0, input_1):
primals_1 = self.convTrans.weight
primals_2 = self.convTrans.bias
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
ELEKTRONN/elektronn3
|
TransitionUp
| false | 13,641 |
[
"MIT"
] | 124 |
19c751855dffc67b744cd43e757aa4a5bd577d9b
|
https://github.com/ELEKTRONN/elektronn3/tree/19c751855dffc67b744cd43e757aa4a5bd577d9b
|
SSGConv
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/h7/ch7gznsrcg4y4o5d2ifvetpxhe6c5ieteeuh5azx66ij3nbrggv7.py
# Topologically Sorted Source Nodes: [x_out_1, mul_1, x_out_2, mul_2, x_out_3, mul_3, x_out_4, mul_4, x_out_5, mul_5, x_out_6, mul_6, x_out_7, mul_7, x_out_8, mul_8, x_out_9, mul_9, x_out_10, mul_10, x_out_11, mul_11, x_out_12, mul_12, x_out_13, mul_13, x_out_14, mul_14, x_out_15, mul_15, x_out_16, x_out_17, mul_16, x_out_18], Original ATen: [aten.add, aten.mul, aten.div]
# Source node to ATen node mapping:
# mul_1 => mul_1
# mul_10 => mul_10
# mul_11 => mul_11
# mul_12 => mul_12
# mul_13 => mul_13
# mul_14 => mul_14
# mul_15 => mul_15
# mul_16 => mul_16
# mul_2 => mul_2
# mul_3 => mul_3
# mul_4 => mul_4
# mul_5 => mul_5
# mul_6 => mul_6
# mul_7 => mul_7
# mul_8 => mul_8
# mul_9 => mul_9
# x_out_1 => mul
# x_out_10 => add_9
# x_out_11 => add_10
# x_out_12 => add_11
# x_out_13 => add_12
# x_out_14 => add_13
# x_out_15 => add_14
# x_out_16 => add_15
# x_out_17 => div
# x_out_18 => add_16
# x_out_2 => add_1
# x_out_3 => add_2
# x_out_4 => add_3
# x_out_5 => add_4
# x_out_6 => add_5
# x_out_7 => add_6
# x_out_8 => add_7
# x_out_9 => add_8
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm, 0.9), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_1, 0.9), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_2, 0.9), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %mul_2), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_3, 0.9), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %mul_3), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_4, 0.9), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %mul_4), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_5, 0.9), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %mul_5), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_6, 0.9), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, %mul_6), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_7, 0.9), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_6, %mul_7), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_8, 0.9), kwargs = {})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_7, %mul_8), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_9, 0.9), kwargs = {})
# %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_8, %mul_9), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_10, 0.9), kwargs = {})
# %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_9, %mul_10), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_11, 0.9), kwargs = {})
# %add_11 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_10, %mul_11), kwargs = {})
# %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_12, 0.9), kwargs = {})
# %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_11, %mul_12), kwargs = {})
# %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_13, 0.9), kwargs = {})
# %add_13 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_12, %mul_13), kwargs = {})
# %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_14, 0.9), kwargs = {})
# %add_14 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_13, %mul_14), kwargs = {})
# %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_15, 0.9), kwargs = {})
# %add_15 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_14, %mul_15), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_15, 16), kwargs = {})
# %mul_16 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.1), kwargs = {})
# %add_16 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %mul_16), kwargs = {})
triton_poi_fused_add_div_mul_0 = async_compile.triton('triton_poi_fused_add_div_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: '*fp32', 14: '*fp32', 15: '*fp32', 16: '*fp32', 17: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 17, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp3 = tl.load(in_ptr0 + (x0), xmask)
tmp6 = tl.load(in_ptr1 + (x0), xmask)
tmp9 = tl.load(in_ptr2 + (x0), xmask)
tmp12 = tl.load(in_ptr3 + (x0), xmask)
tmp15 = tl.load(in_ptr4 + (x0), xmask)
tmp18 = tl.load(in_ptr5 + (x0), xmask)
tmp21 = tl.load(in_ptr6 + (x0), xmask)
tmp24 = tl.load(in_ptr7 + (x0), xmask)
tmp27 = tl.load(in_out_ptr1 + (x0), xmask)
tmp30 = tl.load(in_ptr8 + (x0), xmask)
tmp33 = tl.load(in_ptr9 + (x0), xmask)
tmp36 = tl.load(in_ptr10 + (x0), xmask)
tmp39 = tl.load(in_ptr11 + (x0), xmask)
tmp42 = tl.load(in_ptr12 + (x0), xmask)
tmp45 = tl.load(in_ptr13 + (x0), xmask)
tmp50 = tl.load(in_ptr14 + (x0), xmask)
tmp1 = 0.9
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp1
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp1
tmp11 = tmp8 + tmp10
tmp13 = tmp12 * tmp1
tmp14 = tmp11 + tmp13
tmp16 = tmp15 * tmp1
tmp17 = tmp14 + tmp16
tmp19 = tmp18 * tmp1
tmp20 = tmp17 + tmp19
tmp22 = tmp21 * tmp1
tmp23 = tmp20 + tmp22
tmp25 = tmp24 * tmp1
tmp26 = tmp23 + tmp25
tmp28 = tmp27 * tmp1
tmp29 = tmp26 + tmp28
tmp31 = tmp30 * tmp1
tmp32 = tmp29 + tmp31
tmp34 = tmp33 * tmp1
tmp35 = tmp32 + tmp34
tmp37 = tmp36 * tmp1
tmp38 = tmp35 + tmp37
tmp40 = tmp39 * tmp1
tmp41 = tmp38 + tmp40
tmp43 = tmp42 * tmp1
tmp44 = tmp41 + tmp43
tmp46 = tmp45 * tmp1
tmp47 = tmp44 + tmp46
tmp48 = 0.0625
tmp49 = tmp47 * tmp48
tmp51 = 0.1
tmp52 = tmp50 * tmp51
tmp53 = tmp49 + tmp52
tl.store(in_out_ptr1 + (x0), tmp53, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.mm]
extern_kernels.mm(arg1_1, arg0_1, out=buf0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.mm]
extern_kernels.mm(arg1_1, buf0, out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.mm]
extern_kernels.mm(arg1_1, buf1, out=buf2)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.mm]
extern_kernels.mm(arg1_1, buf2, out=buf3)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.mm]
extern_kernels.mm(arg1_1, buf3, out=buf4)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.mm]
extern_kernels.mm(arg1_1, buf4, out=buf5)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.mm]
extern_kernels.mm(arg1_1, buf5, out=buf6)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.mm]
extern_kernels.mm(arg1_1, buf6, out=buf7)
buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.mm]
extern_kernels.mm(arg1_1, buf7, out=buf8)
buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.mm]
extern_kernels.mm(arg1_1, buf8, out=buf10)
buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.mm]
extern_kernels.mm(arg1_1, buf10, out=buf11)
buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_11], Original ATen: [aten.mm]
extern_kernels.mm(arg1_1, buf11, out=buf12)
buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_12], Original ATen: [aten.mm]
extern_kernels.mm(arg1_1, buf12, out=buf13)
buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_13], Original ATen: [aten.mm]
extern_kernels.mm(arg1_1, buf13, out=buf14)
buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_14], Original ATen: [aten.mm]
extern_kernels.mm(arg1_1, buf14, out=buf15)
buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_15], Original ATen: [aten.mm]
extern_kernels.mm(arg1_1, buf15, out=buf16)
del arg1_1
buf9 = buf0; del buf0 # reuse
buf17 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [x_out_1, mul_1, x_out_2, mul_2, x_out_3, mul_3, x_out_4, mul_4, x_out_5, mul_5, x_out_6, mul_6, x_out_7, mul_7, x_out_8, mul_8, x_out_9, mul_9, x_out_10, mul_10, x_out_11, mul_11, x_out_12, mul_12, x_out_13, mul_13, x_out_14, mul_14, x_out_15, mul_15, x_out_16, x_out_17, mul_16, x_out_18], Original ATen: [aten.add, aten.mul, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_mul_0.run(buf9, buf17, buf1, buf2, buf3, buf4, buf5, buf6, buf7, buf8, buf11, buf12, buf13, buf14, buf15, buf16, arg0_1, 16, grid=grid(16), stream=stream0)
del arg0_1
del buf1
del buf11
del buf12
del buf13
del buf14
del buf15
del buf16
del buf2
del buf3
del buf4
del buf5
del buf6
del buf7
del buf8
del buf9
return (buf17, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
from torch.nn import Module
import torch
class SSGConv(Module):
def __init__(self, K=16, alpha=0.1, **kwargs):
super().__init__()
assert K > 0
self.K = K
self.alpha = alpha
def forward(self, x, adj):
x_in = x
x_out = torch.zeros_like(x)
for _ in range(self.K):
x = torch.spmm(adj, x)
x_out += (1 - self.alpha) * x
x_out /= self.K
x_out += self.alpha * x_in
return x_out
def reset_parameters(self):
pass
def extra_repr(self):
return f'K={self.K}, alpha={self.alpha}'
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
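# A minimal reference check (a sketch, not from the original source): with the
# defaults K=16, alpha=0.1 the forward pass computes
#     x_out = alpha * x + ((1 - alpha) / K) * sum_{k=1..K} adj^k x,
# which the plain dense-matmul loop below reproduces.
if __name__ == '__main__':
    x, adj = get_inputs()
    conv = SSGConv()
    expected = conv.alpha * x
    p = x
    for _ in range(conv.K):
        p = adj @ p
        expected = expected + (1 - conv.alpha) * p / conv.K
    torch.testing.assert_close(conv(x, adj), expected)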
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.nn import Module
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_mul_0(in_out_ptr0, in_out_ptr1, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8,
in_ptr9, in_ptr10, in_ptr11, in_ptr12, in_ptr13, in_ptr14, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp3 = tl.load(in_ptr0 + x0, xmask)
tmp6 = tl.load(in_ptr1 + x0, xmask)
tmp9 = tl.load(in_ptr2 + x0, xmask)
tmp12 = tl.load(in_ptr3 + x0, xmask)
tmp15 = tl.load(in_ptr4 + x0, xmask)
tmp18 = tl.load(in_ptr5 + x0, xmask)
tmp21 = tl.load(in_ptr6 + x0, xmask)
tmp24 = tl.load(in_ptr7 + x0, xmask)
tmp27 = tl.load(in_out_ptr1 + x0, xmask)
tmp30 = tl.load(in_ptr8 + x0, xmask)
tmp33 = tl.load(in_ptr9 + x0, xmask)
tmp36 = tl.load(in_ptr10 + x0, xmask)
tmp39 = tl.load(in_ptr11 + x0, xmask)
tmp42 = tl.load(in_ptr12 + x0, xmask)
tmp45 = tl.load(in_ptr13 + x0, xmask)
tmp50 = tl.load(in_ptr14 + x0, xmask)
tmp1 = 0.9
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp1
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp1
tmp11 = tmp8 + tmp10
tmp13 = tmp12 * tmp1
tmp14 = tmp11 + tmp13
tmp16 = tmp15 * tmp1
tmp17 = tmp14 + tmp16
tmp19 = tmp18 * tmp1
tmp20 = tmp17 + tmp19
tmp22 = tmp21 * tmp1
tmp23 = tmp20 + tmp22
tmp25 = tmp24 * tmp1
tmp26 = tmp23 + tmp25
tmp28 = tmp27 * tmp1
tmp29 = tmp26 + tmp28
tmp31 = tmp30 * tmp1
tmp32 = tmp29 + tmp31
tmp34 = tmp33 * tmp1
tmp35 = tmp32 + tmp34
tmp37 = tmp36 * tmp1
tmp38 = tmp35 + tmp37
tmp40 = tmp39 * tmp1
tmp41 = tmp38 + tmp40
tmp43 = tmp42 * tmp1
tmp44 = tmp41 + tmp43
tmp46 = tmp45 * tmp1
tmp47 = tmp44 + tmp46
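    # Average the accumulated (1 - alpha) * adj^k x terms (0.0625 = 1 / K with
    # K = 16), then blend back alpha = 0.1 of the original input features.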
tmp48 = 0.0625
tmp49 = tmp47 * tmp48
tmp51 = 0.1
tmp52 = tmp50 * tmp51
tmp53 = tmp49 + tmp52
tl.store(in_out_ptr1 + x0, tmp53, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(arg1_1, arg0_1, out=buf0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(arg1_1, buf0, out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(arg1_1, buf1, out=buf2)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(arg1_1, buf2, out=buf3)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(arg1_1, buf3, out=buf4)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(arg1_1, buf4, out=buf5)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(arg1_1, buf5, out=buf6)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(arg1_1, buf6, out=buf7)
buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(arg1_1, buf7, out=buf8)
buf10 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(arg1_1, buf8, out=buf10)
buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(arg1_1, buf10, out=buf11)
buf12 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(arg1_1, buf11, out=buf12)
buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(arg1_1, buf12, out=buf13)
buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(arg1_1, buf13, out=buf14)
buf15 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(arg1_1, buf14, out=buf15)
buf16 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(arg1_1, buf15, out=buf16)
del arg1_1
buf9 = buf0
del buf0
buf17 = buf10
del buf10
get_raw_stream(0)
triton_poi_fused_add_div_mul_0[grid(16)](buf9, buf17, buf1, buf2,
buf3, buf4, buf5, buf6, buf7, buf8, buf11, buf12, buf13, buf14,
buf15, buf16, arg0_1, 16, XBLOCK=16, num_warps=1, num_stages=1)
del arg0_1
del buf1
del buf11
del buf12
del buf13
del buf14
del buf15
del buf16
del buf2
del buf3
del buf4
del buf5
del buf6
del buf7
del buf8
del buf9
return buf17,
class SSGConvNew(Module):
def __init__(self, K=16, alpha=0.1, **kwargs):
super().__init__()
assert K > 0
self.K = K
self.alpha = alpha
def reset_parameters(self):
pass
def extra_repr(self):
return f'K={self.K}, alpha={self.alpha}'
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
EdisonLeeeee/GraphGallery
|
SSGConv
| false | 13,642 |
[
"MIT"
] | 300 |
4eec9c5136bda14809bd22584b26cc346cdb633b
|
https://github.com/EdisonLeeeee/GraphGallery/tree/4eec9c5136bda14809bd22584b26cc346cdb633b
|
WaveletConv
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/x5/cx57ndofhj4et5wnt2rbjkijbjsbca7tan7oleie57gahu7vejxk.py
# Topologically Sorted Source Nodes: [h_2], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# h_2 => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_4, %mm_1), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 1), (1, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h], Original ATen: [aten.mm]
extern_kernels.mm(primals_2, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_1], Original ATen: [aten.mm]
extern_kernels.mm(primals_3, buf0, out=buf1)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [h_2], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(primals_4, buf1, buf2, 16, grid=grid(16), stream=stream0)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [h_2, out], Original ATen: [aten.mul, aten.mm]
extern_kernels.mm(primals_5, buf2, out=buf3)
del buf2
return (buf3, primals_2, primals_4, buf1, reinterpret_tensor(primals_5, (4, 4), (1, 4), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 1), (1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class WaveletConv(nn.Module):
def __init__(self, in_features, out_features, num_nodes, bias=False):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.w = nn.Linear(in_features, out_features, bias=bias)
self.filter = nn.Parameter(torch.ones(num_nodes, 1))
def reset_parameters(self):
self.w.reset_parameters()
self.filter.data.fill_(1.0)
def forward(self, x, wavelet, inverse_wavelet):
h = self.w(x)
h = inverse_wavelet.mm(h)
h = self.filter * h
out = wavelet.mm(h)
return out
def __repr__(self):
return (
f'{self.__class__.__name__}({self.in_features}, {self.out_features})'
)
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4, 'num_nodes': 4}]
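# A minimal sanity sketch (assumption: identity matrices stand in for the
# wavelet and inverse-wavelet bases): with the all-ones filter the layer then
# collapses to the plain linear map self.w.
if __name__ == '__main__':
    conv = WaveletConv(4, 4, num_nodes=4)
    x = torch.rand(4, 4)
    eye = torch.eye(4)
    torch.testing.assert_close(conv(x, eye, eye), conv.w(x))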
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x2 = xindex
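    # in_ptr0 is the (num_nodes, 1) filter: index it by row (x1) and broadcast
    # across the 4 feature columns.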
tmp0 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 1), (1, 1))
assert_size_stride(primals_5, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_2, reinterpret_tensor(primals_1, (4, 4),
(1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_3, buf0, out=buf1)
buf2 = buf0
del buf0
get_raw_stream(0)
        triton_poi_fused_mul_0[grid(16)](primals_4, buf1, buf2, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_5, buf2, out=buf3)
del buf2
return buf3, primals_2, primals_4, buf1, reinterpret_tensor(primals_5,
(4, 4), (1, 4), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0)
class WaveletConvNew(nn.Module):
def __init__(self, in_features, out_features, num_nodes, bias=False):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.w = nn.Linear(in_features, out_features, bias=bias)
self.filter = nn.Parameter(torch.ones(num_nodes, 1))
def reset_parameters(self):
self.w.reset_parameters()
self.filter.data.fill_(1.0)
def __repr__(self):
return (
f'{self.__class__.__name__}({self.in_features}, {self.out_features})'
)
def forward(self, input_0, input_1, input_2):
primals_4 = self.filter
primals_1 = self.w.weight
primals_2 = input_0
primals_3 = input_1
primals_5 = input_2
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
EdisonLeeeee/GraphGallery
|
WaveletConv
| false | 13,643 |
[
"MIT"
] | 300 |
4eec9c5136bda14809bd22584b26cc346cdb633b
|
https://github.com/EdisonLeeeee/GraphGallery/tree/4eec9c5136bda14809bd22584b26cc346cdb633b
|
ResidualBlock
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/v6/cv6oewqqnsshd7he7ylh2kikzu4smtrhj2dmv6nb5csosp7g6vw5.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.reflection_pad2d]
# Source node to ATen node mapping:
# out => _unsafe_index, _unsafe_index_1
# Graph fragment:
# %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %sub_1, None]), kwargs = {})
# %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_1]), kwargs = {})
triton_poi_fused_reflection_pad2d_0 = async_compile.triton('triton_poi_fused_reflection_pad2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = (xindex // 6) % 6
x2 = (xindex // 36)
x3 = xindex
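    # Reflection-pad indexing: the nested abs() terms fold the 6 x 6 padded
    # coordinates back onto the 4 x 4 source (pad = 1 on each side).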
tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/t6/ct6syu6rq3n7yx3zuog2yujcrfreefdccraqz7zj2m3c5xhvp5vl.py
# Topologically Sorted Source Nodes: [out_1, instance_norm], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
# Source node to ATen node mapping:
# instance_norm => add, rsqrt, var_mean
# out_1 => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_per_fused__native_batch_norm_legit_convolution_1 = async_compile.triton('triton_per_fused__native_batch_norm_legit_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_convolution_1', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_1(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (r2 + (16*x3)), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(xmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 16.0
tmp20 = tmp18 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(in_out_ptr0 + (r2 + (16*x3)), tmp2, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + (x3), tmp23, xmask)
tl.store(out_ptr0 + (x3), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/3g/c3gbhm3y6wldudvsxdmmjh5ssg2uys5qqk3dd3k7bxnuot4xhndp.py
# Topologically Sorted Source Nodes: [instance_norm], Original ATen: [aten.repeat]
# Source node to ATen node mapping:
# instance_norm => repeat
# Graph fragment:
# %repeat : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_4, [4]), kwargs = {})
triton_poi_fused_repeat_2 = async_compile.triton('triton_poi_fused_repeat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_repeat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_repeat_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0 % 4), xmask)
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/6x/c6xlvvnj6ftmp7jka4547n3hpffcz5xr3op3wtbpv5povsb6rjue.py
# Topologically Sorted Source Nodes: [out_2, out_3], Original ATen: [aten.relu, aten.reflection_pad2d]
# Source node to ATen node mapping:
# out_2 => relu
# out_3 => _unsafe_index_2, _unsafe_index_3
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %_unsafe_index_2 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu, [None, None, %sub_1, None]), kwargs = {})
# %_unsafe_index_3 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_2, [None, None, None, %sub_1]), kwargs = {})
triton_poi_fused_reflection_pad2d_relu_3 = async_compile.triton('triton_poi_fused_reflection_pad2d_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reflection_pad2d_relu_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = (xindex // 6) % 6
x2 = (xindex // 36)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-1) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-1) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x2), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x2), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/f6/cf6z47jjg5avrtu2dn7ifr4mx5pcq57zzoksapbtrbrglr3gqnxi.py
# Topologically Sorted Source Nodes: [out_4, out_5, out_6], Original ATen: [aten.convolution, aten.repeat, aten._native_batch_norm_legit, aten.add]
# Source node to ATen node mapping:
# out_4 => convolution_1
# out_5 => add_2, repeat_2, rsqrt_1, var_mean_1
# out_6 => add_4
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_3, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %repeat_2 : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_8, [4]), kwargs = {})
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_2, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_3, %primals_1), kwargs = {})
triton_per_fused__native_batch_norm_legit_add_convolution_repeat_4 = async_compile.triton('triton_per_fused__native_batch_norm_legit_add_convolution_repeat_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_add_convolution_repeat_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_convolution_repeat_4(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
x0 = xindex
r3 = rindex
x1 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x0 % 4), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_out_ptr0 + (r3 + (16*x0)), xmask, other=0.0)
tmp2 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr3 + (r3 + (16*x0)), xmask, other=0.0)
tmp3 = tmp1 + tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp12 = tmp11.to(tl.float32)
tmp13 = tmp10 / tmp12
tmp14 = tmp4 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.where(xmask, tmp16, 0)
tmp19 = tl.sum(tmp18, 1)[:, None]
tmp20 = tmp3 - tmp13
tmp21 = 16.0
tmp22 = tmp19 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.rsqrt(tmp24)
tmp26 = tmp20 * tmp25
tmp27 = tmp26 * tmp0
tmp29 = tmp27 + tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + (x0), tmp0, xmask)
tl.store(in_out_ptr0 + (r3 + (16*x0)), tmp3, xmask)
tl.store(out_ptr3 + (r3 + (16*x0)), tmp31, xmask)
tl.store(out_ptr4 + (x0), tmp25, xmask)
tl.store(out_ptr1 + (x0), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_7, (4, ), (1, ))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.reflection_pad2d]
stream0 = get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0.run(primals_1, buf0, 576, grid=grid(576), stream=stream0)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1; del buf1 # reuse
buf5 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
buf6 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf8 = reinterpret_tensor(buf6, (1, 16, 1, 1), (16, 1, 1, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [out_1, instance_norm], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_convolution_1.run(buf2, buf8, primals_3, buf5, 16, 16, grid=grid(16), stream=stream0)
del primals_3
buf3 = empty_strided_cuda((16, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [instance_norm], Original ATen: [aten.repeat]
triton_poi_fused_repeat_2.run(primals_4, buf3, 16, grid=grid(16), stream=stream0)
del primals_4
buf4 = empty_strided_cuda((16, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [instance_norm], Original ATen: [aten.repeat]
triton_poi_fused_repeat_2.run(primals_5, buf4, 16, grid=grid(16), stream=stream0)
del primals_5
buf9 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_2, out_3], Original ATen: [aten.relu, aten.reflection_pad2d]
triton_poi_fused_reflection_pad2d_relu_3.run(buf2, buf5, buf8, buf3, buf4, buf9, 576, grid=grid(576), stream=stream0)
# Topologically Sorted Source Nodes: [out_4], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf9, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 4, 4, 4), (64, 16, 4, 1))
buf12 = empty_strided_cuda((16, ), (1, ), torch.float32)
buf11 = buf10; del buf10 # reuse
buf13 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf16 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [out_4, out_5, out_6], Original ATen: [aten.convolution, aten.repeat, aten._native_batch_norm_legit, aten.add]
triton_per_fused__native_batch_norm_legit_add_convolution_repeat_4.run(buf11, primals_8, primals_7, primals_9, primals_1, buf12, buf13, buf17, buf16, 16, 16, grid=grid(16), stream=stream0)
del primals_1
del primals_7
del primals_8
del primals_9
return (buf17, primals_2, primals_6, buf0, buf2, buf3, buf4, buf5, buf8, buf9, buf11, buf12, reinterpret_tensor(buf16, (16, ), (1, ), 0), reinterpret_tensor(buf13, (1, 16, 1, 1), (16, 1, 1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
class ConvLayer(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride):
super(ConvLayer, self).__init__()
reflection_padding = kernel_size // 2
self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding)
self.conv2d = torch.nn.Conv2d(in_channels, out_channels,
kernel_size, stride)
def forward(self, x):
out = self.reflection_pad(x)
out = self.conv2d(out)
return out
class ResidualBlock(torch.nn.Module):
"""ResidualBlock
introduced in: https://arxiv.org/abs/1512.03385
recommended architecture: http://torch.ch/blog/2016/02/04/resnets.html
"""
def __init__(self, channels):
super(ResidualBlock, self).__init__()
self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1)
self.in1 = torch.nn.InstanceNorm2d(channels, affine=True)
self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1)
self.in2 = torch.nn.InstanceNorm2d(channels, affine=True)
self.relu = torch.nn.ReLU()
def forward(self, x):
residual = x
out = self.relu(self.in1(self.conv1(x)))
out = self.in2(self.conv2(out))
out = out + residual
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels': 4}]
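# Usage sketch (an illustrative addition, not part of the original source):
# the eager-mode block preserves the input shape, since both 3x3 convs use
# reflection padding of 1 and the residual is added elementwise.
def _example_residual_block():
    block = ResidualBlock(channels=4)
    x = torch.rand(4, 4, 4, 4)
    out = block(x)
    assert out.shape == x.shape
    return out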
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
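    # Reflection-pad gather: fold padded coords (x0, x1) in [0, 6) back into
    # the 4x4 source plane with nested abs(); offset 15 addresses the last
    # element of each plane, so strides are -1 along width and -4 along height.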
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_1(in_out_ptr0,
in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 4
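    # Fused conv-bias + instance-norm statistics: add the per-channel bias
    # (in_ptr0) to the conv output in place, then reduce the 16 spatial
    # elements of each (N, C) slice to a mean (out_ptr0) and rsqrt(var + eps)
    # (in_out_ptr1).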
tmp0 = tl.load(in_out_ptr0 + (r2 + 16 * x3), xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tl.where(xmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = 16.0
tmp20 = tmp18 / tmp19
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(in_out_ptr0 + (r2 + 16 * x3), tmp2, xmask)
tl.debug_barrier()
tl.store(in_out_ptr1 + x3, tmp23, xmask)
tl.store(out_ptr0 + x3, tmp12, xmask)
@triton.jit
def triton_poi_fused_repeat_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
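    # aten.repeat(param, [4]): tile the 4 per-channel affine parameters across
    # the batch, yielding one value per (N, C) instance-norm channel.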
tmp0 = tl.load(in_ptr0 + x0 % 4, xmask)
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_reflection_pad2d_relu_3(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 6
x1 = xindex // 6 % 6
x2 = xindex // 36
x3 = xindex
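    # Normalize (x - mean) * rsqrt(var + eps), apply the affine scale/shift,
    # ReLU, and scatter the result into the reflection-padded 6x6 layout for
    # the next convolution.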
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-1 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-1 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_convolution_repeat_4(
in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr1,
out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
x0 = xindex
r3 = rindex
x1 = xindex % 4
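    # Fused tail of the block: conv bias add, instance-norm statistics and
    # affine (weight repeated via in_ptr0), then the residual add with the
    # original input (in_ptr3) written to out_ptr3.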
tmp0 = tl.load(in_ptr0 + x0 % 4, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_out_ptr0 + (r3 + 16 * x0), xmask, other=0.0)
tmp2 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr3 + (r3 + 16 * x0), xmask, other=0.0)
tmp3 = tmp1 + tmp2
tmp4 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tl.where(xmask, tmp4, 0)
tmp7 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp9 = tl.where(xmask, tmp7, 0)
tmp10 = tl.sum(tmp9, 1)[:, None]
tmp11 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp12 = tmp11.to(tl.float32)
tmp13 = tmp10 / tmp12
tmp14 = tmp4 - tmp13
tmp15 = tmp14 * tmp14
tmp16 = tl.broadcast_to(tmp15, [XBLOCK, RBLOCK])
tmp18 = tl.where(xmask, tmp16, 0)
tmp19 = tl.sum(tmp18, 1)[:, None]
tmp20 = tmp3 - tmp13
tmp21 = 16.0
tmp22 = tmp19 / tmp21
tmp23 = 1e-05
tmp24 = tmp22 + tmp23
tmp25 = libdevice.rsqrt(tmp24)
tmp26 = tmp20 * tmp25
tmp27 = tmp26 * tmp0
tmp29 = tmp27 + tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + x0, tmp0, xmask)
tl.store(in_out_ptr0 + (r3 + 16 * x0), tmp3, xmask)
tl.store(out_ptr3 + (r3 + 16 * x0), tmp31, xmask)
tl.store(out_ptr4 + x0, tmp25, xmask)
tl.store(out_ptr1 + x0, tmp13, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_7, (4,), (1,))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0[grid(576)](primals_1, buf0, 576,
XBLOCK=128, num_warps=4, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
buf5 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 1, 1), torch.float32)
buf6 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32
)
buf8 = reinterpret_tensor(buf6, (1, 16, 1, 1), (16, 1, 1, 1), 0)
del buf6
triton_per_fused__native_batch_norm_legit_convolution_1[grid(16)](buf2,
buf8, primals_3, buf5, 16, 16, XBLOCK=1, num_warps=2, num_stages=1)
del primals_3
buf3 = empty_strided_cuda((16,), (1,), torch.float32)
triton_poi_fused_repeat_2[grid(16)](primals_4, buf3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_4
buf4 = empty_strided_cuda((16,), (1,), torch.float32)
triton_poi_fused_repeat_2[grid(16)](primals_5, buf4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_5
buf9 = empty_strided_cuda((4, 4, 6, 6), (144, 36, 6, 1), torch.float32)
triton_poi_fused_reflection_pad2d_relu_3[grid(576)](buf2, buf5,
buf8, buf3, buf4, buf9, 576, XBLOCK=256, num_warps=4, num_stages=1)
buf10 = extern_kernels.convolution(buf9, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 4, 4, 4), (64, 16, 4, 1))
buf12 = empty_strided_cuda((16,), (1,), torch.float32)
buf11 = buf10
del buf10
buf13 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.
float32)
buf17 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf16 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.
float32)
triton_per_fused__native_batch_norm_legit_add_convolution_repeat_4[grid
(16)](buf11, primals_8, primals_7, primals_9, primals_1, buf12,
buf13, buf17, buf16, 16, 16, XBLOCK=8, num_warps=2, num_stages=1)
del primals_1
del primals_7
del primals_8
del primals_9
return (buf17, primals_2, primals_6, buf0, buf2, buf3, buf4, buf5, buf8,
buf9, buf11, buf12, reinterpret_tensor(buf16, (16,), (1,), 0),
reinterpret_tensor(buf13, (1, 16, 1, 1), (16, 1, 1, 1), 0))
class ConvLayer(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride):
super(ConvLayer, self).__init__()
reflection_padding = kernel_size // 2
self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding)
self.conv2d = torch.nn.Conv2d(in_channels, out_channels,
kernel_size, stride)
def forward(self, x):
out = self.reflection_pad(x)
out = self.conv2d(out)
return out
class ResidualBlockNew(torch.nn.Module):
"""ResidualBlock
introduced in: https://arxiv.org/abs/1512.03385
recommended architecture: http://torch.ch/blog/2016/02/04/resnets.html
"""
def __init__(self, channels):
super(ResidualBlockNew, self).__init__()
self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1)
self.in1 = torch.nn.InstanceNorm2d(channels, affine=True)
self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1)
self.in2 = torch.nn.InstanceNorm2d(channels, affine=True)
self.relu = torch.nn.ReLU()
def forward(self, input_0):
primals_2 = self.conv1.conv2d.weight
primals_3 = self.conv1.conv2d.bias
primals_4 = self.in1.weight
primals_5 = self.in1.bias
primals_6 = self.conv2.conv2d.weight
primals_7 = self.conv2.conv2d.bias
primals_8 = self.in2.weight
primals_9 = self.in2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
|
EdenBD/MultiModalStory-demo
|
ResidualBlock
| false | 13,644 |
[
"Apache-2.0"
] | 154 |
5e95e2aca766ca7c850e8db4973b8d51dfdba7f8
|
https://github.com/EdenBD/MultiModalStory-demo/tree/5e95e2aca766ca7c850e8db4973b8d51dfdba7f8
|
FocalFrequencyLoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ft/cftomyvautj5vxsi2pgrqoglpefc2yd5kbt5xqr43qzmwf45zrur.py
# Topologically Sorted Source Nodes: [freq_1], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# freq_1 => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%unsqueeze, %unsqueeze_1], -1), kwargs = {})
triton_poi_fused_stack_0 = async_compile.triton('triton_poi_fused_stack_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = (xindex // 2)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (2*x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + (1 + (2*x1)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
buf0 = empty_strided_cuda((4, 1, 4, 4, 4), (64, 64, 16, 4, 1), torch.complex64)
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0.copy_(reinterpret_tensor(arg0_1, (4, 1, 4, 4, 4), (64, 64, 16, 4, 1), 0))
del arg0_1
# Topologically Sorted Source Nodes: [freq], Original ATen: [aten._fft_c2c]
buf2 = torch.ops.aten._fft_c2c.default(buf0, [3, 4], 1, True)
del buf0
buf3 = buf2
del buf2
# Topologically Sorted Source Nodes: [getattr_1], Original ATen: [aten.view_as_real]
buf4 = torch.ops.aten.view_as_real.default(buf3)
buf5 = buf4
# Topologically Sorted Source Nodes: [getattr_2], Original ATen: [aten.view_as_real]
buf6 = torch.ops.aten.view_as_real.default(buf3)
buf7 = buf6
buf8 = empty_strided_cuda((4, 1, 4, 4, 4, 2), (128, 128, 32, 8, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [freq_1], Original ATen: [aten.stack]
stream0 = get_raw_stream(0)
triton_poi_fused_stack_0.run(buf5, buf7, buf8, 512, grid=grid(512), stream=stream0)
del buf4
del buf5
del buf6
del buf7
buf9 = buf3; del buf3 # reuse
buf9.copy_(reinterpret_tensor(arg1_1, (4, 1, 4, 4, 4), (64, 64, 16, 4, 1), 0))
del arg1_1
# Topologically Sorted Source Nodes: [freq_2], Original ATen: [aten._fft_c2c]
buf11 = torch.ops.aten._fft_c2c.default(buf9, [3, 4], 1, True)
del buf9
buf12 = buf11
del buf11
# Topologically Sorted Source Nodes: [getattr_3], Original ATen: [aten.view_as_real]
buf13 = torch.ops.aten.view_as_real.default(buf12)
buf14 = buf13
# Topologically Sorted Source Nodes: [getattr_4], Original ATen: [aten.view_as_real]
buf15 = torch.ops.aten.view_as_real.default(buf12)
buf16 = buf15
buf17 = empty_strided_cuda((4, 1, 4, 4, 4, 2), (128, 128, 32, 8, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [freq_3], Original ATen: [aten.stack]
triton_poi_fused_stack_0.run(buf14, buf16, buf17, 512, grid=grid(512), stream=stream0)
del buf12
del buf13
del buf14
del buf15
del buf16
return (buf8, buf17, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.utils.data
class FocalFrequencyLoss(nn.Module):
"""The torch.nn.Module class that implements focal frequency loss - a
frequency domain loss function for optimizing generative models.
Ref:
Focal Frequency Loss for Image Reconstruction and Synthesis. In ICCV 2021.
<https://arxiv.org/pdf/2012.12821.pdf>
Args:
loss_weight (float): weight for focal frequency loss. Default: 1.0
alpha (float): the scaling factor alpha of the spectrum weight matrix for flexibility. Default: 1.0
patch_factor (int): the factor to crop image patches for patch-based focal frequency loss. Default: 1
ave_spectrum (bool): whether to use minibatch average spectrum. Default: False
log_matrix (bool): whether to adjust the spectrum weight matrix by logarithm. Default: False
batch_matrix (bool): whether to calculate the spectrum weight matrix using batch-based statistics. Default: False
"""
def __init__(self, loss_weight=1.0, alpha=1.0, patch_factor=1,
ave_spectrum=False, log_matrix=False, batch_matrix=False):
super(FocalFrequencyLoss, self).__init__()
self.loss_weight = loss_weight
self.alpha = alpha
self.patch_factor = patch_factor
self.ave_spectrum = ave_spectrum
self.log_matrix = log_matrix
self.batch_matrix = batch_matrix
def tensor2freq(self, x):
patch_factor = self.patch_factor
_, _, h, w = x.shape
        assert h % patch_factor == 0 and w % patch_factor == 0, 'Image height and width should be divisible by the patch factor'
patch_list = []
patch_h = h // patch_factor
patch_w = w // patch_factor
for i in range(patch_factor):
for j in range(patch_factor):
patch_list.append(x[:, :, i * patch_h:(i + 1) * patch_h, j *
patch_w:(j + 1) * patch_w])
y = torch.stack(patch_list, 1)
        # Compare version components numerically; a lexicographic string
        # comparison misorders versions such as '1.10' vs '1.7'.
        torch_version = tuple(int(v) for v in torch.__version__.split('+')[0].split('.')[:2])
        if torch_version > (1, 7):
freq = torch.fft.fft2(y, norm='ortho')
freq = torch.stack([freq.real, freq.imag], -1)
else:
freq = torch.rfft(y, 2, onesided=False, normalized=True)
return freq
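    # Loss summary (a paraphrase of the code below, not quoted from the
    # paper): with d = recon_freq - real_freq, the weight matrix is
    # w = clamp(normalize(sqrt(d_re^2 + d_im^2) ** alpha), 0, 1), detached,
    # and the loss is mean(w * (d_re^2 + d_im^2)), scaled by loss_weight in
    # forward.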
def loss_formulation(self, recon_freq, real_freq, matrix=None):
if matrix is not None:
weight_matrix = matrix.detach()
else:
matrix_tmp = (recon_freq - real_freq) ** 2
matrix_tmp = torch.sqrt(matrix_tmp[..., 0] + matrix_tmp[..., 1]
) ** self.alpha
if self.log_matrix:
matrix_tmp = torch.log(matrix_tmp + 1.0)
if self.batch_matrix:
matrix_tmp = matrix_tmp / matrix_tmp.max()
else:
matrix_tmp = matrix_tmp / matrix_tmp.max(-1).values.max(-1
).values[:, :, :, None, None]
matrix_tmp[torch.isnan(matrix_tmp)] = 0.0
matrix_tmp = torch.clamp(matrix_tmp, min=0.0, max=1.0)
weight_matrix = matrix_tmp.clone().detach()
assert weight_matrix.min().item() >= 0 and weight_matrix.max().item(
) <= 1, 'The values of spectrum weight matrix should be in the range [0, 1], but got Min: %.10f Max: %.10f' % (
weight_matrix.min().item(), weight_matrix.max().item())
tmp = (recon_freq - real_freq) ** 2
freq_distance = tmp[..., 0] + tmp[..., 1]
loss = weight_matrix * freq_distance
return torch.mean(loss)
def forward(self, pred, target, matrix=None, **kwargs):
"""Forward function to calculate focal frequency loss.
Args:
pred (torch.Tensor): of shape (N, C, H, W). Predicted tensor.
target (torch.Tensor): of shape (N, C, H, W). Target tensor.
matrix (torch.Tensor, optional): Element-wise spectrum weight matrix.
Default: None (If set to None: calculated online, dynamic).
"""
pred_freq = self.tensor2freq(pred)
target_freq = self.tensor2freq(target)
if self.ave_spectrum:
pred_freq = torch.mean(pred_freq, 0, keepdim=True)
target_freq = torch.mean(target_freq, 0, keepdim=True)
return self.loss_formulation(pred_freq, target_freq, matrix
) * self.loss_weight
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
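# Usage sketch (an illustrative addition; shapes are arbitrary):
def _example_focal_frequency_loss():
    ffl = FocalFrequencyLoss(loss_weight=1.0, alpha=1.0)
    pred = torch.rand(2, 3, 16, 16)
    target = torch.rand(2, 3, 16, 16)
    return ffl(pred, target)  # scalar loss tensor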
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_stack_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
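    # aten.stack([real, imag], -1): even output slots (x0 == 0) read the real
    # part at 2*x1, odd slots read the imaginary part at 1 + 2*x1 of the
    # view_as_real buffers.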
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + 2 * x1, tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp9 = tl.load(in_ptr1 + (1 + 2 * x1), tmp6 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
buf0 = empty_strided_cuda((4, 1, 4, 4, 4), (64, 64, 16, 4, 1), torch.
complex64)
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0.copy_(reinterpret_tensor(arg0_1, (4, 1, 4, 4, 4), (64, 64, 16,
4, 1), 0))
del arg0_1
buf2 = torch.ops.aten._fft_c2c.default(buf0, [3, 4], 1, True)
del buf0
buf3 = buf2
del buf2
buf4 = torch.ops.aten.view_as_real.default(buf3)
buf5 = buf4
buf6 = torch.ops.aten.view_as_real.default(buf3)
buf7 = buf6
buf8 = empty_strided_cuda((4, 1, 4, 4, 4, 2), (128, 128, 32, 8, 2,
1), torch.float32)
get_raw_stream(0)
triton_poi_fused_stack_0[grid(512)](buf5, buf7, buf8, 512, XBLOCK=
256, num_warps=4, num_stages=1)
del buf4
del buf5
del buf6
del buf7
buf9 = buf3
del buf3
buf9.copy_(reinterpret_tensor(arg1_1, (4, 1, 4, 4, 4), (64, 64, 16,
4, 1), 0))
del arg1_1
buf11 = torch.ops.aten._fft_c2c.default(buf9, [3, 4], 1, True)
del buf9
buf12 = buf11
del buf11
buf13 = torch.ops.aten.view_as_real.default(buf12)
buf14 = buf13
buf15 = torch.ops.aten.view_as_real.default(buf12)
buf16 = buf15
buf17 = empty_strided_cuda((4, 1, 4, 4, 4, 2), (128, 128, 32, 8, 2,
1), torch.float32)
triton_poi_fused_stack_0[grid(512)](buf14, buf16, buf17, 512,
XBLOCK=256, num_warps=4, num_stages=1)
del buf12
del buf13
del buf14
del buf15
del buf16
return buf8, buf17
class FocalFrequencyLossNew(nn.Module):
"""The torch.nn.Module class that implements focal frequency loss - a
frequency domain loss function for optimizing generative models.
Ref:
Focal Frequency Loss for Image Reconstruction and Synthesis. In ICCV 2021.
<https://arxiv.org/pdf/2012.12821.pdf>
Args:
loss_weight (float): weight for focal frequency loss. Default: 1.0
alpha (float): the scaling factor alpha of the spectrum weight matrix for flexibility. Default: 1.0
patch_factor (int): the factor to crop image patches for patch-based focal frequency loss. Default: 1
ave_spectrum (bool): whether to use minibatch average spectrum. Default: False
log_matrix (bool): whether to adjust the spectrum weight matrix by logarithm. Default: False
batch_matrix (bool): whether to calculate the spectrum weight matrix using batch-based statistics. Default: False
"""
def __init__(self, loss_weight=1.0, alpha=1.0, patch_factor=1,
ave_spectrum=False, log_matrix=False, batch_matrix=False):
super(FocalFrequencyLossNew, self).__init__()
self.loss_weight = loss_weight
self.alpha = alpha
self.patch_factor = patch_factor
self.ave_spectrum = ave_spectrum
self.log_matrix = log_matrix
self.batch_matrix = batch_matrix
def tensor2freq(self, x):
patch_factor = self.patch_factor
_, _, h, w = x.shape
        assert h % patch_factor == 0 and w % patch_factor == 0, 'Image height and width should be divisible by the patch factor'
patch_list = []
patch_h = h // patch_factor
patch_w = w // patch_factor
for i in range(patch_factor):
for j in range(patch_factor):
patch_list.append(x[:, :, i * patch_h:(i + 1) * patch_h, j *
patch_w:(j + 1) * patch_w])
y = torch.stack(patch_list, 1)
        # Compare version components numerically; a lexicographic string
        # comparison misorders versions such as '1.10' vs '1.7'.
        torch_version = tuple(int(v) for v in torch.__version__.split('+')[0].split('.')[:2])
        if torch_version > (1, 7):
freq = torch.fft.fft2(y, norm='ortho')
freq = torch.stack([freq.real, freq.imag], -1)
else:
freq = torch.rfft(y, 2, onesided=False, normalized=True)
return freq
def loss_formulation(self, recon_freq, real_freq, matrix=None):
if matrix is not None:
weight_matrix = matrix.detach()
else:
matrix_tmp = (recon_freq - real_freq) ** 2
matrix_tmp = torch.sqrt(matrix_tmp[..., 0] + matrix_tmp[..., 1]
) ** self.alpha
if self.log_matrix:
matrix_tmp = torch.log(matrix_tmp + 1.0)
if self.batch_matrix:
matrix_tmp = matrix_tmp / matrix_tmp.max()
else:
matrix_tmp = matrix_tmp / matrix_tmp.max(-1).values.max(-1
).values[:, :, :, None, None]
matrix_tmp[torch.isnan(matrix_tmp)] = 0.0
matrix_tmp = torch.clamp(matrix_tmp, min=0.0, max=1.0)
weight_matrix = matrix_tmp.clone().detach()
assert weight_matrix.min().item() >= 0 and weight_matrix.max().item(
) <= 1, 'The values of spectrum weight matrix should be in the range [0, 1], but got Min: %.10f Max: %.10f' % (
weight_matrix.min().item(), weight_matrix.max().item())
tmp = (recon_freq - real_freq) ** 2
freq_distance = tmp[..., 0] + tmp[..., 1]
loss = weight_matrix * freq_distance
return torch.mean(loss)
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
EndlessSora/focal-frequency-loss
|
FocalFrequencyLoss
| false | 13,645 |
[
"MIT"
] | 364 |
dcaa01ecbfbbd9d8f83f7e5993474e1aa087227c
|
https://github.com/EndlessSora/focal-frequency-loss/tree/dcaa01ecbfbbd9d8f83f7e5993474e1aa087227c
|
TAGConv
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/7v/c7vxymjug6cmd6ortn7oen7sxreksunepw2tsdycutut57s4epmp.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %mm, %mm_1, %mm_2], -1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tl.store(out_ptr0 + (x0 + (16*x1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/i3/ci3ntqdazryccf7cw2urpyjx3oine2oobozzcepsiti6ioemdsor.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %mm, %mm_1, %mm_2], -1), kwargs = {})
triton_poi_fused_cat_1 = async_compile.triton('triton_poi_fused_cat_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tl.store(out_ptr0 + (x0 + (16*x1)), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 16), (16, 1))
assert_size_stride(primals_4, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.mm]
extern_kernels.mm(primals_2, primals_1, out=buf0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.mm]
extern_kernels.mm(primals_2, buf0, out=buf1)
buf6 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
buf2 = reinterpret_tensor(buf6, (4, 4), (16, 1), 12) # alias
# Topologically Sorted Source Nodes: [out_2], Original ATen: [aten.mm]
extern_kernels.mm(primals_2, buf1, out=buf2)
del primals_2
buf3 = reinterpret_tensor(buf6, (4, 4), (16, 1), 0) # alias
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, buf3, 16, grid=grid(16), stream=stream0)
del primals_1
buf4 = reinterpret_tensor(buf6, (4, 4), (16, 1), 4) # alias
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
triton_poi_fused_cat_1.run(buf0, buf4, 16, grid=grid(16), stream=stream0)
del buf0
buf5 = reinterpret_tensor(buf6, (4, 4), (16, 1), 8) # alias
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
triton_poi_fused_cat_1.run(buf1, buf5, 16, grid=grid(16), stream=stream0)
buf7 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_4, buf6, reinterpret_tensor(primals_3, (16, 4), (1, 16), 0), alpha=1, beta=1, out=buf7)
del primals_3
del primals_4
return (buf7, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 16), (16, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class TAGConv(nn.Module):
def __init__(self, in_features, out_features, K=3, bias=True):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.K = K
self.w = nn.Linear(in_features * (self.K + 1), out_features, bias=bias)
def reset_parameters(self):
self.w.reset_parameters()
def forward(self, x, adj):
out = x
xs = [x]
for _ in range(self.K):
out = adj.mm(out)
xs.append(out)
out = self.w(torch.cat(xs, dim=-1))
return out
def __repr__(self):
return (
f'{self.__class__.__name__}({self.in_features}, {self.out_features}, K={self.K})'
)
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
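# Usage sketch (an illustrative addition): TAGConv concatenates the K-hop
# propagated features [x, Ax, A^2 x, A^3 x] before the linear layer, so the
# dense adjacency must be square with one row per node.
def _example_tagconv():
    conv = TAGConv(in_features=4, out_features=4, K=3)
    x, adj = torch.rand(4, 4), torch.rand(4, 4)
    return conv(x, adj)  # shape (4, 4)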
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
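    # aten.cat prologue: copy this (4, 4) operand into its 4-column slot of
    # the concatenated (4, 16) buffer (row stride 16).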
tmp0 = tl.load(in_ptr0 + x2, xmask)
tl.store(out_ptr0 + (x0 + 16 * x1), tmp0, xmask)
@triton.jit
def triton_poi_fused_cat_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
x1 = xindex // 4
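    # Same copy as triton_poi_fused_cat_0; the column slot is selected by the
    # offset of the reinterpret_tensor view passed as out_ptr0.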
tmp0 = tl.load(in_ptr0 + x2, xmask)
tl.store(out_ptr0 + (x0 + 16 * x1), tmp0, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 16), (16, 1))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_2, primals_1, out=buf0)
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_2, buf0, out=buf1)
buf6 = empty_strided_cuda((4, 16), (16, 1), torch.float32)
buf2 = reinterpret_tensor(buf6, (4, 4), (16, 1), 12)
extern_kernels.mm(primals_2, buf1, out=buf2)
del primals_2
buf3 = reinterpret_tensor(buf6, (4, 4), (16, 1), 0)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(16)](primals_1, buf3, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del primals_1
buf4 = reinterpret_tensor(buf6, (4, 4), (16, 1), 4)
triton_poi_fused_cat_1[grid(16)](buf0, buf4, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf0
buf5 = reinterpret_tensor(buf6, (4, 4), (16, 1), 8)
triton_poi_fused_cat_1[grid(16)](buf1, buf5, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf7 = buf1
del buf1
extern_kernels.addmm(primals_4, buf6, reinterpret_tensor(primals_3,
(16, 4), (1, 16), 0), alpha=1, beta=1, out=buf7)
del primals_3
del primals_4
return buf7, buf6
class TAGConvNew(nn.Module):
def __init__(self, in_features, out_features, K=3, bias=True):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.K = K
self.w = nn.Linear(in_features * (self.K + 1), out_features, bias=bias)
def reset_parameters(self):
self.w.reset_parameters()
def __repr__(self):
return (
f'{self.__class__.__name__}({self.in_features}, {self.out_features}, K={self.K})'
)
def forward(self, input_0, input_1):
primals_3 = self.w.weight
primals_4 = self.w.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
EdisonLeeeee/GraphGallery
|
TAGConv
| false | 13,646 |
[
"MIT"
] | 300 |
4eec9c5136bda14809bd22584b26cc346cdb633b
|
https://github.com/EdisonLeeeee/GraphGallery/tree/4eec9c5136bda14809bd22584b26cc346cdb633b
|
GlobalAvgPool2d
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/l3/cl35tzbhrd24dhunkbb6gjs54aklpyr46oikqhoylcgmkcmhujil.py
# Topologically Sorted Source Nodes: [adaptive_avg_pool2d], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# adaptive_avg_pool2d => mean
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%arg0_1, [-1, -2], True), kwargs = {})
triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [adaptive_avg_pool2d], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_mean_0.run(buf1, arg0_1, 16, 16, grid=grid(16), stream=stream0)
del arg0_1
return (reinterpret_tensor(buf1, (4, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class GlobalAvgPool2d(nn.Module):
def __init__(self):
"""Global average pooling over the input's spatial dimensions"""
super(GlobalAvgPool2d, self).__init__()
def forward(self, inputs):
        return nn.functional.adaptive_avg_pool2d(inputs, 1).view(inputs.size(0), -1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
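# --- Editor's usage sketch (added; not from the source repo) ---
# Global average pooling collapses each (H, W) plane to its mean, so a
# (4, 4, 4, 4) input yields a (4, 4) matrix of per-channel means.
pool = GlobalAvgPool2d()
x = torch.rand(4, 4, 4, 4)
y = pool(x)
assert y.shape == (4, 4)
assert torch.allclose(y, x.mean(dim=(2, 3)))  # same as an explicit spatial mean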
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, arg0_1, 16, 16, XBLOCK=1,
num_warps=2, num_stages=1)
del arg0_1
return reinterpret_tensor(buf1, (4, 4), (4, 1), 0),
class GlobalAvgPool2dNew(nn.Module):
def __init__(self):
"""Global average pooling over the input's spatial dimensions"""
super(GlobalAvgPool2dNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
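# --- Editor's note (added; assumption): the compiled wrapper is
# shape-specialized. assert_size_stride above pins the input to a contiguous
# (4, 4, 4, 4) CUDA tensor; other shapes would need a recompile.
if torch.cuda.is_available():
    y = GlobalAvgPool2dNew()(torch.rand(4, 4, 4, 4, device='cuda'))
    assert y.shape == (4, 4)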
|
Exdenta/torchsat | GlobalAvgPool2d | false | 13,647 | ["MIT"] | 316 | 70ea3db758757104fb3ba618ddf7997f0f3a75b4 | https://github.com/Exdenta/torchsat/tree/70ea3db758757104fb3ba618ddf7997f0f3a75b4
|
DiceBCELoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/xa/cxaldjb3lyrttpd6oyeccpxx6xt35jg65irbgseyklqochxbvrit.py
# Topologically Sorted Source Nodes: [BCE, mul, intersection, mul_1, add, sum_2, sum_3, add_1, add_2, truediv, dice_loss, Dice_BCE], Original ATen: [aten.binary_cross_entropy, aten.mul, aten.sum, aten.add, aten.div, aten.rsub]
# Source node to ATen node mapping:
# BCE => full_default, full_default_1, log, log1p, maximum, maximum_1, mean, mul_2, mul_3, neg, sub_1, sub_2
# Dice_BCE => add_3
# add => add
# add_1 => add_1
# add_2 => add_2
# dice_loss => sub
# intersection => sum_1
# mul => mul
# mul_1 => mul_1
# sum_2 => sum_2
# sum_3 => sum_3
# truediv => div
# Graph fragment:
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, 1), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%view,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%neg,), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %maximum : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log1p, %full_default), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %maximum), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%view,), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %maximum_1 : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log, %full_default_1), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %maximum_1), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_2, %mul_3), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sub_2,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2.0), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, 1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%view,), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%view_1,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_3), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, 1), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, %add_2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mean, %sub), kwargs = {})
triton_per_fused_add_binary_cross_entropy_div_mul_rsub_sum_0 = async_compile.triton('triton_per_fused_add_binary_cross_entropy_div_mul_rsub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_binary_cross_entropy_div_mul_rsub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_binary_cross_entropy_div_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp3 = tl.load(in_ptr1 + (r0), None)
tmp1 = 1.0
tmp2 = tmp0 - tmp1
tmp4 = tl.sigmoid(tmp3)
tmp5 = -tmp4
tmp6 = libdevice.log1p(tmp5)
tmp7 = -100.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp2 * tmp8
tmp10 = tl_math.log(tmp4)
tmp11 = triton_helpers.maximum(tmp10, tmp7)
tmp12 = tmp0 * tmp11
tmp13 = tmp9 - tmp12
tmp14 = tl.broadcast_to(tmp13, [RBLOCK])
tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0))
tmp17 = tmp4 * tmp0
tmp18 = tl.broadcast_to(tmp17, [RBLOCK])
tmp20 = triton_helpers.promote_to_tensor(tl.sum(tmp18, 0))
tmp21 = tl.broadcast_to(tmp4, [RBLOCK])
tmp23 = triton_helpers.promote_to_tensor(tl.sum(tmp21, 0))
tmp24 = tl.broadcast_to(tmp0, [RBLOCK])
tmp26 = triton_helpers.promote_to_tensor(tl.sum(tmp24, 0))
tmp27 = 256.0
tmp28 = tmp16 / tmp27
tmp29 = 2.0
tmp30 = tmp20 * tmp29
tmp31 = tmp30 + tmp1
tmp32 = tmp23 + tmp26
tmp33 = tmp32 + tmp1
tmp34 = tmp31 / tmp33
tmp35 = tmp1 - tmp34
tmp36 = tmp28 + tmp35
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp36, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf4 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [BCE, mul, intersection, mul_1, add, sum_2, sum_3, add_1, add_2, truediv, dice_loss, Dice_BCE], Original ATen: [aten.binary_cross_entropy, aten.mul, aten.sum, aten.add, aten.div, aten.rsub]
stream0 = get_raw_stream(0)
triton_per_fused_add_binary_cross_entropy_div_mul_rsub_sum_0.run(buf4, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class DiceBCELoss(nn.Module):
"""
This loss combines Dice loss with the standard binary cross-entropy (BCE) loss that is generally the default for segmentation models.
Combining the two methods allows for some diversity in the loss, while benefitting from the stability of BCE.
"""
def __init__(self, weight=None, size_average=True):
super(DiceBCELoss, self).__init__()
def forward(self, inputs, targets, smooth=1):
        inputs = torch.sigmoid(inputs)
inputs = inputs.view(-1)
targets = targets.view(-1)
intersection = (inputs * targets).sum()
dice_loss = 1 - (2.0 * intersection + smooth) / (inputs.sum() +
targets.sum() + smooth)
BCE = F.binary_cross_entropy(inputs, targets, reduction='mean')
Dice_BCE = BCE + dice_loss
return Dice_BCE
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
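# --- Editor's worked example (added; not from the source repo) ---
# With smooth=1: dice_loss = 1 - (2*sum(p*t) + 1) / (sum(p) + sum(t) + 1)
# over the flattened sigmoid probabilities p and targets t, and BCE is the
# usual mean binary cross-entropy. Note the inputs are raw logits; the
# sigmoid is applied inside forward.
criterion = DiceBCELoss()
logits = torch.randn(4, 4, 4, 4)
targets = torch.randint(0, 2, (4, 4, 4, 4)).float()
loss = criterion(logits, targets)  # scalar: BCE term + Dice term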
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_binary_cross_entropy_div_mul_rsub_sum_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp0 - tmp1
tmp4 = tl.sigmoid(tmp3)
tmp5 = -tmp4
tmp6 = libdevice.log1p(tmp5)
tmp7 = -100.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp2 * tmp8
tmp10 = tl_math.log(tmp4)
tmp11 = triton_helpers.maximum(tmp10, tmp7)
tmp12 = tmp0 * tmp11
tmp13 = tmp9 - tmp12
tmp14 = tl.broadcast_to(tmp13, [RBLOCK])
tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0))
tmp17 = tmp4 * tmp0
tmp18 = tl.broadcast_to(tmp17, [RBLOCK])
tmp20 = triton_helpers.promote_to_tensor(tl.sum(tmp18, 0))
tmp21 = tl.broadcast_to(tmp4, [RBLOCK])
tmp23 = triton_helpers.promote_to_tensor(tl.sum(tmp21, 0))
tmp24 = tl.broadcast_to(tmp0, [RBLOCK])
tmp26 = triton_helpers.promote_to_tensor(tl.sum(tmp24, 0))
tmp27 = 256.0
tmp28 = tmp16 / tmp27
tmp29 = 2.0
tmp30 = tmp20 * tmp29
tmp31 = tmp30 + tmp1
tmp32 = tmp23 + tmp26
tmp33 = tmp32 + tmp1
tmp34 = tmp31 / tmp33
tmp35 = tmp1 - tmp34
tmp36 = tmp28 + tmp35
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp36, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf4 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_binary_cross_entropy_div_mul_rsub_sum_0[grid(1)](
buf4, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf4,
class DiceBCELossNew(nn.Module):
"""
This loss combines Dice loss with the standard binary cross-entropy (BCE) loss that is generally the default for segmentation models.
Combining the two methods allows for some diversity in the loss, while benefitting from the stability of BCE.
"""
def __init__(self, weight=None, size_average=True):
super(DiceBCELossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
Exdenta/torchsat | DiceBCELoss | false | 13,648 | ["MIT"] | 316 | 70ea3db758757104fb3ba618ddf7997f0f3a75b4 | https://github.com/Exdenta/torchsat/tree/70ea3db758757104fb3ba618ddf7997f0f3a75b4
|
CharbonnierLoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/4o/c4oqy72fdaliouj3mb6dz74zmds2djttl7pvwrhlac4244bp4hf7.py
# Topologically Sorted Source Nodes: [diff, mul, add, sqrt, loss], Original ATen: [aten.sub, aten.mul, aten.add, aten.sqrt, aten.sum]
# Source node to ATen node mapping:
# add => add
# diff => sub
# loss => sum_1
# mul => mul
# sqrt => sqrt
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %sub), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 1e-06), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%add,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sqrt,), kwargs = {})
triton_per_fused_add_mul_sqrt_sub_sum_0 = async_compile.triton('triton_per_fused_add_mul_sqrt_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_mul_sqrt_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_mul_sqrt_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = 1e-06
tmp5 = tmp3 + tmp4
tmp6 = libdevice.sqrt(tmp5)
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tl.store(out_ptr0 + (tl.full([1], 0, tl.int32)), tmp9, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [diff, mul, add, sqrt, loss], Original ATen: [aten.sub, aten.mul, aten.add, aten.sqrt, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_add_mul_sqrt_sub_sum_0.run(arg0_1, arg1_1, buf0, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.utils.data
import torch.nn as nn
class CharbonnierLoss(nn.Module):
"""Charbonnier Loss (L1)"""
def __init__(self, eps=1e-06):
super(CharbonnierLoss, self).__init__()
self.eps = eps
def forward(self, x, y):
diff = x - y
loss = torch.sum(torch.sqrt(diff * diff + self.eps))
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
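# --- Editor's usage sketch (added; not from the source repo) ---
# Charbonnier is a smoothed L1: sqrt(diff^2 + eps) per element, summed.
# The eps term keeps the gradient finite at diff == 0.
criterion = CharbonnierLoss()
x = torch.rand(4, 4, 4, 4)
y = torch.rand(4, 4, 4, 4)
loss = criterion(x, y)
assert torch.allclose(loss, torch.sqrt((x - y) ** 2 + 1e-06).sum())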
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.utils.data
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_mul_sqrt_sub_sum_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = 1e-06
tmp5 = tmp3 + tmp4
tmp6 = libdevice.sqrt(tmp5)
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp9, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_add_mul_sqrt_sub_sum_0[grid(1)](arg0_1, arg1_1,
buf0, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class CharbonnierLossNew(nn.Module):
"""Charbonnier Loss (L1)"""
def __init__(self, eps=1e-06):
super(CharbonnierLossNew, self).__init__()
self.eps = eps
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
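# --- Editor's sanity check (added; assumption): the fused kernel should
# match the eager formula sum(sqrt((x - y)^2 + 1e-06)) on the same inputs,
# up to reduction-order rounding.
if torch.cuda.is_available():
    a = torch.rand(4, 4, 4, 4, device='cuda')
    b = torch.rand(4, 4, 4, 4, device='cuda')
    ref = torch.sqrt((a - b) ** 2 + 1e-06).sum()
    assert torch.allclose(CharbonnierLossNew()(a, b), ref, atol=1e-05)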
|
EvgeneyZ/TMNet | CharbonnierLoss | false | 13,649 | ["Apache-2.0"] | 90 | 8a42754747c2fa575e9108c13b5018a884f46099 | https://github.com/EvgeneyZ/TMNet/tree/8a42754747c2fa575e9108c13b5018a884f46099
|
SpectralEigenConv
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/37/c37syvo63vdmotuxpfmllazayh63ckflzom725nffg466fmtlhph.py
# Topologically Sorted Source Nodes: [x_out_1, mul_1, x_out_2, mul_2, x_out_3, mul_3, x_out_4, mul_4, x_out_5, mul_5, x_out_6, mul_6, x_out_7, mul_7, x_out_8, mul_8, x_out_9, mul_9, x_out_10, x_out_11, mul_10, x_out_12], Original ATen: [aten.add, aten.mul, aten.div]
# Source node to ATen node mapping:
# mul_1 => mul_1
# mul_10 => mul_10
# mul_2 => mul_2
# mul_3 => mul_3
# mul_4 => mul_4
# mul_5 => mul_5
# mul_6 => mul_6
# mul_7 => mul_7
# mul_8 => mul_8
# mul_9 => mul_9
# x_out_1 => mul
# x_out_10 => add_9
# x_out_11 => div
# x_out_12 => add_10
# x_out_2 => add_1
# x_out_3 => add_2
# x_out_4 => add_3
# x_out_5 => add_4
# x_out_6 => add_5
# x_out_7 => add_6
# x_out_8 => add_7
# x_out_9 => add_8
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_1, 0.9), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_2, 0.9), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_3, 0.9), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %mul_2), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_4, 0.9), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %mul_3), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_5, 0.9), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %mul_4), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_6, 0.9), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %mul_5), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_7, 0.9), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, %mul_6), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_8, 0.9), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_6, %mul_7), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_9, 0.9), kwargs = {})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_7, %mul_8), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm_10, 0.9), kwargs = {})
# %add_9 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_8, %mul_9), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_9, 10), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mm, 0.1), kwargs = {})
# %add_10 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div, %mul_10), kwargs = {})
triton_poi_fused_add_div_mul_0 = async_compile.triton('triton_poi_fused_add_div_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mul_0', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 11, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mul_0(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp3 = tl.load(in_ptr0 + (x0), xmask)
tmp6 = tl.load(in_ptr1 + (x0), xmask)
tmp9 = tl.load(in_ptr2 + (x0), xmask)
tmp12 = tl.load(in_ptr3 + (x0), xmask)
tmp15 = tl.load(in_ptr4 + (x0), xmask)
tmp18 = tl.load(in_ptr5 + (x0), xmask)
tmp21 = tl.load(in_ptr6 + (x0), xmask)
tmp24 = tl.load(in_ptr7 + (x0), xmask)
tmp27 = tl.load(in_ptr8 + (x0), xmask)
tmp32 = tl.load(in_out_ptr1 + (x0), xmask)
tmp1 = 0.9
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp1
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp1
tmp11 = tmp8 + tmp10
tmp13 = tmp12 * tmp1
tmp14 = tmp11 + tmp13
tmp16 = tmp15 * tmp1
tmp17 = tmp14 + tmp16
tmp19 = tmp18 * tmp1
tmp20 = tmp17 + tmp19
tmp22 = tmp21 * tmp1
tmp23 = tmp20 + tmp22
tmp25 = tmp24 * tmp1
tmp26 = tmp23 + tmp25
tmp28 = tmp27 * tmp1
tmp29 = tmp26 + tmp28
tmp30 = 0.1
tmp31 = tmp29 * tmp30
tmp33 = tmp32 * tmp30
tmp34 = tmp31 + tmp33
tl.store(in_out_ptr1 + (x0), tmp34, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.mm]
extern_kernels.mm(primals_2, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.mm]
extern_kernels.mm(primals_3, buf0, out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.mm]
extern_kernels.mm(primals_3, buf1, out=buf2)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.mm]
extern_kernels.mm(primals_3, buf2, out=buf3)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.mm]
extern_kernels.mm(primals_3, buf3, out=buf4)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.mm]
extern_kernels.mm(primals_3, buf4, out=buf5)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_6], Original ATen: [aten.mm]
extern_kernels.mm(primals_3, buf5, out=buf6)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.mm]
extern_kernels.mm(primals_3, buf6, out=buf7)
buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_8], Original ATen: [aten.mm]
extern_kernels.mm(primals_3, buf7, out=buf8)
buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_9], Original ATen: [aten.mm]
extern_kernels.mm(primals_3, buf8, out=buf9)
buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.mm]
extern_kernels.mm(primals_3, buf9, out=buf11)
buf10 = buf1; del buf1 # reuse
buf12 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [x_out_1, mul_1, x_out_2, mul_2, x_out_3, mul_3, x_out_4, mul_4, x_out_5, mul_5, x_out_6, mul_6, x_out_7, mul_7, x_out_8, mul_8, x_out_9, mul_9, x_out_10, x_out_11, mul_10, x_out_12], Original ATen: [aten.add, aten.mul, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_mul_0.run(buf10, buf12, buf2, buf3, buf4, buf5, buf6, buf7, buf8, buf9, buf11, 16, grid=grid(16), stream=stream0)
del buf10
del buf11
del buf2
del buf3
del buf4
del buf5
del buf6
del buf7
del buf8
del buf9
return (buf12, primals_2, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class SpectralEigenConv(nn.Module):
    def __init__(self, in_features, out_features, bias=False, K=10, alpha=0.1, **kwargs):
super().__init__()
assert K > 0
self.K = K
self.alpha = alpha
self.in_features = in_features
self.out_features = out_features
self.w = nn.Linear(in_features, out_features, bias=bias)
def forward(self, x, U, V=None):
"""
x: node attribute matrix
if `V=None`:
U: (N, N) adjacency matrix
else:
U: (N, k) eigenvector matrix
V: (k,) eigenvalue
"""
x = self.w(x)
if V is not None:
V_pow = torch.ones_like(V)
V_out = torch.zeros_like(V)
for _ in range(self.K):
V_pow *= V
V_out += (1 - self.alpha) * V_pow
V_out = V_out / self.K
x_out = U * V_out @ (U.t() @ x) + self.alpha * x
else:
adj = U
x_in = x
x_out = torch.zeros_like(x)
for _ in range(self.K):
x = torch.spmm(adj, x)
x_out += (1 - self.alpha) * x
x_out /= self.K
x_out += self.alpha * x_in
return x_out
def reset_parameters(self):
self.w.reset_parameters()
def __repr__(self):
return (
f'{self.__class__.__name__}({self.in_features}, {self.out_features}, K={self.K}, alpha={self.alpha})'
)
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
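# --- Editor's usage sketch (added; not from the source repo) ---
# Dense-adjacency path (V=None): K=10 propagation steps, each weighted by
# (1 - alpha) = 0.9, averaged over K, plus an alpha * x skip connection.
conv = SpectralEigenConv(4, 4, K=10, alpha=0.1)
x = torch.rand(4, 4)    # node features
adj = torch.rand(4, 4)  # dense matrix standing in for a (normalized) adjacency
out = conv(x, adj)
assert out.shape == (4, 4)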
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_div_mul_0(in_out_ptr0, in_out_ptr1, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp3 = tl.load(in_ptr0 + x0, xmask)
tmp6 = tl.load(in_ptr1 + x0, xmask)
tmp9 = tl.load(in_ptr2 + x0, xmask)
tmp12 = tl.load(in_ptr3 + x0, xmask)
tmp15 = tl.load(in_ptr4 + x0, xmask)
tmp18 = tl.load(in_ptr5 + x0, xmask)
tmp21 = tl.load(in_ptr6 + x0, xmask)
tmp24 = tl.load(in_ptr7 + x0, xmask)
tmp27 = tl.load(in_ptr8 + x0, xmask)
tmp32 = tl.load(in_out_ptr1 + x0, xmask)
tmp1 = 0.9
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp1
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp1
tmp11 = tmp8 + tmp10
tmp13 = tmp12 * tmp1
tmp14 = tmp11 + tmp13
tmp16 = tmp15 * tmp1
tmp17 = tmp14 + tmp16
tmp19 = tmp18 * tmp1
tmp20 = tmp17 + tmp19
tmp22 = tmp21 * tmp1
tmp23 = tmp20 + tmp22
tmp25 = tmp24 * tmp1
tmp26 = tmp23 + tmp25
tmp28 = tmp27 * tmp1
tmp29 = tmp26 + tmp28
tmp30 = 0.1
tmp31 = tmp29 * tmp30
tmp33 = tmp32 * tmp30
tmp34 = tmp31 + tmp33
tl.store(in_out_ptr1 + x0, tmp34, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_2, reinterpret_tensor(primals_1, (4, 4),
(1, 4), 0), out=buf0)
del primals_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_3, buf0, out=buf1)
buf2 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_3, buf1, out=buf2)
buf3 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_3, buf2, out=buf3)
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_3, buf3, out=buf4)
buf5 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_3, buf4, out=buf5)
buf6 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_3, buf5, out=buf6)
buf7 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_3, buf6, out=buf7)
buf8 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_3, buf7, out=buf8)
buf9 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_3, buf8, out=buf9)
buf11 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(primals_3, buf9, out=buf11)
buf10 = buf1
del buf1
buf12 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_add_div_mul_0[grid(16)](buf10, buf12, buf2, buf3,
buf4, buf5, buf6, buf7, buf8, buf9, buf11, 16, XBLOCK=16,
num_warps=1, num_stages=1)
del buf10
del buf11
del buf2
del buf3
del buf4
del buf5
del buf6
del buf7
del buf8
del buf9
return buf12, primals_2, reinterpret_tensor(primals_3, (4, 4), (1, 4), 0)
class SpectralEigenConvNew(nn.Module):
    def __init__(self, in_features, out_features, bias=False, K=10, alpha=0.1, **kwargs):
super().__init__()
assert K > 0
self.K = K
self.alpha = alpha
self.in_features = in_features
self.out_features = out_features
self.w = nn.Linear(in_features, out_features, bias=bias)
def reset_parameters(self):
self.w.reset_parameters()
def __repr__(self):
return (
f'{self.__class__.__name__}({self.in_features}, {self.out_features}, K={self.K}, alpha={self.alpha})'
)
def forward(self, input_0, input_1):
primals_1 = self.w.weight
primals_2 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0]
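# --- Editor's note (added; assumption): this compiled graph covers only the
# V=None (adjacency) branch of the eager module; the eigen-pair branch was
# not traced.
if torch.cuda.is_available():
    conv = SpectralEigenConvNew(4, 4).cuda()
    out = conv(torch.rand(4, 4, device='cuda'), torch.rand(4, 4, device='cuda'))
    assert out.shape == (4, 4)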
|
EdisonLeeeee/GraphGallery | SpectralEigenConv | false | 13,650 | ["MIT"] | 300 | 4eec9c5136bda14809bd22584b26cc346cdb633b | https://github.com/EdisonLeeeee/GraphGallery/tree/4eec9c5136bda14809bd22584b26cc346cdb633b
|
MaskedMultiTaskCrossEntropy
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ae/caej7x6ivtbfx3razwphbsppvltxyq6d5w2qk6e66wlm2i3qn4zg.py
# Topologically Sorted Source Nodes: [eq, target_active, scores, log, mul, sub, sub_1, log_1, mul_1, add, loss_terms, ne, missing_values_mask, mul_2, sum_1, sum_2, truediv], Original ATen: [aten.eq, aten._to_copy, aten.sigmoid, aten.log, aten.mul, aten.rsub, aten.add, aten.neg, aten.ne, aten.sum, aten.div]
# Source node to ATen node mapping:
# add => add
# eq => eq
# log => log
# log_1 => log_1
# loss_terms => neg
# missing_values_mask => convert_element_type_1
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# ne => ne
# scores => sigmoid
# sub => sub
# sub_1 => sub_1
# sum_1 => sum_1
# sum_2 => sum_2
# target_active => convert_element_type
# truediv => div
# Graph fragment:
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%arg1_1, 1), kwargs = {})
# %convert_element_type : [num_users=2] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%eq, torch.float32), kwargs = {})
# %sigmoid : [num_users=2] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sigmoid,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convert_element_type, %log), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %convert_element_type), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %sigmoid), kwargs = {})
# %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sub_1,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %log_1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%add,), kwargs = {})
# %ne : [num_users=1] = call_function[target=torch.ops.aten.ne.Scalar](args = (%arg1_1, 0), kwargs = {})
# %convert_element_type_1 : [num_users=2] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%ne, torch.float32), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, %convert_element_type_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_2,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%convert_element_type_1,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %sum_2), kwargs = {})
triton_per_fused__to_copy_add_div_eq_log_mul_ne_neg_rsub_sigmoid_sum_0 = async_compile.triton('triton_per_fused__to_copy_add_div_eq_log_mul_ne_neg_rsub_sigmoid_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__to_copy_add_div_eq_log_mul_ne_neg_rsub_sigmoid_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__to_copy_add_div_eq_log_mul_ne_neg_rsub_sigmoid_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp4 = tl.load(in_ptr1 + (r0), None)
tmp1 = 1.0
tmp2 = tmp0 == tmp1
tmp3 = tmp2.to(tl.float32)
tmp5 = tl.sigmoid(tmp4)
tmp6 = tl_math.log(tmp5)
tmp7 = tmp3 * tmp6
tmp8 = tmp1 - tmp3
tmp9 = tmp1 - tmp5
tmp10 = tl_math.log(tmp9)
tmp11 = tmp8 * tmp10
tmp12 = tmp7 + tmp11
tmp13 = -tmp12
tmp14 = 0.0
tmp15 = tmp0 != tmp14
tmp16 = tmp15.to(tl.float32)
tmp17 = tmp13 * tmp16
tmp18 = tl.broadcast_to(tmp17, [RBLOCK])
tmp20 = triton_helpers.promote_to_tensor(tl.sum(tmp18, 0))
tmp21 = tl.broadcast_to(tmp16, [RBLOCK])
tmp23 = triton_helpers.promote_to_tensor(tl.sum(tmp21, 0))
tmp24 = tmp20 / tmp23
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp24, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [eq, target_active, scores, log, mul, sub, sub_1, log_1, mul_1, add, loss_terms, ne, missing_values_mask, mul_2, sum_1, sum_2, truediv], Original ATen: [aten.eq, aten._to_copy, aten.sigmoid, aten.log, aten.mul, aten.rsub, aten.add, aten.neg, aten.ne, aten.sum, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused__to_copy_add_div_eq_log_mul_ne_neg_rsub_sigmoid_sum_0.run(buf2, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch import nn
class MaskedMultiTaskCrossEntropy(nn.Module):
def forward(self, input, target):
scores = torch.sigmoid(input)
target_active = (target == 1).float()
loss_terms = -(target_active * torch.log(scores) + (1 -
target_active) * torch.log(1 - scores))
missing_values_mask = (target != 0).float()
        return (loss_terms * missing_values_mask).sum() / missing_values_mask.sum()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
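# --- Editor's worked example (added; not from the source repo) ---
# Targets use a three-way coding: 1 = active, any other non-zero value
# (e.g. -1) = inactive, and 0 = missing (excluded by the mask). The result
# is the mean BCE over the non-missing entries only.
criterion = MaskedMultiTaskCrossEntropy()
logits = torch.randn(4, 4, 4, 4)
targets = torch.randint(-1, 2, (4, 4, 4, 4)).float()  # values in {-1, 0, 1}
loss = criterion(logits, targets)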
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused__to_copy_add_div_eq_log_mul_ne_neg_rsub_sigmoid_sum_0(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp4 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp0 == tmp1
tmp3 = tmp2.to(tl.float32)
tmp5 = tl.sigmoid(tmp4)
tmp6 = tl_math.log(tmp5)
tmp7 = tmp3 * tmp6
tmp8 = tmp1 - tmp3
tmp9 = tmp1 - tmp5
tmp10 = tl_math.log(tmp9)
tmp11 = tmp8 * tmp10
tmp12 = tmp7 + tmp11
tmp13 = -tmp12
tmp14 = 0.0
tmp15 = tmp0 != tmp14
tmp16 = tmp15.to(tl.float32)
tmp17 = tmp13 * tmp16
tmp18 = tl.broadcast_to(tmp17, [RBLOCK])
tmp20 = triton_helpers.promote_to_tensor(tl.sum(tmp18, 0))
tmp21 = tl.broadcast_to(tmp16, [RBLOCK])
tmp23 = triton_helpers.promote_to_tensor(tl.sum(tmp21, 0))
tmp24 = tmp20 / tmp23
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp24, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused__to_copy_add_div_eq_log_mul_ne_neg_rsub_sigmoid_sum_0[
grid(1)](buf2, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf2,
class MaskedMultiTaskCrossEntropyNew(nn.Module):
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
EricBoittier/graph-neural-networks-for-drug-discovery | MaskedMultiTaskCrossEntropy | false | 13,651 | ["MIT"] | 69 | 12fed5c6e7bbd716d9f713d34067ed83dd539b50 | https://github.com/EricBoittier/graph-neural-networks-for-drug-discovery/tree/12fed5c6e7bbd716d9f713d34067ed83dd539b50
|
IoULoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/c7/cc76qxqgausxxr6mlvmao5hrwwdwds3gtvmgmginxooupz5op2ar.py
# Topologically Sorted Source Nodes: [mul, intersection, add_1, add, total, union, add_2, IoU, sub_1], Original ATen: [aten.mul, aten.sum, aten.add, aten.sub, aten.div, aten.rsub]
# Source node to ATen node mapping:
# IoU => div
# add => add
# add_1 => add_1
# add_2 => add_2
# intersection => sum_1
# mul => mul
# sub_1 => sub_1
# total => sum_2
# union => sub
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view_1), kwargs = {})
# %sum_1 : [num_users=2] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view, %view_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_2, %sum_1), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sub, 1), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_1, %add_2), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div), kwargs = {})
triton_per_fused_add_div_mul_rsub_sub_sum_0 = async_compile.triton('triton_per_fused_add_div_mul_rsub_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mul_rsub_sub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mul_rsub_sub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp2 = tl.load(in_ptr1 + (r0), None)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = tmp1 + tmp2
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp11 = 1.0
tmp12 = tmp6 + tmp11
tmp13 = tmp10 - tmp6
tmp14 = tmp13 + tmp11
tmp15 = tmp12 / tmp14
tmp16 = tmp11 - tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp16, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mul, intersection, add_1, add, total, union, add_2, IoU, sub_1], Original ATen: [aten.mul, aten.sum, aten.add, aten.sub, aten.div, aten.rsub]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_mul_rsub_sub_sum_0.run(buf2, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class IoULoss(nn.Module):
"""
    The IoU metric, or Jaccard Index, is similar to the Dice metric and is calculated as the ratio between the
    overlap (intersection) of the positive instances in two sets and their combined values (union).
"""
def __init__(self, weight=None, size_average=True):
super(IoULoss, self).__init__()
def forward(self, inputs, targets, smooth=1):
inputs = F.sigmoid(inputs)
inputs = inputs.view(-1)
targets = targets.view(-1)
intersection = (inputs * targets).sum()
total = (inputs + targets).sum()
union = total - intersection
IoU = (intersection + smooth) / (union + smooth)
return 1 - IoU
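# Worked example (added commentary): with smooth=1, if the flattened tensors
# give intersection = 30, inputs.sum() = 50 and targets.sum() = 40, then
# union = 50 + 40 - 30 = 60 and IoU = (30 + 1) / (60 + 1) ~= 0.508, so the
# returned loss is ~= 0.492. (Note: F.sigmoid is deprecated; torch.sigmoid is
# the modern equivalent with identical behavior.)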
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mul_rsub_sub_sum_0(in_out_ptr0, in_ptr0,
in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp2 = tl.load(in_ptr1 + r0, None)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = tmp1 + tmp2
tmp8 = tl.broadcast_to(tmp7, [RBLOCK])
tmp10 = triton_helpers.promote_to_tensor(tl.sum(tmp8, 0))
tmp11 = 1.0
tmp12 = tmp6 + tmp11
tmp13 = tmp10 - tmp6
tmp14 = tmp13 + tmp11
tmp15 = tmp12 / tmp14
tmp16 = tmp11 - tmp15
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp16, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_mul_rsub_sub_sum_0[grid(1)](buf2, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf2,
class IoULossNew(nn.Module):
"""
    The IoU metric, or Jaccard Index, is similar to the Dice metric and is calculated as the ratio between the
    overlap (intersection) of the positive instances in two sets and their combined values (union).
"""
def __init__(self, weight=None, size_average=True):
super(IoULossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
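# Minimal usage sketch (added for illustration; assumes a CUDA device, since
# call() pins everything to cuda:0):
#
#   loss_fn = IoULossNew()
#   preds = torch.rand(4, 4, 4, 4, device='cuda')   # raw logits
#   masks = torch.rand(4, 4, 4, 4, device='cuda')   # targets in [0, 1]
#   loss = loss_fn(preds, masks)                    # 0-dim tensor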
|
Exdenta/torchsat
|
IoULoss
| false | 13,652 |
[
"MIT"
] | 316 |
70ea3db758757104fb3ba618ddf7997f0f3a75b4
|
https://github.com/Exdenta/torchsat/tree/70ea3db758757104fb3ba618ddf7997f0f3a75b4
|
FocalLoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/n5/cn5gu35hxmw6a5bgkhvta4qxlaf2lrdiw45odfb7qcny6vya6wix.py
# Topologically Sorted Source Nodes: [BCE, neg, BCE_EXP, sub, pow_1, mul, focal_loss], Original ATen: [aten.binary_cross_entropy, aten.neg, aten.exp, aten.rsub, aten.pow, aten.mul]
# Source node to ATen node mapping:
# BCE => full_default, full_default_1, log, log1p, maximum, maximum_1, mean, mul, mul_1, neg, sub, sub_1
# BCE_EXP => exp
# focal_loss => mul_3
# mul => mul_2
# neg => neg_1
# pow_1 => pow_1
# sub => sub_2
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, 1), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%view,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%neg,), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %maximum : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log1p, %full_default), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %maximum), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%view,), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -100), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %maximum_1 : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%log, %full_default_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %maximum_1), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %mul_1), kwargs = {})
# %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.default](args = (%sub_1,), kwargs = {})
# %neg_1 : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%mean,), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg_1,), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %exp), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_2, 2), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, 0.8), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_2, %mean), kwargs = {})
triton_per_fused_binary_cross_entropy_exp_mul_neg_pow_rsub_0 = async_compile.triton('triton_per_fused_binary_cross_entropy_exp_mul_neg_pow_rsub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_binary_cross_entropy_exp_mul_neg_pow_rsub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_binary_cross_entropy_exp_mul_neg_pow_rsub_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp3 = tl.load(in_ptr1 + (r0), None)
tmp1 = 1.0
tmp2 = tmp0 - tmp1
tmp4 = tl.sigmoid(tmp3)
tmp5 = -tmp4
tmp6 = libdevice.log1p(tmp5)
tmp7 = -100.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp2 * tmp8
tmp10 = tl_math.log(tmp4)
tmp11 = triton_helpers.maximum(tmp10, tmp7)
tmp12 = tmp0 * tmp11
tmp13 = tmp9 - tmp12
tmp14 = tl.broadcast_to(tmp13, [RBLOCK])
tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0))
tmp17 = 256.0
tmp18 = tmp16 / tmp17
tmp19 = -tmp18
tmp20 = tl_math.exp(tmp19)
tmp21 = tmp1 - tmp20
tmp22 = tmp21 * tmp21
tmp23 = 0.8
tmp24 = tmp22 * tmp23
tmp25 = tmp24 * tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp25, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [BCE, neg, BCE_EXP, sub, pow_1, mul, focal_loss], Original ATen: [aten.binary_cross_entropy, aten.neg, aten.exp, aten.rsub, aten.pow, aten.mul]
stream0 = get_raw_stream(0)
triton_per_fused_binary_cross_entropy_exp_mul_neg_pow_rsub_0.run(buf1, arg1_1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class FocalLoss(nn.Module):
"""
    Focal Loss was introduced by Lin et al. of Facebook AI Research in 2017 as a means of combating extremely
    imbalanced datasets where positive cases are relatively rare. Their paper "Focal Loss for Dense Object Detection"
    is retrievable here: https://arxiv.org/abs/1708.02002. In practice, the researchers used an alpha-modified
    version of the function, so I have included it in this implementation.
"""
def __init__(self, weight=None, size_average=True):
super(FocalLoss, self).__init__()
def forward(self, inputs, targets, alpha=0.8, gamma=2, smooth=1):
inputs = F.sigmoid(inputs)
inputs = inputs.view(-1)
targets = targets.view(-1)
BCE = F.binary_cross_entropy(inputs, targets, reduction='mean')
BCE_EXP = torch.exp(-BCE)
focal_loss = alpha * (1 - BCE_EXP) ** gamma * BCE
return focal_loss
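# Worked chain (added commentary): for a mean BCE of 0.30 the weighting is
# BCE_EXP = exp(-0.30) ~= 0.741, (1 - BCE_EXP)**2 ~= 0.067, and the loss is
# 0.8 * 0.067 * 0.30 ~= 0.016 -- batches the model already classifies well
# are down-weighted, while a large BCE keeps (1 - exp(-BCE))**gamma near 1.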
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_binary_cross_entropy_exp_mul_neg_pow_rsub_0(in_out_ptr0,
in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp0 - tmp1
tmp4 = tl.sigmoid(tmp3)
tmp5 = -tmp4
tmp6 = libdevice.log1p(tmp5)
tmp7 = -100.0
tmp8 = triton_helpers.maximum(tmp6, tmp7)
tmp9 = tmp2 * tmp8
tmp10 = tl_math.log(tmp4)
tmp11 = triton_helpers.maximum(tmp10, tmp7)
tmp12 = tmp0 * tmp11
tmp13 = tmp9 - tmp12
tmp14 = tl.broadcast_to(tmp13, [RBLOCK])
tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0))
tmp17 = 256.0
tmp18 = tmp16 / tmp17
tmp19 = -tmp18
tmp20 = tl_math.exp(tmp19)
tmp21 = tmp1 - tmp20
tmp22 = tmp21 * tmp21
tmp23 = 0.8
tmp24 = tmp22 * tmp23
tmp25 = tmp24 * tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp25, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_binary_cross_entropy_exp_mul_neg_pow_rsub_0[grid(1)](
buf1, arg1_1, arg0_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class FocalLossNew(nn.Module):
"""
    Focal Loss was introduced by Lin et al. of Facebook AI Research in 2017 as a means of combating extremely
    imbalanced datasets where positive cases are relatively rare. Their paper "Focal Loss for Dense Object Detection"
    is retrievable here: https://arxiv.org/abs/1708.02002. In practice, the researchers used an alpha-modified
    version of the function, so I have included it in this implementation.
"""
def __init__(self, weight=None, size_average=True):
super(FocalLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
Exdenta/torchsat
|
FocalLoss
| false | 13,653 |
[
"MIT"
] | 316 |
70ea3db758757104fb3ba618ddf7997f0f3a75b4
|
https://github.com/Exdenta/torchsat/tree/70ea3db758757104fb3ba618ddf7997f0f3a75b4
|
rSoftMax
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/gj/cgjvqsy5zcwbtj3z3zkbr27icqciys5sxtne5znjgezeqbnfzpp6.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# x_1 => amax, clone, exp, sub
# Graph fragment:
# %clone : [num_users=2] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%clone, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clone, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/6o/c6oft7einjbxgjggpxqa26wmlsa2xngiy67esduzi4i7y3s6gczj.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# x_1 => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (16*x1) + (64*x3)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + (16*x1) + (64*x3)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + (16*x1) + (64*x3)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + (16*x1) + (64*x3)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x4), tmp8, xmask)
''', device_str='cuda')
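# Added commentary: the softmax over the radix dimension is split across the
# two pointwise kernels above for numerical stability -- the first subtracts
# the per-group maximum before exponentiating, and the second divides each
# exponentiated value by the per-group sum.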
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 4, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf0, buf1, 256, grid=grid(256), stream=stream0)
del buf0
return (reinterpret_tensor(buf1, (4, 64), (64, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class rSoftMax(nn.Module):
def __init__(self, radix, cardinality):
super().__init__()
self.radix = radix
self.cardinality = cardinality
def forward(self, x):
batch = x.size(0)
if self.radix > 1:
x = x.view(batch, self.cardinality, self.radix, -1).transpose(1, 2)
x = F.softmax(x, dim=1)
x = x.reshape(batch, -1)
else:
x = torch.sigmoid(x)
return x
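# Shape walkthrough (added commentary) for the get_init_inputs configuration
# radix=4, cardinality=4 and a (4, 4, 4, 4) input: view -> (4, cardinality=4,
# radix=4, 4), transpose(1, 2) -> (4, radix=4, cardinality=4, 4), softmax over
# dim=1 normalizes across the radix splits, and reshape flattens back to
# (4, 64).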
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'radix': 4, 'cardinality': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 16
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 16 * x1 + 64 * x3), xmask,
eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + 16 * x1 + 64 * x3), xmask,
eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (8 + x0 + 16 * x1 + 64 * x3), xmask,
eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (12 + x0 + 16 * x1 + 64 * x3), xmask,
eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x4, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 4, 16, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf0, buf1, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf0
return reinterpret_tensor(buf1, (4, 64), (64, 1), 0),
class rSoftMaxNew(nn.Module):
def __init__(self, radix, cardinality):
super().__init__()
self.radix = radix
self.cardinality = cardinality
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
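# Minimal usage sketch (added for illustration; assumes a CUDA device):
#
#   m = rSoftMaxNew(radix=4, cardinality=4)
#   out = m(torch.rand(4, 4, 4, 4, device='cuda'))  # shape (4, 64)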
|
Exdenta/torchsat
|
rSoftMax
| false | 13,654 |
[
"MIT"
] | 316 |
70ea3db758757104fb3ba618ddf7997f0f3a75b4
|
https://github.com/Exdenta/torchsat/tree/70ea3db758757104fb3ba618ddf7997f0f3a75b4
|
DiceLoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/il/cile5rubx7j2trpwwhxvyx2n7vmplsdhfcaobcwnijtfsgj3p43b.py
# Topologically Sorted Source Nodes: [mul, intersection, mul_1, add, sum_2, sum_3, add_1, add_2, dice, sub], Original ATen: [aten.mul, aten.sum, aten.add, aten.div, aten.rsub]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# dice => div
# intersection => sum_1
# mul => mul
# mul_1 => mul_1
# sub => sub
# sum_2 => sum_2
# sum_3 => sum_3
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2.0), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, 1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%view,), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%view_1,), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_3), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, 1), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, %add_2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div), kwargs = {})
triton_per_fused_add_div_mul_rsub_sum_0 = async_compile.triton('triton_per_fused_add_div_mul_rsub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mul_rsub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp2 = tl.load(in_ptr1 + (r0), None)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = tl.broadcast_to(tmp1, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = tl.broadcast_to(tmp2, [RBLOCK])
tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
tmp13 = 2.0
tmp14 = tmp6 * tmp13
tmp15 = 1.0
tmp16 = tmp14 + tmp15
tmp17 = tmp9 + tmp12
tmp18 = tmp17 + tmp15
tmp19 = tmp16 / tmp18
tmp20 = tmp15 - tmp19
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp20, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mul, intersection, mul_1, add, sum_2, sum_3, add_1, add_2, dice, sub], Original ATen: [aten.mul, aten.sum, aten.add, aten.div, aten.rsub]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_mul_rsub_sum_0.run(buf3, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class DiceLoss(nn.Module):
"""
    The Dice coefficient, or Dice-Sørensen coefficient, is a common metric for pixel segmentation.
"""
def __init__(self, weight=None, size_average=True):
super(DiceLoss, self).__init__()
def forward(self, inputs, labels, smooth=1):
inputs = F.sigmoid(inputs)
inputs = inputs.view(-1)
labels = labels.view(-1)
intersection = (inputs * labels).sum()
dice = (2.0 * intersection + smooth) / (inputs.sum() + labels.sum() +
smooth)
return 1 - dice
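# Added commentary: this is 1 - F1 for soft predictions. With smooth=1,
# intersection = 30, inputs.sum() = 50 and targets.sum() = 40 give
# dice = (2*30 + 1) / (50 + 40 + 1) = 61/91 ~= 0.670, i.e. a loss of ~= 0.330.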
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp2 = tl.load(in_ptr1 + r0, None)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = tl.broadcast_to(tmp1, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = tl.broadcast_to(tmp2, [RBLOCK])
tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
tmp13 = 2.0
tmp14 = tmp6 * tmp13
tmp15 = 1.0
tmp16 = tmp14 + tmp15
tmp17 = tmp9 + tmp12
tmp18 = tmp17 + tmp15
tmp19 = tmp16 / tmp18
tmp20 = tmp15 - tmp19
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp20, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_mul_rsub_sum_0[grid(1)](buf3, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf3,
class DiceLossNew(nn.Module):
"""
    The Dice coefficient, or Dice-Sørensen coefficient, is a common metric for pixel segmentation.
"""
def __init__(self, weight=None, size_average=True):
super(DiceLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
Exdenta/torchsat
|
DiceLoss
| false | 13,655 |
[
"MIT"
] | 316 |
70ea3db758757104fb3ba618ddf7997f0f3a75b4
|
https://github.com/Exdenta/torchsat/tree/70ea3db758757104fb3ba618ddf7997f0f3a75b4
|
FocalTverskyLoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/mp/cmpcpy753dcuap6pqasx24fp6ysaj5g4dsbq2vmtat343khq5jmi.py
# Topologically Sorted Source Nodes: [mul, TP, add, sub, mul_1, FP, mul_3, add_1, sub_1, mul_2, FN, mul_4, add_2, add_3, Tversky, sub_2], Original ATen: [aten.mul, aten.sum, aten.add, aten.rsub, aten.div]
# Source node to ATen node mapping:
# FN => sum_3
# FP => sum_2
# TP => sum_1
# Tversky => div
# add => add
# add_1 => add_1
# add_2 => add_2
# add_3 => add_3
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# mul_4 => mul_4
# sub => sub
# sub_1 => sub_1
# sub_2 => sub_2
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view_1), kwargs = {})
# %sum_1 : [num_users=2] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, 1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %view_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %view), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_1,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_2, 0.5), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %mul_3), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %view), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %sub_1), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_2,), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_3, 0.5), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, %mul_4), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, 1), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, %add_3), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %div), kwargs = {})
triton_per_fused_add_div_mul_rsub_sum_0 = async_compile.triton('triton_per_fused_add_div_mul_rsub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_mul_rsub_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 3, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp2 = tl.load(in_ptr1 + (r0), None)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 1.0
tmp8 = tmp7 - tmp2
tmp9 = tmp8 * tmp1
tmp10 = tl.broadcast_to(tmp9, [RBLOCK])
tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
tmp13 = tmp7 - tmp1
tmp14 = tmp2 * tmp13
tmp15 = tl.broadcast_to(tmp14, [RBLOCK])
tmp17 = triton_helpers.promote_to_tensor(tl.sum(tmp15, 0))
tmp18 = tmp6 + tmp7
tmp19 = 0.5
tmp20 = tmp12 * tmp19
tmp21 = tmp6 + tmp20
tmp22 = tmp17 * tmp19
tmp23 = tmp21 + tmp22
tmp24 = tmp23 + tmp7
tmp25 = tmp18 / tmp24
tmp26 = tmp7 - tmp25
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp26, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [mul, TP, add, sub, mul_1, FP, mul_3, add_1, sub_1, mul_2, FN, mul_4, add_2, add_3, Tversky, sub_2], Original ATen: [aten.mul, aten.sum, aten.add, aten.rsub, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_mul_rsub_sum_0.run(buf3, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class FocalTverskyLoss(nn.Module):
"""
A variant on the Tversky loss that also includes the gamma modifier from Focal Loss.
"""
def __init__(self, weight=None, size_average=True):
super(FocalTverskyLoss, self).__init__()
def forward(self, inputs, targets, smooth=1, alpha=0.5, beta=0.5, gamma=1):
inputs = F.sigmoid(inputs)
inputs = inputs.view(-1)
targets = targets.view(-1)
TP = (inputs * targets).sum()
FP = ((1 - targets) * inputs).sum()
FN = (targets * (1 - inputs)).sum()
Tversky = (TP + smooth) / (TP + alpha * FP + beta * FN + smooth)
FocalTversky = (1 - Tversky) ** gamma
return FocalTversky
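# Added commentary: with the defaults alpha=beta=0.5 and gamma=1 the Tversky
# index reduces to the Dice coefficient (up to the smoothing constant), so
# this default configuration matches DiceLoss above; alpha > beta penalizes
# false positives more heavily, and gamma > 1 focuses training on harder
# (low-Tversky) examples.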
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_mul_rsub_sum_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp2 = tl.load(in_ptr1 + r0, None)
tmp1 = tl.sigmoid(tmp0)
tmp3 = tmp1 * tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = triton_helpers.promote_to_tensor(tl.sum(tmp4, 0))
tmp7 = 1.0
tmp8 = tmp7 - tmp2
tmp9 = tmp8 * tmp1
tmp10 = tl.broadcast_to(tmp9, [RBLOCK])
tmp12 = triton_helpers.promote_to_tensor(tl.sum(tmp10, 0))
tmp13 = tmp7 - tmp1
tmp14 = tmp2 * tmp13
tmp15 = tl.broadcast_to(tmp14, [RBLOCK])
tmp17 = triton_helpers.promote_to_tensor(tl.sum(tmp15, 0))
tmp18 = tmp6 + tmp7
tmp19 = 0.5
tmp20 = tmp12 * tmp19
tmp21 = tmp6 + tmp20
tmp22 = tmp17 * tmp19
tmp23 = tmp21 + tmp22
tmp24 = tmp23 + tmp7
tmp25 = tmp18 / tmp24
tmp26 = tmp7 - tmp25
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp26, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf3 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_add_div_mul_rsub_sum_0[grid(1)](buf3, arg0_1,
arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf3,
class FocalTverskyLossNew(nn.Module):
"""
A variant on the Tversky loss that also includes the gamma modifier from Focal Loss.
"""
def __init__(self, weight=None, size_average=True):
super(FocalTverskyLossNew, self).__init__()
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
Exdenta/torchsat
|
FocalTverskyLoss
| false | 13,656 |
[
"MIT"
] | 316 |
70ea3db758757104fb3ba618ddf7997f0f3a75b4
|
https://github.com/Exdenta/torchsat/tree/70ea3db758757104fb3ba618ddf7997f0f3a75b4
|
ConvInRelu
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/wl/cwldpc2k6v7rbizd6tlddleva3alwxblabsherkqjtef5e45djwk.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.reflection_pad2d]
# Source node to ATen node mapping:
# x => _unsafe_index, _unsafe_index_1
# Graph fragment:
# %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %sub_1, None]), kwargs = {})
# %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_1]), kwargs = {})
triton_poi_fused_reflection_pad2d_0 = async_compile.triton('triton_poi_fused_reflection_pad2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8) % 8
x2 = (xindex // 64)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + (tl_math.abs((-2) + x0))))) + ((-4)*(tl_math.abs((-3) + (tl_math.abs((-2) + x1))))) + (16*x2)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
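# Added commentary on the kernel above: the load index implements
# ReflectionPad2d(2) for a 4x4 spatial input purely arithmetically. For an
# output coordinate x in [0, 8), 3 - |3 - |x - 2|| reflects x - 2 back into
# the valid range [0, 3]; the full expression 15 - |...| - 4*|...| + 16*x2 is
# col + 4*row + 16*channel rewritten around those two reflections.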
# kernel path: runs/run_shard_0/inductor_cache/rg/crgiuy5lc6ywpfamzhca2jbkjlo4hvgasuvj6efswlffpvmxicqa.py
# Topologically Sorted Source Nodes: [x_1, x_2, x_3], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => convolution
# x_2 => add, rsqrt, var_mean
# x_3 => relu
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_per_fused__native_batch_norm_legit_convolution_relu_threshold_backward_1 = async_compile.triton('triton_per_fused__native_batch_norm_legit_convolution_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 32],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_convolution_relu_threshold_backward_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_relu_threshold_backward_1(in_out_ptr0, in_ptr0, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 25
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r2 = rindex
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (r2 + (25*x3)), rmask & xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp5 = tl.where(rmask & xmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(rmask & xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 25, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(rmask & xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = tmp2 - tmp12
tmp20 = 25.0
tmp21 = tmp18 / tmp20
tmp22 = 1e-05
tmp23 = tmp21 + tmp22
tmp24 = libdevice.rsqrt(tmp23)
tmp25 = tmp19 * tmp24
tmp26 = tl.full([1, 1], 0, tl.int32)
tmp27 = triton_helpers.maximum(tmp26, tmp25)
tmp28 = 0.0
tmp29 = tmp27 <= tmp28
tl.store(in_out_ptr0 + (r2 + (25*x3)), tmp2, rmask & xmask)
tl.store(out_ptr2 + (r2 + (25*x3)), tmp27, rmask & xmask)
tl.store(out_ptr3 + (r2 + (25*x3)), tmp29, rmask & xmask)
tl.store(out_ptr4 + (x3), tmp24, xmask)
tl.store(out_ptr0 + (x3), tmp12, xmask)
''', device_str='cuda')
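# An eager-mode reading of the fused kernel above (a hedged sketch, not
# generated code): each program owns one (n, c) slice of the (4, 4, 5, 5)
# convolution output, i.e. rnumel = 25 spatial elements, and computes
#     y   = conv_out + bias                        # tmp2
#     mu  = y.mean()                               # tmp12
#     var = ((y - mu) ** 2).mean()                 # tmp18 / 25.0, biased
#     out = relu((y - mu) * (var + 1e-05).rsqrt())
# which matches nn.InstanceNorm2d(affine=False) followed by ReLU; the extra
# boolean store (out <= 0) is the mask kept for the ReLU backward.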
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.reflection_pad2d]
stream0 = get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0.run(primals_1, buf0, 1024, grid=grid(1024), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 5, 5), (100, 25, 5, 1))
buf2 = buf1; del buf1 # reuse
buf3 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf7 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32)
buf8 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.bool)
buf6 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [x_1, x_2, x_3], Original ATen: [aten.convolution, aten._native_batch_norm_legit, aten.relu, aten.threshold_backward]
triton_per_fused__native_batch_norm_legit_convolution_relu_threshold_backward_1.run(buf2, primals_3, buf3, buf7, buf8, buf6, 16, 25, grid=grid(16), stream=stream0)
del primals_3
return (buf7, primals_2, buf0, buf2, reinterpret_tensor(buf6, (16, ), (1, ), 0), buf8, reinterpret_tensor(buf3, (1, 16, 1, 1), (16, 1, 1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import numpy as np
import torch.nn as nn
class ConvInRelu(nn.Module):
def __init__(self, channels_in, channels_out, kernel_size, stride=1):
super(ConvInRelu, self).__init__()
self.n_params = 0
self.channels = channels_out
        self.reflection_pad = nn.ReflectionPad2d(int(np.floor(kernel_size / 2)))
self.conv = nn.Conv2d(channels_in, channels_out, kernel_size,
stride, padding=0)
self.instancenorm = nn.InstanceNorm2d(channels_out)
self.relu = nn.ReLU(inplace=False)
def forward(self, x):
x = self.reflection_pad(x)
x = self.conv(x)
x = self.instancenorm(x)
x = self.relu(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channels_in': 4, 'channels_out': 4, 'kernel_size': 4}]
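# A minimal shape sanity check for the module above (an illustrative sketch,
# not part of the original dump): with kernel_size=4 the reflection pad is
# floor(4/2) = 2, so a 4x4 input grows to 8x8 and the valid convolution
# returns 8 - 4 + 1 = 5 per spatial dimension.
def check_output_shape():
    m = ConvInRelu(channels_in=4, channels_out=4, kernel_size=4)
    y = m(torch.rand(4, 4, 4, 4))
    assert y.shape == (4, 4, 5, 5)
    return y.shape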
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import numpy as np
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8 % 8
x2 = xindex // 64
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + tl_math.abs(-2 +
x0)) + -4 * tl_math.abs(-3 + tl_math.abs(-2 + x1)) + 16 * x2),
xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_relu_threshold_backward_1(
in_out_ptr0, in_ptr0, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 16
rnumel = 25
RBLOCK: tl.constexpr = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r2 = rindex
x3 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (r2 + 25 * x3), rmask & xmask, other=0.0)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tl.where(rmask & xmask, tmp3, 0)
tmp6 = tl.broadcast_to(tmp3, [XBLOCK, RBLOCK])
tmp8 = tl.where(rmask & xmask, tmp6, 0)
tmp9 = tl.sum(tmp8, 1)[:, None]
tmp10 = tl.full([XBLOCK, 1], 25, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp3 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [XBLOCK, RBLOCK])
tmp17 = tl.where(rmask & xmask, tmp15, 0)
tmp18 = tl.sum(tmp17, 1)[:, None]
tmp19 = tmp2 - tmp12
tmp20 = 25.0
tmp21 = tmp18 / tmp20
tmp22 = 1e-05
tmp23 = tmp21 + tmp22
tmp24 = libdevice.rsqrt(tmp23)
tmp25 = tmp19 * tmp24
tmp26 = tl.full([1, 1], 0, tl.int32)
tmp27 = triton_helpers.maximum(tmp26, tmp25)
tmp28 = 0.0
tmp29 = tmp27 <= tmp28
tl.store(in_out_ptr0 + (r2 + 25 * x3), tmp2, rmask & xmask)
tl.store(out_ptr2 + (r2 + 25 * x3), tmp27, rmask & xmask)
tl.store(out_ptr3 + (r2 + 25 * x3), tmp29, rmask & xmask)
tl.store(out_ptr4 + x3, tmp24, xmask)
tl.store(out_ptr0 + x3, tmp12, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 8, 8), (256, 64, 8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0[grid(1024)](primals_1, buf0,
1024, XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 5, 5), (100, 25, 5, 1))
buf2 = buf1
del buf1
        buf3 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
buf7 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.float32)
buf8 = empty_strided_cuda((4, 4, 5, 5), (100, 25, 5, 1), torch.bool)
        buf6 = empty_strided_cuda((1, 16, 1, 1), (16, 1, 16, 16), torch.float32)
triton_per_fused__native_batch_norm_legit_convolution_relu_threshold_backward_1[
grid(16)](buf2, primals_3, buf3, buf7, buf8, buf6, 16, 25,
XBLOCK=8, num_warps=2, num_stages=1)
del primals_3
    return buf7, primals_2, buf0, buf2, reinterpret_tensor(buf6, (16,), (1,), 0), buf8, reinterpret_tensor(buf3, (1, 16, 1, 1), (16, 1, 1, 1), 0)
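# Reading of the return tuple above (an observation, not generated output):
# element 0 is the module output (the relu result buf7); the remaining
# tensors (the weight, padded input buf0, biased conv output buf2, the mean
# and rsqrt views, and the relu mask buf8) follow the inductor convention of
# saving intermediates for a potential backward pass.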
class ConvInReluNew(nn.Module):
def __init__(self, channels_in, channels_out, kernel_size, stride=1):
super(ConvInReluNew, self).__init__()
self.n_params = 0
self.channels = channels_out
        self.reflection_pad = nn.ReflectionPad2d(int(np.floor(kernel_size / 2)))
self.conv = nn.Conv2d(channels_in, channels_out, kernel_size,
stride, padding=0)
self.instancenorm = nn.InstanceNorm2d(channels_out)
self.relu = nn.ReLU(inplace=False)
def forward(self, input_0):
        primals_1 = input_0
        primals_2 = self.conv.weight
        primals_3 = self.conv.bias
output = call([primals_1, primals_2, primals_3])
return output[0]
|
ElistratovSemyon/style-augmentation
|
ConvInRelu
| false | 13,657 |
[
"MIT"
] | 69 |
ac88dcc92d43615e9a63d90ba58cdd8178c5b02c
|
https://github.com/ElistratovSemyon/style-augmentation/tree/ac88dcc92d43615e9a63d90ba58cdd8178c5b02c
|
LabelPropagation
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/bk/cbkxsp72qxe4rkxwcicmzvtskfizpwnd56mg5cfatr7zr2p42ofj.py
# Topologically Sorted Source Nodes: [mul_1, res, out, out_1], Original ATen: [aten.mul, aten.add, aten.clamp]
# Source node to ATen node mapping:
# mul_1 => mul_1
# out => add
# out_1 => clamp_max, clamp_min
# res => mul
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, 0.5), kwargs = {})
# %mul : [num_users=50] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, 0.5), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %mul), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 1), kwargs = {})
triton_poi_fused_add_clamp_mul_0 = async_compile.triton('triton_poi_fused_add_clamp_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_mul_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + (x0), xmask)
tmp3 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp5 = tmp2 + tmp4
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = 1.0
tmp9 = triton_helpers.minimum(tmp7, tmp8)
tl.store(in_out_ptr0 + (x0), tmp9, xmask)
''', device_str='cuda')
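# Eager-mode equivalent of the fused kernel above (a hedged sketch, not
# generated code): with alpha = 0.5 the update applied in place to the bmm
# result is
#     out = clamp(0.5 * (adj @ out) + 0.5 * y, 0.0, 1.0)
# where tmp0 is the matmul result already resident in in_out_ptr0 and tmp3
# is the residual source y; the same kernel is simply relaunched once per
# propagation step below.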
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1), 0), out=buf0)
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [mul_1, res, out, out_1], Original ATen: [aten.mul, aten.add, aten.clamp]
stream0 = get_raw_stream(0)
triton_poi_fused_add_clamp_mul_0.run(buf1, arg0_1, 256, grid=grid(256), stream=stream0)
buf2 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [res, mul_2, out_2, out_3], Original ATen: [aten.mul, aten.add, aten.clamp]
triton_poi_fused_add_clamp_mul_0.run(buf3, arg0_1, 256, grid=grid(256), stream=stream0)
buf4 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [matmul_2], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), out=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [res, mul_3, out_4, out_5], Original ATen: [aten.mul, aten.add, aten.clamp]
triton_poi_fused_add_clamp_mul_0.run(buf5, arg0_1, 256, grid=grid(256), stream=stream0)
buf6 = reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [matmul_3], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0), out=buf6)
buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [res, mul_4, out_6, out_7], Original ATen: [aten.mul, aten.add, aten.clamp]
triton_poi_fused_add_clamp_mul_0.run(buf7, arg0_1, 256, grid=grid(256), stream=stream0)
buf8 = reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0); del buf5 # reuse
# Topologically Sorted Source Nodes: [matmul_4], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), out=buf8)
buf9 = reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf8 # reuse
# Topologically Sorted Source Nodes: [res, mul_5, out_8, out_9], Original ATen: [aten.mul, aten.add, aten.clamp]
triton_poi_fused_add_clamp_mul_0.run(buf9, arg0_1, 256, grid=grid(256), stream=stream0)
buf10 = reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [matmul_5], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), out=buf10)
buf11 = reinterpret_tensor(buf10, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf10 # reuse
# Topologically Sorted Source Nodes: [res, mul_6, out_10, out_11], Original ATen: [aten.mul, aten.add, aten.clamp]
triton_poi_fused_add_clamp_mul_0.run(buf11, arg0_1, 256, grid=grid(256), stream=stream0)
buf12 = reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0); del buf9 # reuse
# Topologically Sorted Source Nodes: [matmul_6], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf11, (16, 4, 4), (16, 4, 1), 0), out=buf12)
buf13 = reinterpret_tensor(buf12, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf12 # reuse
# Topologically Sorted Source Nodes: [res, mul_7, out_12, out_13], Original ATen: [aten.mul, aten.add, aten.clamp]
triton_poi_fused_add_clamp_mul_0.run(buf13, arg0_1, 256, grid=grid(256), stream=stream0)
buf14 = reinterpret_tensor(buf11, (16, 4, 4), (16, 4, 1), 0); del buf11 # reuse
# Topologically Sorted Source Nodes: [matmul_7], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf13, (16, 4, 4), (16, 4, 1), 0), out=buf14)
buf15 = reinterpret_tensor(buf14, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf14 # reuse
# Topologically Sorted Source Nodes: [res, mul_8, out_14, out_15], Original ATen: [aten.mul, aten.add, aten.clamp]
triton_poi_fused_add_clamp_mul_0.run(buf15, arg0_1, 256, grid=grid(256), stream=stream0)
buf16 = reinterpret_tensor(buf13, (16, 4, 4), (16, 4, 1), 0); del buf13 # reuse
# Topologically Sorted Source Nodes: [matmul_8], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf15, (16, 4, 4), (16, 4, 1), 0), out=buf16)
buf17 = reinterpret_tensor(buf16, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf16 # reuse
# Topologically Sorted Source Nodes: [res, mul_9, out_16, out_17], Original ATen: [aten.mul, aten.add, aten.clamp]
triton_poi_fused_add_clamp_mul_0.run(buf17, arg0_1, 256, grid=grid(256), stream=stream0)
buf18 = reinterpret_tensor(buf15, (16, 4, 4), (16, 4, 1), 0); del buf15 # reuse
# Topologically Sorted Source Nodes: [matmul_9], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf17, (16, 4, 4), (16, 4, 1), 0), out=buf18)
buf19 = reinterpret_tensor(buf18, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf18 # reuse
# Topologically Sorted Source Nodes: [res, mul_10, out_18, out_19], Original ATen: [aten.mul, aten.add, aten.clamp]
triton_poi_fused_add_clamp_mul_0.run(buf19, arg0_1, 256, grid=grid(256), stream=stream0)
buf20 = reinterpret_tensor(buf17, (16, 4, 4), (16, 4, 1), 0); del buf17 # reuse
# Topologically Sorted Source Nodes: [matmul_10], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf19, (16, 4, 4), (16, 4, 1), 0), out=buf20)
buf21 = reinterpret_tensor(buf20, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf20 # reuse
# Topologically Sorted Source Nodes: [res, mul_11, out_20, out_21], Original ATen: [aten.mul, aten.add, aten.clamp]
triton_poi_fused_add_clamp_mul_0.run(buf21, arg0_1, 256, grid=grid(256), stream=stream0)
buf22 = reinterpret_tensor(buf19, (16, 4, 4), (16, 4, 1), 0); del buf19 # reuse
# Topologically Sorted Source Nodes: [matmul_11], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf21, (16, 4, 4), (16, 4, 1), 0), out=buf22)
buf23 = reinterpret_tensor(buf22, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf22 # reuse
# Topologically Sorted Source Nodes: [res, mul_12, out_22, out_23], Original ATen: [aten.mul, aten.add, aten.clamp]
triton_poi_fused_add_clamp_mul_0.run(buf23, arg0_1, 256, grid=grid(256), stream=stream0)
buf24 = reinterpret_tensor(buf21, (16, 4, 4), (16, 4, 1), 0); del buf21 # reuse
# Topologically Sorted Source Nodes: [matmul_12], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf23, (16, 4, 4), (16, 4, 1), 0), out=buf24)
buf25 = reinterpret_tensor(buf24, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf24 # reuse
# Topologically Sorted Source Nodes: [res, mul_13, out_24, out_25], Original ATen: [aten.mul, aten.add, aten.clamp]
triton_poi_fused_add_clamp_mul_0.run(buf25, arg0_1, 256, grid=grid(256), stream=stream0)
buf26 = reinterpret_tensor(buf23, (16, 4, 4), (16, 4, 1), 0); del buf23 # reuse
# Topologically Sorted Source Nodes: [matmul_13], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf25, (16, 4, 4), (16, 4, 1), 0), out=buf26)
buf27 = reinterpret_tensor(buf26, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf26 # reuse
# Topologically Sorted Source Nodes: [res, mul_14, out_26, out_27], Original ATen: [aten.mul, aten.add, aten.clamp]
triton_poi_fused_add_clamp_mul_0.run(buf27, arg0_1, 256, grid=grid(256), stream=stream0)
buf28 = reinterpret_tensor(buf25, (16, 4, 4), (16, 4, 1), 0); del buf25 # reuse
# Topologically Sorted Source Nodes: [matmul_14], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf27, (16, 4, 4), (16, 4, 1), 0), out=buf28)
buf29 = reinterpret_tensor(buf28, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf28 # reuse
# Topologically Sorted Source Nodes: [res, mul_15, out_28, out_29], Original ATen: [aten.mul, aten.add, aten.clamp]
triton_poi_fused_add_clamp_mul_0.run(buf29, arg0_1, 256, grid=grid(256), stream=stream0)
buf30 = reinterpret_tensor(buf27, (16, 4, 4), (16, 4, 1), 0); del buf27 # reuse
# Topologically Sorted Source Nodes: [matmul_15], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf29, (16, 4, 4), (16, 4, 1), 0), out=buf30)
buf31 = reinterpret_tensor(buf30, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf30 # reuse
# Topologically Sorted Source Nodes: [res, mul_16, out_30, out_31], Original ATen: [aten.mul, aten.add, aten.clamp]
triton_poi_fused_add_clamp_mul_0.run(buf31, arg0_1, 256, grid=grid(256), stream=stream0)
buf32 = reinterpret_tensor(buf29, (16, 4, 4), (16, 4, 1), 0); del buf29 # reuse
# Topologically Sorted Source Nodes: [matmul_16], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf31, (16, 4, 4), (16, 4, 1), 0), out=buf32)
buf33 = reinterpret_tensor(buf32, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf32 # reuse
# Topologically Sorted Source Nodes: [res, mul_17, out_32, out_33], Original ATen: [aten.mul, aten.add, aten.clamp]
triton_poi_fused_add_clamp_mul_0.run(buf33, arg0_1, 256, grid=grid(256), stream=stream0)
buf34 = reinterpret_tensor(buf31, (16, 4, 4), (16, 4, 1), 0); del buf31 # reuse
# Topologically Sorted Source Nodes: [matmul_17], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf33, (16, 4, 4), (16, 4, 1), 0), out=buf34)
buf35 = reinterpret_tensor(buf34, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf34 # reuse
# Topologically Sorted Source Nodes: [res, mul_18, out_34, out_35], Original ATen: [aten.mul, aten.add, aten.clamp]
triton_poi_fused_add_clamp_mul_0.run(buf35, arg0_1, 256, grid=grid(256), stream=stream0)
buf36 = reinterpret_tensor(buf33, (16, 4, 4), (16, 4, 1), 0); del buf33 # reuse
# Topologically Sorted Source Nodes: [matmul_18], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf35, (16, 4, 4), (16, 4, 1), 0), out=buf36)
buf37 = reinterpret_tensor(buf36, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf36 # reuse
# Topologically Sorted Source Nodes: [res, mul_19, out_36, out_37], Original ATen: [aten.mul, aten.add, aten.clamp]
triton_poi_fused_add_clamp_mul_0.run(buf37, arg0_1, 256, grid=grid(256), stream=stream0)
buf38 = reinterpret_tensor(buf35, (16, 4, 4), (16, 4, 1), 0); del buf35 # reuse
# Topologically Sorted Source Nodes: [matmul_19], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf37, (16, 4, 4), (16, 4, 1), 0), out=buf38)
buf39 = reinterpret_tensor(buf38, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf38 # reuse
# Topologically Sorted Source Nodes: [res, mul_20, out_38, out_39], Original ATen: [aten.mul, aten.add, aten.clamp]
triton_poi_fused_add_clamp_mul_0.run(buf39, arg0_1, 256, grid=grid(256), stream=stream0)
buf40 = reinterpret_tensor(buf37, (16, 4, 4), (16, 4, 1), 0); del buf37 # reuse
# Topologically Sorted Source Nodes: [matmul_20], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf39, (16, 4, 4), (16, 4, 1), 0), out=buf40)
buf41 = reinterpret_tensor(buf40, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf40 # reuse
# Topologically Sorted Source Nodes: [res, mul_21, out_40, out_41], Original ATen: [aten.mul, aten.add, aten.clamp]
triton_poi_fused_add_clamp_mul_0.run(buf41, arg0_1, 256, grid=grid(256), stream=stream0)
buf42 = reinterpret_tensor(buf39, (16, 4, 4), (16, 4, 1), 0); del buf39 # reuse
# Topologically Sorted Source Nodes: [matmul_21], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf41, (16, 4, 4), (16, 4, 1), 0), out=buf42)
buf43 = reinterpret_tensor(buf42, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf42 # reuse
# Topologically Sorted Source Nodes: [res, mul_22, out_42, out_43], Original ATen: [aten.mul, aten.add, aten.clamp]
triton_poi_fused_add_clamp_mul_0.run(buf43, arg0_1, 256, grid=grid(256), stream=stream0)
buf44 = reinterpret_tensor(buf41, (16, 4, 4), (16, 4, 1), 0); del buf41 # reuse
# Topologically Sorted Source Nodes: [matmul_22], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf43, (16, 4, 4), (16, 4, 1), 0), out=buf44)
buf45 = reinterpret_tensor(buf44, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf44 # reuse
# Topologically Sorted Source Nodes: [res, mul_23, out_44, out_45], Original ATen: [aten.mul, aten.add, aten.clamp]
triton_poi_fused_add_clamp_mul_0.run(buf45, arg0_1, 256, grid=grid(256), stream=stream0)
buf46 = reinterpret_tensor(buf43, (16, 4, 4), (16, 4, 1), 0); del buf43 # reuse
# Topologically Sorted Source Nodes: [matmul_23], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf45, (16, 4, 4), (16, 4, 1), 0), out=buf46)
buf47 = reinterpret_tensor(buf46, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf46 # reuse
# Topologically Sorted Source Nodes: [res, mul_24, out_46, out_47], Original ATen: [aten.mul, aten.add, aten.clamp]
triton_poi_fused_add_clamp_mul_0.run(buf47, arg0_1, 256, grid=grid(256), stream=stream0)
buf48 = reinterpret_tensor(buf45, (16, 4, 4), (16, 4, 1), 0); del buf45 # reuse
# Topologically Sorted Source Nodes: [matmul_24], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf47, (16, 4, 4), (16, 4, 1), 0), out=buf48)
buf49 = reinterpret_tensor(buf48, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf48 # reuse
# Topologically Sorted Source Nodes: [res, mul_25, out_48, out_49], Original ATen: [aten.mul, aten.add, aten.clamp]
triton_poi_fused_add_clamp_mul_0.run(buf49, arg0_1, 256, grid=grid(256), stream=stream0)
buf50 = reinterpret_tensor(buf47, (16, 4, 4), (16, 4, 1), 0); del buf47 # reuse
# Topologically Sorted Source Nodes: [matmul_25], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf49, (16, 4, 4), (16, 4, 1), 0), out=buf50)
buf51 = reinterpret_tensor(buf50, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf50 # reuse
# Topologically Sorted Source Nodes: [res, mul_26, out_50, out_51], Original ATen: [aten.mul, aten.add, aten.clamp]
triton_poi_fused_add_clamp_mul_0.run(buf51, arg0_1, 256, grid=grid(256), stream=stream0)
buf52 = reinterpret_tensor(buf49, (16, 4, 4), (16, 4, 1), 0); del buf49 # reuse
# Topologically Sorted Source Nodes: [matmul_26], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf51, (16, 4, 4), (16, 4, 1), 0), out=buf52)
buf53 = reinterpret_tensor(buf52, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf52 # reuse
# Topologically Sorted Source Nodes: [res, mul_27, out_52, out_53], Original ATen: [aten.mul, aten.add, aten.clamp]
triton_poi_fused_add_clamp_mul_0.run(buf53, arg0_1, 256, grid=grid(256), stream=stream0)
buf54 = reinterpret_tensor(buf51, (16, 4, 4), (16, 4, 1), 0); del buf51 # reuse
# Topologically Sorted Source Nodes: [matmul_27], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf53, (16, 4, 4), (16, 4, 1), 0), out=buf54)
buf55 = reinterpret_tensor(buf54, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf54 # reuse
# Topologically Sorted Source Nodes: [res, mul_28, out_54, out_55], Original ATen: [aten.mul, aten.add, aten.clamp]
triton_poi_fused_add_clamp_mul_0.run(buf55, arg0_1, 256, grid=grid(256), stream=stream0)
buf56 = reinterpret_tensor(buf53, (16, 4, 4), (16, 4, 1), 0); del buf53 # reuse
# Topologically Sorted Source Nodes: [matmul_28], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf55, (16, 4, 4), (16, 4, 1), 0), out=buf56)
buf57 = reinterpret_tensor(buf56, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf56 # reuse
# Topologically Sorted Source Nodes: [res, mul_29, out_56, out_57], Original ATen: [aten.mul, aten.add, aten.clamp]
triton_poi_fused_add_clamp_mul_0.run(buf57, arg0_1, 256, grid=grid(256), stream=stream0)
buf58 = reinterpret_tensor(buf55, (16, 4, 4), (16, 4, 1), 0); del buf55 # reuse
# Topologically Sorted Source Nodes: [matmul_29], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf57, (16, 4, 4), (16, 4, 1), 0), out=buf58)
buf59 = reinterpret_tensor(buf58, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf58 # reuse
# Topologically Sorted Source Nodes: [res, mul_30, out_58, out_59], Original ATen: [aten.mul, aten.add, aten.clamp]
triton_poi_fused_add_clamp_mul_0.run(buf59, arg0_1, 256, grid=grid(256), stream=stream0)
buf60 = reinterpret_tensor(buf57, (16, 4, 4), (16, 4, 1), 0); del buf57 # reuse
# Topologically Sorted Source Nodes: [matmul_30], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf59, (16, 4, 4), (16, 4, 1), 0), out=buf60)
buf61 = reinterpret_tensor(buf60, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf60 # reuse
# Topologically Sorted Source Nodes: [res, mul_31, out_60, out_61], Original ATen: [aten.mul, aten.add, aten.clamp]
triton_poi_fused_add_clamp_mul_0.run(buf61, arg0_1, 256, grid=grid(256), stream=stream0)
buf62 = reinterpret_tensor(buf59, (16, 4, 4), (16, 4, 1), 0); del buf59 # reuse
# Topologically Sorted Source Nodes: [matmul_31], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf61, (16, 4, 4), (16, 4, 1), 0), out=buf62)
buf63 = reinterpret_tensor(buf62, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf62 # reuse
# Topologically Sorted Source Nodes: [res, mul_32, out_62, out_63], Original ATen: [aten.mul, aten.add, aten.clamp]
triton_poi_fused_add_clamp_mul_0.run(buf63, arg0_1, 256, grid=grid(256), stream=stream0)
buf64 = reinterpret_tensor(buf61, (16, 4, 4), (16, 4, 1), 0); del buf61 # reuse
# Topologically Sorted Source Nodes: [matmul_32], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf63, (16, 4, 4), (16, 4, 1), 0), out=buf64)
buf65 = reinterpret_tensor(buf64, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf64 # reuse
# Topologically Sorted Source Nodes: [res, mul_33, out_64, out_65], Original ATen: [aten.mul, aten.add, aten.clamp]
triton_poi_fused_add_clamp_mul_0.run(buf65, arg0_1, 256, grid=grid(256), stream=stream0)
buf66 = reinterpret_tensor(buf63, (16, 4, 4), (16, 4, 1), 0); del buf63 # reuse
# Topologically Sorted Source Nodes: [matmul_33], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf65, (16, 4, 4), (16, 4, 1), 0), out=buf66)
buf67 = reinterpret_tensor(buf66, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf66 # reuse
# Topologically Sorted Source Nodes: [res, mul_34, out_66, out_67], Original ATen: [aten.mul, aten.add, aten.clamp]
triton_poi_fused_add_clamp_mul_0.run(buf67, arg0_1, 256, grid=grid(256), stream=stream0)
buf68 = reinterpret_tensor(buf65, (16, 4, 4), (16, 4, 1), 0); del buf65 # reuse
# Topologically Sorted Source Nodes: [matmul_34], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf67, (16, 4, 4), (16, 4, 1), 0), out=buf68)
buf69 = reinterpret_tensor(buf68, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf68 # reuse
# Topologically Sorted Source Nodes: [res, mul_35, out_68, out_69], Original ATen: [aten.mul, aten.add, aten.clamp]
triton_poi_fused_add_clamp_mul_0.run(buf69, arg0_1, 256, grid=grid(256), stream=stream0)
buf70 = reinterpret_tensor(buf67, (16, 4, 4), (16, 4, 1), 0); del buf67 # reuse
# Topologically Sorted Source Nodes: [matmul_35], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf69, (16, 4, 4), (16, 4, 1), 0), out=buf70)
buf71 = reinterpret_tensor(buf70, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf70 # reuse
# Topologically Sorted Source Nodes: [res, mul_36, out_70, out_71], Original ATen: [aten.mul, aten.add, aten.clamp]
triton_poi_fused_add_clamp_mul_0.run(buf71, arg0_1, 256, grid=grid(256), stream=stream0)
buf72 = reinterpret_tensor(buf69, (16, 4, 4), (16, 4, 1), 0); del buf69 # reuse
# Topologically Sorted Source Nodes: [matmul_36], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf71, (16, 4, 4), (16, 4, 1), 0), out=buf72)
buf73 = reinterpret_tensor(buf72, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf72 # reuse
# Topologically Sorted Source Nodes: [res, mul_37, out_72, out_73], Original ATen: [aten.mul, aten.add, aten.clamp]
triton_poi_fused_add_clamp_mul_0.run(buf73, arg0_1, 256, grid=grid(256), stream=stream0)
buf74 = reinterpret_tensor(buf71, (16, 4, 4), (16, 4, 1), 0); del buf71 # reuse
# Topologically Sorted Source Nodes: [matmul_37], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf73, (16, 4, 4), (16, 4, 1), 0), out=buf74)
buf75 = reinterpret_tensor(buf74, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf74 # reuse
# Topologically Sorted Source Nodes: [res, mul_38, out_74, out_75], Original ATen: [aten.mul, aten.add, aten.clamp]
triton_poi_fused_add_clamp_mul_0.run(buf75, arg0_1, 256, grid=grid(256), stream=stream0)
buf76 = reinterpret_tensor(buf73, (16, 4, 4), (16, 4, 1), 0); del buf73 # reuse
# Topologically Sorted Source Nodes: [matmul_38], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf75, (16, 4, 4), (16, 4, 1), 0), out=buf76)
buf77 = reinterpret_tensor(buf76, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf76 # reuse
# Topologically Sorted Source Nodes: [res, mul_39, out_76, out_77], Original ATen: [aten.mul, aten.add, aten.clamp]
triton_poi_fused_add_clamp_mul_0.run(buf77, arg0_1, 256, grid=grid(256), stream=stream0)
buf78 = reinterpret_tensor(buf75, (16, 4, 4), (16, 4, 1), 0); del buf75 # reuse
# Topologically Sorted Source Nodes: [matmul_39], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf77, (16, 4, 4), (16, 4, 1), 0), out=buf78)
buf79 = reinterpret_tensor(buf78, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf78 # reuse
# Topologically Sorted Source Nodes: [res, mul_40, out_78, out_79], Original ATen: [aten.mul, aten.add, aten.clamp]
triton_poi_fused_add_clamp_mul_0.run(buf79, arg0_1, 256, grid=grid(256), stream=stream0)
buf80 = reinterpret_tensor(buf77, (16, 4, 4), (16, 4, 1), 0); del buf77 # reuse
# Topologically Sorted Source Nodes: [matmul_40], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf79, (16, 4, 4), (16, 4, 1), 0), out=buf80)
buf81 = reinterpret_tensor(buf80, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf80 # reuse
# Topologically Sorted Source Nodes: [res, mul_41, out_80, out_81], Original ATen: [aten.mul, aten.add, aten.clamp]
triton_poi_fused_add_clamp_mul_0.run(buf81, arg0_1, 256, grid=grid(256), stream=stream0)
buf82 = reinterpret_tensor(buf79, (16, 4, 4), (16, 4, 1), 0); del buf79 # reuse
# Topologically Sorted Source Nodes: [matmul_41], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf81, (16, 4, 4), (16, 4, 1), 0), out=buf82)
buf83 = reinterpret_tensor(buf82, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf82 # reuse
# Topologically Sorted Source Nodes: [res, mul_42, out_82, out_83], Original ATen: [aten.mul, aten.add, aten.clamp]
triton_poi_fused_add_clamp_mul_0.run(buf83, arg0_1, 256, grid=grid(256), stream=stream0)
buf84 = reinterpret_tensor(buf81, (16, 4, 4), (16, 4, 1), 0); del buf81 # reuse
# Topologically Sorted Source Nodes: [matmul_42], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf83, (16, 4, 4), (16, 4, 1), 0), out=buf84)
buf85 = reinterpret_tensor(buf84, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf84 # reuse
# Topologically Sorted Source Nodes: [res, mul_43, out_84, out_85], Original ATen: [aten.mul, aten.add, aten.clamp]
triton_poi_fused_add_clamp_mul_0.run(buf85, arg0_1, 256, grid=grid(256), stream=stream0)
buf86 = reinterpret_tensor(buf83, (16, 4, 4), (16, 4, 1), 0); del buf83 # reuse
# Topologically Sorted Source Nodes: [matmul_43], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf85, (16, 4, 4), (16, 4, 1), 0), out=buf86)
buf87 = reinterpret_tensor(buf86, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf86 # reuse
# Topologically Sorted Source Nodes: [res, mul_44, out_86, out_87], Original ATen: [aten.mul, aten.add, aten.clamp]
triton_poi_fused_add_clamp_mul_0.run(buf87, arg0_1, 256, grid=grid(256), stream=stream0)
buf88 = reinterpret_tensor(buf85, (16, 4, 4), (16, 4, 1), 0); del buf85 # reuse
# Topologically Sorted Source Nodes: [matmul_44], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf87, (16, 4, 4), (16, 4, 1), 0), out=buf88)
buf89 = reinterpret_tensor(buf88, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf88 # reuse
# Topologically Sorted Source Nodes: [res, mul_45, out_88, out_89], Original ATen: [aten.mul, aten.add, aten.clamp]
triton_poi_fused_add_clamp_mul_0.run(buf89, arg0_1, 256, grid=grid(256), stream=stream0)
buf90 = reinterpret_tensor(buf87, (16, 4, 4), (16, 4, 1), 0); del buf87 # reuse
# Topologically Sorted Source Nodes: [matmul_45], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf89, (16, 4, 4), (16, 4, 1), 0), out=buf90)
buf91 = reinterpret_tensor(buf90, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf90 # reuse
# Topologically Sorted Source Nodes: [res, mul_46, out_90, out_91], Original ATen: [aten.mul, aten.add, aten.clamp]
triton_poi_fused_add_clamp_mul_0.run(buf91, arg0_1, 256, grid=grid(256), stream=stream0)
buf92 = reinterpret_tensor(buf89, (16, 4, 4), (16, 4, 1), 0); del buf89 # reuse
# Topologically Sorted Source Nodes: [matmul_46], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf91, (16, 4, 4), (16, 4, 1), 0), out=buf92)
buf93 = reinterpret_tensor(buf92, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf92 # reuse
# Topologically Sorted Source Nodes: [res, mul_47, out_92, out_93], Original ATen: [aten.mul, aten.add, aten.clamp]
triton_poi_fused_add_clamp_mul_0.run(buf93, arg0_1, 256, grid=grid(256), stream=stream0)
buf94 = reinterpret_tensor(buf91, (16, 4, 4), (16, 4, 1), 0); del buf91 # reuse
# Topologically Sorted Source Nodes: [matmul_47], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf93, (16, 4, 4), (16, 4, 1), 0), out=buf94)
buf95 = reinterpret_tensor(buf94, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf94 # reuse
# Topologically Sorted Source Nodes: [res, mul_48, out_94, out_95], Original ATen: [aten.mul, aten.add, aten.clamp]
triton_poi_fused_add_clamp_mul_0.run(buf95, arg0_1, 256, grid=grid(256), stream=stream0)
buf96 = reinterpret_tensor(buf93, (16, 4, 4), (16, 4, 1), 0); del buf93 # reuse
# Topologically Sorted Source Nodes: [matmul_48], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf95, (16, 4, 4), (16, 4, 1), 0), out=buf96)
buf97 = reinterpret_tensor(buf96, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf96 # reuse
# Topologically Sorted Source Nodes: [res, mul_49, out_96, out_97], Original ATen: [aten.mul, aten.add, aten.clamp]
triton_poi_fused_add_clamp_mul_0.run(buf97, arg0_1, 256, grid=grid(256), stream=stream0)
buf98 = reinterpret_tensor(buf95, (16, 4, 4), (16, 4, 1), 0); del buf95 # reuse
# Topologically Sorted Source Nodes: [matmul_49], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf97, (16, 4, 4), (16, 4, 1), 0), out=buf98)
del arg1_1
del buf97
buf99 = reinterpret_tensor(buf98, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf98 # reuse
# Topologically Sorted Source Nodes: [res, mul_50, out_98, out_99], Original ATen: [aten.mul, aten.add, aten.clamp]
triton_poi_fused_add_clamp_mul_0.run(buf99, arg0_1, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf99, )
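# Structural note on call() above (an observation, not generated output):
# because this is an inference graph, all num_layers = 50 propagation steps
# are fully unrolled, alternating 50 extern bmm launches with 50 launches of
# the fused mul/add/clamp kernel, ping-ponging between two reinterpreted
# buffers instead of allocating a fresh one per iteration.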
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class LabelPropagation(nn.Module):
"""label propagation model adapted from https://github.com/CUAI/CorrectAndSmooth
`"Learning from Labeled and
Unlabeled Datawith Label Propagation"
<http://mlg.eng.cam.ac.uk/zoubin/papers/CMU-CALD-02-107.pdf>`_ paper
"""
def __init__(self, num_layers=50, alpha=0.5, residual=True):
super().__init__()
self.num_layers = num_layers
self.alpha = alpha
self.residual = residual
@torch.no_grad()
def forward(self, y, adj, mask=None):
if y.dtype == torch.long:
y = F.one_hot(y.view(-1)).float()
out = y
if mask is not None:
out = torch.zeros_like(y)
out[mask] = y[mask]
if self.residual:
res = (1 - self.alpha) * out
else:
res = out.clone()
for _ in range(self.num_layers):
out = self.alpha * (adj @ out) + res
out = torch.clamp(out, 0, 1)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
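# A minimal eager reference for one propagation step (an illustrative
# sketch, not part of the dump): res stays fixed at (1 - alpha) * y while
# out is updated, which with the default alpha=0.5 matches the fused
# mul/add/clamp kernel in the compiled code.
def reference_step(out, res, adj, alpha=0.5):
    return torch.clamp(alpha * (adj @ out) + res, 0, 1)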
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_clamp_mul_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_out_ptr0 + x0, xmask)
tmp3 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp5 = tmp2 + tmp4
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = 1.0
tmp9 = triton_helpers.minimum(tmp7, tmp8)
tl.store(in_out_ptr0 + x0, tmp9, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(arg0_1, (16, 4, 4), (16, 4, 1), 0),
out=buf0)
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
get_raw_stream(0)
triton_poi_fused_add_clamp_mul_0[grid(256)](buf1, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0), out
=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
triton_poi_fused_add_clamp_mul_0[grid(256)](buf3, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf1, (16, 4, 4), (16, 4, 1), 0)
del buf1
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), out
=buf4)
buf5 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
triton_poi_fused_add_clamp_mul_0[grid(256)](buf5, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0)
del buf3
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0), out
=buf6)
buf7 = reinterpret_tensor(buf6, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf6
triton_poi_fused_add_clamp_mul_0[grid(256)](buf7, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf8 = reinterpret_tensor(buf5, (16, 4, 4), (16, 4, 1), 0)
del buf5
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0), out
=buf8)
buf9 = reinterpret_tensor(buf8, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf8
triton_poi_fused_add_clamp_mul_0[grid(256)](buf9, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf10 = reinterpret_tensor(buf7, (16, 4, 4), (16, 4, 1), 0)
del buf7
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0), out
=buf10)
buf11 = reinterpret_tensor(buf10, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf10
triton_poi_fused_add_clamp_mul_0[grid(256)](buf11, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf12 = reinterpret_tensor(buf9, (16, 4, 4), (16, 4, 1), 0)
del buf9
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf11, (16, 4, 4), (16, 4, 1), 0),
out=buf12)
buf13 = reinterpret_tensor(buf12, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf12
triton_poi_fused_add_clamp_mul_0[grid(256)](buf13, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf14 = reinterpret_tensor(buf11, (16, 4, 4), (16, 4, 1), 0)
del buf11
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf13, (16, 4, 4), (16, 4, 1), 0),
out=buf14)
buf15 = reinterpret_tensor(buf14, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf14
triton_poi_fused_add_clamp_mul_0[grid(256)](buf15, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf16 = reinterpret_tensor(buf13, (16, 4, 4), (16, 4, 1), 0)
del buf13
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf15, (16, 4, 4), (16, 4, 1), 0),
out=buf16)
buf17 = reinterpret_tensor(buf16, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf16
triton_poi_fused_add_clamp_mul_0[grid(256)](buf17, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf18 = reinterpret_tensor(buf15, (16, 4, 4), (16, 4, 1), 0)
del buf15
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf17, (16, 4, 4), (16, 4, 1), 0),
out=buf18)
buf19 = reinterpret_tensor(buf18, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf18
triton_poi_fused_add_clamp_mul_0[grid(256)](buf19, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf20 = reinterpret_tensor(buf17, (16, 4, 4), (16, 4, 1), 0)
del buf17
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf19, (16, 4, 4), (16, 4, 1), 0),
out=buf20)
buf21 = reinterpret_tensor(buf20, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf20
triton_poi_fused_add_clamp_mul_0[grid(256)](buf21, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf22 = reinterpret_tensor(buf19, (16, 4, 4), (16, 4, 1), 0)
del buf19
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf21, (16, 4, 4), (16, 4, 1), 0),
out=buf22)
buf23 = reinterpret_tensor(buf22, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf22
triton_poi_fused_add_clamp_mul_0[grid(256)](buf23, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf24 = reinterpret_tensor(buf21, (16, 4, 4), (16, 4, 1), 0)
del buf21
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf23, (16, 4, 4), (16, 4, 1), 0),
out=buf24)
buf25 = reinterpret_tensor(buf24, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf24
triton_poi_fused_add_clamp_mul_0[grid(256)](buf25, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf26 = reinterpret_tensor(buf23, (16, 4, 4), (16, 4, 1), 0)
del buf23
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf25, (16, 4, 4), (16, 4, 1), 0),
out=buf26)
buf27 = reinterpret_tensor(buf26, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf26
triton_poi_fused_add_clamp_mul_0[grid(256)](buf27, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf28 = reinterpret_tensor(buf25, (16, 4, 4), (16, 4, 1), 0)
del buf25
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf27, (16, 4, 4), (16, 4, 1), 0),
out=buf28)
buf29 = reinterpret_tensor(buf28, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf28
triton_poi_fused_add_clamp_mul_0[grid(256)](buf29, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf30 = reinterpret_tensor(buf27, (16, 4, 4), (16, 4, 1), 0)
del buf27
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf29, (16, 4, 4), (16, 4, 1), 0),
out=buf30)
buf31 = reinterpret_tensor(buf30, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf30
triton_poi_fused_add_clamp_mul_0[grid(256)](buf31, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf32 = reinterpret_tensor(buf29, (16, 4, 4), (16, 4, 1), 0)
del buf29
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf31, (16, 4, 4), (16, 4, 1), 0),
out=buf32)
buf33 = reinterpret_tensor(buf32, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf32
triton_poi_fused_add_clamp_mul_0[grid(256)](buf33, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf34 = reinterpret_tensor(buf31, (16, 4, 4), (16, 4, 1), 0)
del buf31
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf33, (16, 4, 4), (16, 4, 1), 0),
out=buf34)
buf35 = reinterpret_tensor(buf34, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf34
triton_poi_fused_add_clamp_mul_0[grid(256)](buf35, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf36 = reinterpret_tensor(buf33, (16, 4, 4), (16, 4, 1), 0)
del buf33
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf35, (16, 4, 4), (16, 4, 1), 0),
out=buf36)
buf37 = reinterpret_tensor(buf36, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf36
triton_poi_fused_add_clamp_mul_0[grid(256)](buf37, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf38 = reinterpret_tensor(buf35, (16, 4, 4), (16, 4, 1), 0)
del buf35
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf37, (16, 4, 4), (16, 4, 1), 0),
out=buf38)
buf39 = reinterpret_tensor(buf38, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf38
triton_poi_fused_add_clamp_mul_0[grid(256)](buf39, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf40 = reinterpret_tensor(buf37, (16, 4, 4), (16, 4, 1), 0)
del buf37
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf39, (16, 4, 4), (16, 4, 1), 0),
out=buf40)
buf41 = reinterpret_tensor(buf40, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf40
triton_poi_fused_add_clamp_mul_0[grid(256)](buf41, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf42 = reinterpret_tensor(buf39, (16, 4, 4), (16, 4, 1), 0)
del buf39
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf41, (16, 4, 4), (16, 4, 1), 0),
out=buf42)
buf43 = reinterpret_tensor(buf42, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf42
triton_poi_fused_add_clamp_mul_0[grid(256)](buf43, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf44 = reinterpret_tensor(buf41, (16, 4, 4), (16, 4, 1), 0)
del buf41
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf43, (16, 4, 4), (16, 4, 1), 0),
out=buf44)
buf45 = reinterpret_tensor(buf44, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf44
triton_poi_fused_add_clamp_mul_0[grid(256)](buf45, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf46 = reinterpret_tensor(buf43, (16, 4, 4), (16, 4, 1), 0)
del buf43
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf45, (16, 4, 4), (16, 4, 1), 0),
out=buf46)
buf47 = reinterpret_tensor(buf46, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf46
triton_poi_fused_add_clamp_mul_0[grid(256)](buf47, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf48 = reinterpret_tensor(buf45, (16, 4, 4), (16, 4, 1), 0)
del buf45
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf47, (16, 4, 4), (16, 4, 1), 0),
out=buf48)
buf49 = reinterpret_tensor(buf48, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf48
triton_poi_fused_add_clamp_mul_0[grid(256)](buf49, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf50 = reinterpret_tensor(buf47, (16, 4, 4), (16, 4, 1), 0)
del buf47
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf49, (16, 4, 4), (16, 4, 1), 0),
out=buf50)
buf51 = reinterpret_tensor(buf50, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf50
triton_poi_fused_add_clamp_mul_0[grid(256)](buf51, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf52 = reinterpret_tensor(buf49, (16, 4, 4), (16, 4, 1), 0)
del buf49
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf51, (16, 4, 4), (16, 4, 1), 0),
out=buf52)
buf53 = reinterpret_tensor(buf52, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf52
triton_poi_fused_add_clamp_mul_0[grid(256)](buf53, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf54 = reinterpret_tensor(buf51, (16, 4, 4), (16, 4, 1), 0)
del buf51
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf53, (16, 4, 4), (16, 4, 1), 0),
out=buf54)
buf55 = reinterpret_tensor(buf54, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf54
triton_poi_fused_add_clamp_mul_0[grid(256)](buf55, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf56 = reinterpret_tensor(buf53, (16, 4, 4), (16, 4, 1), 0)
del buf53
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf55, (16, 4, 4), (16, 4, 1), 0),
out=buf56)
buf57 = reinterpret_tensor(buf56, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf56
triton_poi_fused_add_clamp_mul_0[grid(256)](buf57, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf58 = reinterpret_tensor(buf55, (16, 4, 4), (16, 4, 1), 0)
del buf55
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf57, (16, 4, 4), (16, 4, 1), 0),
out=buf58)
buf59 = reinterpret_tensor(buf58, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf58
triton_poi_fused_add_clamp_mul_0[grid(256)](buf59, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf60 = reinterpret_tensor(buf57, (16, 4, 4), (16, 4, 1), 0)
del buf57
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf59, (16, 4, 4), (16, 4, 1), 0),
out=buf60)
buf61 = reinterpret_tensor(buf60, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf60
triton_poi_fused_add_clamp_mul_0[grid(256)](buf61, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf62 = reinterpret_tensor(buf59, (16, 4, 4), (16, 4, 1), 0)
del buf59
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf61, (16, 4, 4), (16, 4, 1), 0),
out=buf62)
buf63 = reinterpret_tensor(buf62, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf62
triton_poi_fused_add_clamp_mul_0[grid(256)](buf63, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf64 = reinterpret_tensor(buf61, (16, 4, 4), (16, 4, 1), 0)
del buf61
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf63, (16, 4, 4), (16, 4, 1), 0),
out=buf64)
buf65 = reinterpret_tensor(buf64, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf64
triton_poi_fused_add_clamp_mul_0[grid(256)](buf65, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf66 = reinterpret_tensor(buf63, (16, 4, 4), (16, 4, 1), 0)
del buf63
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf65, (16, 4, 4), (16, 4, 1), 0),
out=buf66)
buf67 = reinterpret_tensor(buf66, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf66
triton_poi_fused_add_clamp_mul_0[grid(256)](buf67, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf68 = reinterpret_tensor(buf65, (16, 4, 4), (16, 4, 1), 0)
del buf65
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf67, (16, 4, 4), (16, 4, 1), 0),
out=buf68)
buf69 = reinterpret_tensor(buf68, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf68
triton_poi_fused_add_clamp_mul_0[grid(256)](buf69, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf70 = reinterpret_tensor(buf67, (16, 4, 4), (16, 4, 1), 0)
del buf67
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf69, (16, 4, 4), (16, 4, 1), 0),
out=buf70)
buf71 = reinterpret_tensor(buf70, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf70
triton_poi_fused_add_clamp_mul_0[grid(256)](buf71, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf72 = reinterpret_tensor(buf69, (16, 4, 4), (16, 4, 1), 0)
del buf69
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf71, (16, 4, 4), (16, 4, 1), 0),
out=buf72)
buf73 = reinterpret_tensor(buf72, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf72
triton_poi_fused_add_clamp_mul_0[grid(256)](buf73, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf74 = reinterpret_tensor(buf71, (16, 4, 4), (16, 4, 1), 0)
del buf71
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf73, (16, 4, 4), (16, 4, 1), 0),
out=buf74)
buf75 = reinterpret_tensor(buf74, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf74
triton_poi_fused_add_clamp_mul_0[grid(256)](buf75, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf76 = reinterpret_tensor(buf73, (16, 4, 4), (16, 4, 1), 0)
del buf73
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf75, (16, 4, 4), (16, 4, 1), 0),
out=buf76)
buf77 = reinterpret_tensor(buf76, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf76
triton_poi_fused_add_clamp_mul_0[grid(256)](buf77, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf78 = reinterpret_tensor(buf75, (16, 4, 4), (16, 4, 1), 0)
del buf75
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf77, (16, 4, 4), (16, 4, 1), 0),
out=buf78)
buf79 = reinterpret_tensor(buf78, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf78
triton_poi_fused_add_clamp_mul_0[grid(256)](buf79, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf80 = reinterpret_tensor(buf77, (16, 4, 4), (16, 4, 1), 0)
del buf77
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf79, (16, 4, 4), (16, 4, 1), 0),
out=buf80)
buf81 = reinterpret_tensor(buf80, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf80
triton_poi_fused_add_clamp_mul_0[grid(256)](buf81, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf82 = reinterpret_tensor(buf79, (16, 4, 4), (16, 4, 1), 0)
del buf79
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf81, (16, 4, 4), (16, 4, 1), 0),
out=buf82)
buf83 = reinterpret_tensor(buf82, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf82
triton_poi_fused_add_clamp_mul_0[grid(256)](buf83, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf84 = reinterpret_tensor(buf81, (16, 4, 4), (16, 4, 1), 0)
del buf81
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf83, (16, 4, 4), (16, 4, 1), 0),
out=buf84)
buf85 = reinterpret_tensor(buf84, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf84
triton_poi_fused_add_clamp_mul_0[grid(256)](buf85, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf86 = reinterpret_tensor(buf83, (16, 4, 4), (16, 4, 1), 0)
del buf83
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf85, (16, 4, 4), (16, 4, 1), 0),
out=buf86)
buf87 = reinterpret_tensor(buf86, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf86
triton_poi_fused_add_clamp_mul_0[grid(256)](buf87, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf88 = reinterpret_tensor(buf85, (16, 4, 4), (16, 4, 1), 0)
del buf85
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf87, (16, 4, 4), (16, 4, 1), 0),
out=buf88)
buf89 = reinterpret_tensor(buf88, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf88
triton_poi_fused_add_clamp_mul_0[grid(256)](buf89, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf90 = reinterpret_tensor(buf87, (16, 4, 4), (16, 4, 1), 0)
del buf87
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf89, (16, 4, 4), (16, 4, 1), 0),
out=buf90)
buf91 = reinterpret_tensor(buf90, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf90
triton_poi_fused_add_clamp_mul_0[grid(256)](buf91, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf92 = reinterpret_tensor(buf89, (16, 4, 4), (16, 4, 1), 0)
del buf89
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf91, (16, 4, 4), (16, 4, 1), 0),
out=buf92)
buf93 = reinterpret_tensor(buf92, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf92
triton_poi_fused_add_clamp_mul_0[grid(256)](buf93, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf94 = reinterpret_tensor(buf91, (16, 4, 4), (16, 4, 1), 0)
del buf91
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf93, (16, 4, 4), (16, 4, 1), 0),
out=buf94)
buf95 = reinterpret_tensor(buf94, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf94
triton_poi_fused_add_clamp_mul_0[grid(256)](buf95, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf96 = reinterpret_tensor(buf93, (16, 4, 4), (16, 4, 1), 0)
del buf93
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf95, (16, 4, 4), (16, 4, 1), 0),
out=buf96)
buf97 = reinterpret_tensor(buf96, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf96
triton_poi_fused_add_clamp_mul_0[grid(256)](buf97, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
buf98 = reinterpret_tensor(buf95, (16, 4, 4), (16, 4, 1), 0)
del buf95
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf97, (16, 4, 4), (16, 4, 1), 0),
out=buf98)
del arg1_1
del buf97
buf99 = reinterpret_tensor(buf98, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf98
triton_poi_fused_add_clamp_mul_0[grid(256)](buf99, arg0_1, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf99,
class LabelPropagationNew(nn.Module):
"""label propagation model adapted from https://github.com/CUAI/CorrectAndSmooth
`"Learning from Labeled and
Unlabeled Data with Label Propagation"
<http://mlg.eng.cam.ac.uk/zoubin/papers/CMU-CALD-02-107.pdf>`_ paper
"""
def __init__(self, num_layers=50, alpha=0.5, residual=True):
super().__init__()
self.num_layers = num_layers
self.alpha = alpha
self.residual = residual
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
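# A minimal eager-mode sketch of what the unrolled bmm + fused
# add/clamp/mul sequence in call() computes; this is an inferred reading
# of the kernels above, not code from the source repo, and assumes the
# residual rule y <- clamp(alpha * (adj @ y) + (1 - alpha) * y0, 0, 1).
def label_propagation_reference(y0, adj, num_layers=50, alpha=0.5):
    y = y0
    for _ in range(num_layers):
        # one propagation step followed by the fused add/clamp/mul epilogue
        y = torch.clamp(alpha * torch.matmul(adj, y) + (1 - alpha) * y0, 0.0, 1.0)
    return y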
|
EdisonLeeeee/GraphGallery
|
LabelPropagation
| false | 13,658 |
[
"MIT"
] | 300 |
4eec9c5136bda14809bd22584b26cc346cdb633b
|
https://github.com/EdisonLeeeee/GraphGallery/tree/4eec9c5136bda14809bd22584b26cc346cdb633b
|
MaxPool2dDynamicSamePadding
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/qq/cqqb7lg6q4ad6ao65faglzxqzvt3ad3sdvodh7k57sce43e7szvx.py
# Topologically Sorted Source Nodes: [max_pool2d], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# max_pool2d => getitem
# Graph fragment:
# %getitem : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_0 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x0 = xindex % 4
x4 = xindex
tmp0 = (-1) + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-1) + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + ((-5) + x4), tmp10 & xmask, other=0.0)
tmp12 = x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp8 & tmp13
tmp16 = tmp15 & tmp14
tmp17 = tl.load(in_ptr0 + ((-4) + x4), tmp16 & xmask, other=0.0)
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + x0
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp8 & tmp20
tmp23 = tmp22 & tmp21
tmp24 = tl.load(in_ptr0 + ((-3) + x4), tmp23 & xmask, other=0.0)
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 2 + x0
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp8 & tmp27
tmp30 = tmp29 & tmp28
tmp31 = tl.load(in_ptr0 + ((-2) + x4), tmp30 & xmask, other=0.0)
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = x1
tmp34 = tmp33 >= tmp1
tmp35 = tmp33 < tmp3
tmp36 = tmp34 & tmp35
tmp37 = tmp36 & tmp6
tmp38 = tmp37 & tmp7
tmp39 = tl.load(in_ptr0 + ((-1) + x4), tmp38 & xmask, other=0.0)
tmp40 = triton_helpers.maximum(tmp39, tmp32)
tmp41 = tmp36 & tmp13
tmp42 = tmp41 & tmp14
tmp43 = tl.load(in_ptr0 + (x4), tmp42 & xmask, other=0.0)
tmp44 = triton_helpers.maximum(tmp43, tmp40)
tmp45 = tmp36 & tmp20
tmp46 = tmp45 & tmp21
tmp47 = tl.load(in_ptr0 + (1 + x4), tmp46 & xmask, other=0.0)
tmp48 = triton_helpers.maximum(tmp47, tmp44)
tmp49 = tmp36 & tmp27
tmp50 = tmp49 & tmp28
tmp51 = tl.load(in_ptr0 + (2 + x4), tmp50 & xmask, other=0.0)
tmp52 = triton_helpers.maximum(tmp51, tmp48)
tmp53 = 1 + x1
tmp54 = tmp53 >= tmp1
tmp55 = tmp53 < tmp3
tmp56 = tmp54 & tmp55
tmp57 = tmp56 & tmp6
tmp58 = tmp57 & tmp7
tmp59 = tl.load(in_ptr0 + (3 + x4), tmp58 & xmask, other=0.0)
tmp60 = triton_helpers.maximum(tmp59, tmp52)
tmp61 = tmp56 & tmp13
tmp62 = tmp61 & tmp14
tmp63 = tl.load(in_ptr0 + (4 + x4), tmp62 & xmask, other=0.0)
tmp64 = triton_helpers.maximum(tmp63, tmp60)
tmp65 = tmp56 & tmp20
tmp66 = tmp65 & tmp21
tmp67 = tl.load(in_ptr0 + (5 + x4), tmp66 & xmask, other=0.0)
tmp68 = triton_helpers.maximum(tmp67, tmp64)
tmp69 = tmp56 & tmp27
tmp70 = tmp69 & tmp28
tmp71 = tl.load(in_ptr0 + (6 + x4), tmp70 & xmask, other=0.0)
tmp72 = triton_helpers.maximum(tmp71, tmp68)
tmp73 = 2 + x1
tmp74 = tmp73 >= tmp1
tmp75 = tmp73 < tmp3
tmp76 = tmp74 & tmp75
tmp77 = tmp76 & tmp6
tmp78 = tmp77 & tmp7
tmp79 = tl.load(in_ptr0 + (7 + x4), tmp78 & xmask, other=0.0)
tmp80 = triton_helpers.maximum(tmp79, tmp72)
tmp81 = tmp76 & tmp13
tmp82 = tmp81 & tmp14
tmp83 = tl.load(in_ptr0 + (8 + x4), tmp82 & xmask, other=0.0)
tmp84 = triton_helpers.maximum(tmp83, tmp80)
tmp85 = tmp76 & tmp20
tmp86 = tmp85 & tmp21
tmp87 = tl.load(in_ptr0 + (9 + x4), tmp86 & xmask, other=0.0)
tmp88 = triton_helpers.maximum(tmp87, tmp84)
tmp89 = tmp76 & tmp27
tmp90 = tmp89 & tmp28
tmp91 = tl.load(in_ptr0 + (10 + x4), tmp90 & xmask, other=0.0)
tmp92 = triton_helpers.maximum(tmp91, tmp88)
tl.store(out_ptr0 + (x4), tmp92, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [max_pool2d], Original ATen: [aten.max_pool2d_with_indices]
stream0 = get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class MaxPool2dDynamicSamePadding(nn.MaxPool2d):
"""2D MaxPooling like TensorFlow's 'SAME' mode, with a dynamic image size.
    The amount of padding is computed dynamically in the forward pass.
"""
def __init__(self, kernel_size, stride, padding=0, dilation=1,
return_indices=False, ceil_mode=False):
super().__init__(kernel_size, stride, padding, dilation,
return_indices, ceil_mode)
self.stride = [self.stride] * 2 if isinstance(self.stride, int
) else self.stride
self.kernel_size = [self.kernel_size] * 2 if isinstance(self.
kernel_size, int) else self.kernel_size
self.dilation = [self.dilation] * 2 if isinstance(self.dilation, int
) else self.dilation
def forward(self, x):
ih, iw = x.size()[-2:]
kh, kw = self.kernel_size
sh, sw = self.stride
oh, ow = math.ceil(ih / sh), math.ceil(iw / sw)
pad_h = max((oh - 1) * self.stride[0] + (kh - 1) * self.dilation[0] +
1 - ih, 0)
pad_w = max((ow - 1) * self.stride[1] + (kw - 1) * self.dilation[1] +
1 - iw, 0)
if pad_h > 0 or pad_w > 0:
x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h -
pad_h // 2])
return F.max_pool2d(x, self.kernel_size, self.stride, self.padding,
self.dilation, self.ceil_mode, self.return_indices)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'kernel_size': 4, 'stride': 1}]
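# Worked example of the 'SAME' arithmetic for the get_init_inputs()
# configuration (kernel_size=4, stride=1, dilation=1, 4x4 input):
#   oh = ceil(4 / 1) = 4
#   pad_h = max((4 - 1) * 1 + (4 - 1) * 1 + 1 - 4, 0) = 3
# and F.pad splits 3 asymmetrically as [1, 2, 1, 2], so the output stays
# 4x4. The helper below is an illustrative sanity check, not part of the
# source repo.
def _check_same_padding_shape():
    pool = MaxPool2dDynamicSamePadding(kernel_size=4, stride=1)
    out = pool(torch.rand(4, 4, 4, 4))
    assert out.shape == (4, 4, 4, 4)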
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x0 = xindex % 4
x4 = xindex
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-5 + x4), tmp10 & xmask, other=0.0)
tmp12 = x0
tmp13 = tmp12 >= tmp1
tmp14 = tmp12 < tmp3
tmp15 = tmp8 & tmp13
tmp16 = tmp15 & tmp14
tmp17 = tl.load(in_ptr0 + (-4 + x4), tmp16 & xmask, other=0.0)
tmp18 = triton_helpers.maximum(tmp17, tmp11)
tmp19 = 1 + x0
tmp20 = tmp19 >= tmp1
tmp21 = tmp19 < tmp3
tmp22 = tmp8 & tmp20
tmp23 = tmp22 & tmp21
tmp24 = tl.load(in_ptr0 + (-3 + x4), tmp23 & xmask, other=0.0)
tmp25 = triton_helpers.maximum(tmp24, tmp18)
tmp26 = 2 + x0
tmp27 = tmp26 >= tmp1
tmp28 = tmp26 < tmp3
tmp29 = tmp8 & tmp27
tmp30 = tmp29 & tmp28
tmp31 = tl.load(in_ptr0 + (-2 + x4), tmp30 & xmask, other=0.0)
tmp32 = triton_helpers.maximum(tmp31, tmp25)
tmp33 = x1
tmp34 = tmp33 >= tmp1
tmp35 = tmp33 < tmp3
tmp36 = tmp34 & tmp35
tmp37 = tmp36 & tmp6
tmp38 = tmp37 & tmp7
tmp39 = tl.load(in_ptr0 + (-1 + x4), tmp38 & xmask, other=0.0)
tmp40 = triton_helpers.maximum(tmp39, tmp32)
tmp41 = tmp36 & tmp13
tmp42 = tmp41 & tmp14
tmp43 = tl.load(in_ptr0 + x4, tmp42 & xmask, other=0.0)
tmp44 = triton_helpers.maximum(tmp43, tmp40)
tmp45 = tmp36 & tmp20
tmp46 = tmp45 & tmp21
tmp47 = tl.load(in_ptr0 + (1 + x4), tmp46 & xmask, other=0.0)
tmp48 = triton_helpers.maximum(tmp47, tmp44)
tmp49 = tmp36 & tmp27
tmp50 = tmp49 & tmp28
tmp51 = tl.load(in_ptr0 + (2 + x4), tmp50 & xmask, other=0.0)
tmp52 = triton_helpers.maximum(tmp51, tmp48)
tmp53 = 1 + x1
tmp54 = tmp53 >= tmp1
tmp55 = tmp53 < tmp3
tmp56 = tmp54 & tmp55
tmp57 = tmp56 & tmp6
tmp58 = tmp57 & tmp7
tmp59 = tl.load(in_ptr0 + (3 + x4), tmp58 & xmask, other=0.0)
tmp60 = triton_helpers.maximum(tmp59, tmp52)
tmp61 = tmp56 & tmp13
tmp62 = tmp61 & tmp14
tmp63 = tl.load(in_ptr0 + (4 + x4), tmp62 & xmask, other=0.0)
tmp64 = triton_helpers.maximum(tmp63, tmp60)
tmp65 = tmp56 & tmp20
tmp66 = tmp65 & tmp21
tmp67 = tl.load(in_ptr0 + (5 + x4), tmp66 & xmask, other=0.0)
tmp68 = triton_helpers.maximum(tmp67, tmp64)
tmp69 = tmp56 & tmp27
tmp70 = tmp69 & tmp28
tmp71 = tl.load(in_ptr0 + (6 + x4), tmp70 & xmask, other=0.0)
tmp72 = triton_helpers.maximum(tmp71, tmp68)
tmp73 = 2 + x1
tmp74 = tmp73 >= tmp1
tmp75 = tmp73 < tmp3
tmp76 = tmp74 & tmp75
tmp77 = tmp76 & tmp6
tmp78 = tmp77 & tmp7
tmp79 = tl.load(in_ptr0 + (7 + x4), tmp78 & xmask, other=0.0)
tmp80 = triton_helpers.maximum(tmp79, tmp72)
tmp81 = tmp76 & tmp13
tmp82 = tmp81 & tmp14
tmp83 = tl.load(in_ptr0 + (8 + x4), tmp82 & xmask, other=0.0)
tmp84 = triton_helpers.maximum(tmp83, tmp80)
tmp85 = tmp76 & tmp20
tmp86 = tmp85 & tmp21
tmp87 = tl.load(in_ptr0 + (9 + x4), tmp86 & xmask, other=0.0)
tmp88 = triton_helpers.maximum(tmp87, tmp84)
tmp89 = tmp76 & tmp27
tmp90 = tmp89 & tmp28
tmp91 = tl.load(in_ptr0 + (10 + x4), tmp90 & xmask, other=0.0)
tmp92 = triton_helpers.maximum(tmp91, tmp88)
tl.store(out_ptr0 + x4, tmp92, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_max_pool2d_with_indices_0[grid(256)](arg0_1, buf0,
256, XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class MaxPool2dDynamicSamePaddingNew(nn.MaxPool2d):
"""2D MaxPooling like TensorFlow's 'SAME' mode, with a dynamic image size.
    The amount of padding is computed dynamically in the forward pass.
"""
def __init__(self, kernel_size, stride, padding=0, dilation=1,
return_indices=False, ceil_mode=False):
super().__init__(kernel_size, stride, padding, dilation,
return_indices, ceil_mode)
self.stride = [self.stride] * 2 if isinstance(self.stride, int
) else self.stride
self.kernel_size = [self.kernel_size] * 2 if isinstance(self.
kernel_size, int) else self.kernel_size
self.dilation = [self.dilation] * 2 if isinstance(self.dilation, int
) else self.dilation
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Exdenta/torchsat
|
MaxPool2dDynamicSamePadding
| false | 13,659 |
[
"MIT"
] | 316 |
70ea3db758757104fb3ba618ddf7997f0f3a75b4
|
https://github.com/Exdenta/torchsat/tree/70ea3db758757104fb3ba618ddf7997f0f3a75b4
|
Conv2dDynamicSamePadding
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/xs/cxs2a7zwcw5yxvn445xldhvii7772mtsthpxnfawxoahvyf3vtaj.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.constant_pad_nd]
# Source node to ATen node mapping:
# x => constant_pad_nd
# Graph fragment:
# %constant_pad_nd : [num_users=2] = call_function[target=torch.ops.aten.constant_pad_nd.default](args = (%primals_1, [1, 2, 1, 2], 0.0), kwargs = {})
triton_poi_fused_constant_pad_nd_0 = async_compile.triton('triton_poi_fused_constant_pad_nd_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_constant_pad_nd_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 7) % 7
x0 = xindex % 7
x2 = (xindex // 49)
x4 = xindex
tmp0 = (-1) + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = (-1) + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + ((-5) + x0 + (4*x1) + (16*x2)), tmp10 & xmask, other=0.0)
tl.store(out_ptr0 + (x4), tmp11, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/32/c32v7egt4mupqssam3gmac2qgv3ujprjybthsgweflmot256qqw7.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%constant_pad_nd, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.constant_pad_nd]
stream0 = get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0.run(primals_1, buf0, 784, grid=grid(784), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf2, primals_3, 256, grid=grid(256), stream=stream0)
del primals_3
return (buf2, primals_2, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class Conv2dDynamicSamePadding(nn.Conv2d):
"""2D Convolutions like TensorFlow, for a dynamic image size.
    The amount of padding is computed dynamically in the forward pass.
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
dilation=1, groups=1, bias=True):
super().__init__(in_channels, out_channels, kernel_size, stride, 0,
dilation, groups, bias)
self.stride = self.stride if len(self.stride) == 2 else [self.stride[0]
] * 2
def forward(self, x):
ih, iw = x.size()[-2:]
kh, kw = self.weight.size()[-2:]
sh, sw = self.stride
oh, ow = math.ceil(ih / sh), math.ceil(iw / sw)
pad_h = max((oh - 1) * self.stride[0] + (kh - 1) * self.dilation[0] +
1 - ih, 0)
pad_w = max((ow - 1) * self.stride[1] + (kw - 1) * self.dilation[1] +
1 - iw, 0)
if pad_h > 0 or pad_w > 0:
x = F.pad(x, [pad_w // 2, pad_w - pad_w // 2, pad_h // 2, pad_h -
pad_h // 2])
return F.conv2d(x, self.weight, self.bias, self.stride, self.
padding, self.dilation, self.groups)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
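# The compiled graph above pads with constant_pad_nd([1, 2, 1, 2]); the same
# numbers fall out of the eager formula for the get_init_inputs() case
# (kernel_size=4, stride=1, dilation=1, 4x4 input):
#   pad_w = max((4 - 1) * 1 + (4 - 1) * 1 + 1 - 4, 0) = 3  ->  split [1, 2]
# The helper below is an illustrative shape check, not part of the source repo.
def _check_conv_same_padding_shape():
    conv = Conv2dDynamicSamePadding(in_channels=4, out_channels=4, kernel_size=4)
    out = conv(torch.rand(4, 4, 4, 4))
    assert out.shape == (4, 4, 4, 4)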
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_constant_pad_nd_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 7 % 7
x0 = xindex % 7
x2 = xindex // 49
x4 = xindex
tmp0 = -1 + x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = -1 + x0
tmp6 = tmp5 >= tmp1
tmp7 = tmp5 < tmp3
tmp8 = tmp2 & tmp4
tmp9 = tmp8 & tmp6
tmp10 = tmp9 & tmp7
tmp11 = tl.load(in_ptr0 + (-5 + x0 + 4 * x1 + 16 * x2), tmp10 & xmask,
other=0.0)
tl.store(out_ptr0 + x4, tmp11, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 7, 7), (196, 49, 7, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_constant_pad_nd_0[grid(784)](primals_1, buf0, 784,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(256)](buf2, primals_3, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
return buf2, primals_2, buf0
class Conv2dDynamicSamePaddingNew(nn.Conv2d):
"""2D Convolutions like TensorFlow, for a dynamic image size.
    The amount of padding is computed dynamically in the forward pass.
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
dilation=1, groups=1, bias=True):
super().__init__(in_channels, out_channels, kernel_size, stride, 0,
dilation, groups, bias)
self.stride = self.stride if len(self.stride) == 2 else [self.stride[0]
] * 2
def forward(self, input_0):
primals_1 = self.weight
primals_3 = self.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
Exdenta/torchsat
|
Conv2dDynamicSamePadding
| false | 13,660 |
[
"MIT"
] | 316 |
70ea3db758757104fb3ba618ddf7997f0f3a75b4
|
https://github.com/Exdenta/torchsat/tree/70ea3db758757104fb3ba618ddf7997f0f3a75b4
|
outconv
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/r3/cr3hlg2dj2d3nmsli5wlcbgrfym3b6ux3uuxd7pl3rggj6domt5d.py
# Topologically Sorted Source Nodes: [pad], Original ATen: [aten.reflection_pad2d]
# Source node to ATen node mapping:
# pad => _unsafe_index, _unsafe_index_1
# Graph fragment:
# %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_3, [None, None, %sub_1, None]), kwargs = {})
# %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_1]), kwargs = {})
triton_poi_fused_reflection_pad2d_0 = async_compile.triton('triton_poi_fused_reflection_pad2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + ((-1)*(tl_math.abs((-3) + x0))) + ((-4)*(tl_math.abs((-3) + x1))) + (16*x2)), xmask)
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/32/c32v7egt4mupqssam3gmac2qgv3ujprjybthsgweflmot256qqw7.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pad], Original ATen: [aten.reflection_pad2d]
stream0 = get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0.run(primals_3, buf0, 256, grid=grid(256), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf2, primals_2, 256, grid=grid(256), stream=stream0)
del primals_2
return (buf2, primals_1, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class outconv(nn.Module):
def __init__(self, in_ch, out_ch):
super(outconv, self).__init__()
self.conv = nn.Conv2d(in_ch, out_ch, 1, padding_mode='reflect')
def forward(self, x):
x = self.conv(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_ch': 4, 'out_ch': 4}]
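# Usage sketch; the note below is an inferred reading of the compiled graph,
# not a claim from the source repo: with kernel_size=1 and the default
# padding=0, the reflection_pad2d kernel above reduces to a plain copy, so
# the 1x1 convolution preserves the input's spatial size.
def _check_outconv_shape():
    m = outconv(in_ch=4, out_ch=4)
    out = m(torch.rand(4, 4, 4, 4))
    assert out.shape == (4, 4, 4, 4)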
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (15 + -1 * tl_math.abs(-3 + x0) + -4 * tl_math
.abs(-3 + x1) + 16 * x2), xmask)
tl.store(out_ptr0 + x3, tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0[grid(256)](primals_3, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4, 4), (64, 16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(256)](buf2, primals_2, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
return buf2, primals_1, buf0
class outconvNew(nn.Module):
def __init__(self, in_ch, out_ch):
super(outconvNew, self).__init__()
self.conv = nn.Conv2d(in_ch, out_ch, 1, padding_mode='reflect')
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
ExplorativeEngineering/LFMNet2
|
outconv
| false | 13,661 |
[
"Apache-2.0"
] | 46 |
3f190be0f047b9e05c69b0a11f99218fd4fc510c
|
https://github.com/ExplorativeEngineering/LFMNet2/tree/3f190be0f047b9e05c69b0a11f99218fd4fc510c
|
SplAtConv2d
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/cq/ccqcunoz44ytyqzy34r7cs2t74rk42s65mtajwcrygug6wcgzq24.py
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => convolution
# x_1 => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 2), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ve/cvelarc6diodfr33zpvmvgzuuzkvprbz6m6y4ohg2zlecikuyoyi.py
# Topologically Sorted Source Nodes: [add, gap, gap_1], Original ATen: [aten.add, aten.mean]
# Source node to ATen node mapping:
# add => add
# gap => add_1
# gap_1 => mean
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 0), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %getitem_5), kwargs = {})
# %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%add_1, [-1, -2], True), kwargs = {})
triton_poi_fused_add_mean_1 = async_compile.triton('triton_poi_fused_add_mean_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mean_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (8*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (4 + x0 + (8*x1)), xmask)
tmp1 = 0.0
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = 1.0
tmp6 = tmp4 / tmp5
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/t4/ct4hhjqt2vge2xiycaomw3jiwzw326vnuf5jpebeysc4mpxrpciw.py
# Topologically Sorted Source Nodes: [gap_2, gap_3], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# gap_2 => convolution_1
# gap_3 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%mean, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_2 = async_compile.triton('triton_poi_fused_convolution_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ym/cymnwinsgdg6rzof735q2mbji4z4uuuzffbkib2mmjzjggqvt5ti.py
# Topologically Sorted Source Nodes: [atten], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# atten => convolution_2
# Graph fragment:
# %convolution_2 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_3 = async_compile.triton('triton_poi_fused_convolution_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/al/calq4luacvjpbhaq6oadffm56qzlddnaiqtfsg2rnpgdoin4doyd.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# x_3 => amax, div, exp, sub, sum_1
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%permute, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%permute, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_4 = async_compile.triton('triton_poi_fused__softmax_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = (xindex // 8)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (8*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (4 + x0 + (8*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp1 - tmp3
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp2 - tmp3
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tmp5 / tmp10
tl.store(out_ptr0 + (x3), tmp11, xmask)
''', device_str='cuda')
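# Reference (hedged sketch): the fused softmax kernel above reproduces the
# rSoftMax used by SplAtConv2d for radix=2, cardinality=1. This helper is
# documentation only; nothing in this generated wrapper calls it.
def _rsoftmax_reference(atten, radix=2, cardinality=1):
    import torch.nn.functional as F
    batch = atten.size(0)
    x = atten.view(batch, cardinality, radix, -1).transpose(1, 2)
    return F.softmax(x, dim=1).reshape(batch, -1)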
# kernel path: runs/run_shard_0/inductor_cache/np/cnpcapaif5ggvdkeg53tnm4dojax33drj6dlnhtwxrttlaoiy23c.py
# Topologically Sorted Source Nodes: [mul, mul_1, add_2, out], Original ATen: [aten.mul, aten.add]
# Source node to ATen node mapping:
# add_2 => add_2
# mul => mul
# mul_1 => mul_1
# out => add_3
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_6, %getitem_2), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_7, %getitem_5), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 0), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %mul_1), kwargs = {})
triton_poi_fused_add_mul_5 = async_compile.triton('triton_poi_fused_add_mul_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (8*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + (8*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (4 + x0 + (8*x1)), xmask)
tmp6 = tl.load(in_ptr1 + (4 + x0 + (8*x1)), xmask)
tmp2 = tmp0 * tmp1
tmp3 = 0.0
tmp4 = tmp2 + tmp3
tmp7 = tmp5 * tmp6
tmp8 = tmp4 + tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
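# Reference (hedged sketch): eager-mode equivalent of the fused multiply-add
# kernel above, i.e. out = sum(att * split for att, split in zip(attens,
# splited)) from SplAtConv2d.forward. Documentation only.
def _weighted_sum_reference(attens, splits):
    return sum(a * s for a, s in zip(attens, splits))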
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (8, 2, 4, 4), (32, 16, 4, 1))
assert_size_stride(primals_2, (8, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (32, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (32, ), (1, ))
assert_size_stride(primals_6, (8, 32, 1, 1), (32, 1, 1, 1))
assert_size_stride(primals_7, (8, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=2, bias=None)
assert_size_stride(buf0, (4, 8, 1, 1), (8, 1, 1, 1))
buf1 = reinterpret_tensor(buf0, (4, 8, 1, 1), (8, 1, 32, 32), 0); del buf0 # reuse
buf9 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 1, 1), torch.bool)
# Topologically Sorted Source Nodes: [x, x_1], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_threshold_backward_0.run(buf1, primals_2, buf9, 32, grid=grid(32), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, gap, gap_1], Original ATen: [aten.add, aten.mean]
triton_poi_fused_add_mean_1.run(buf1, buf2, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [gap_2], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 32, 1, 1), (32, 1, 1, 1))
buf4 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [gap_2, gap_3], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_2.run(buf4, primals_5, 128, grid=grid(128), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [atten], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 8, 1, 1), (8, 1, 1, 1))
buf6 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [atten], Original ATen: [aten.convolution]
triton_poi_fused_convolution_3.run(buf6, primals_7, 32, grid=grid(32), stream=stream0)
del primals_7
buf7 = empty_strided_cuda((4, 2, 1, 4), (8, 4, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten._softmax]
triton_poi_fused__softmax_4.run(buf6, buf7, 32, grid=grid(32), stream=stream0)
buf8 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, mul_1, add_2, out], Original ATen: [aten.mul, aten.add]
triton_poi_fused_add_mul_5.run(buf7, buf1, buf8, 16, grid=grid(16), stream=stream0)
return (buf8, primals_1, primals_3, primals_4, primals_6, reinterpret_tensor(buf1, (4, 4, 1, 1), (8, 1, 1, 1), 0), reinterpret_tensor(buf1, (4, 4, 1, 1), (8, 1, 1, 1), 4), buf2, buf4, buf6, reinterpret_tensor(buf7, (4, 4, 1, 1), (8, 1, 1, 1), 0), reinterpret_tensor(buf7, (4, 4, 1, 1), (8, 1, 1, 1), 4), buf9, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((8, 2, 4, 4), (32, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((32, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((8, 32, 1, 1), (32, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
from torch.nn import Module
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Conv2d
from torch.nn import ReLU
from torch.nn.modules.utils import _pair
class DropBlock2D(object):
def __init__(self, *args, **kwargs):
raise NotImplementedError
class rSoftMax(nn.Module):
def __init__(self, radix, cardinality):
super().__init__()
self.radix = radix
self.cardinality = cardinality
def forward(self, x):
batch = x.size(0)
if self.radix > 1:
x = x.view(batch, self.cardinality, self.radix, -1).transpose(1, 2)
x = F.softmax(x, dim=1)
x = x.reshape(batch, -1)
else:
x = torch.sigmoid(x)
return x
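def _rsoftmax_demo():
    # Example (hedged): with radix=2 the attention logits are normalized
    # across the two splits; with radix=1 rSoftMax degenerates to a sigmoid.
    rs = rSoftMax(radix=2, cardinality=1)
    return rs(torch.rand(4, 8, 1, 1))  # -> shape (4, 8)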
class SplAtConv2d(Module):
"""Split-Attention Conv2d
"""
def __init__(self, in_channels, channels, kernel_size, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), groups=1, bias=True, radix=2,
reduction_factor=4, rectify=False, rectify_avg=False, norm_layer=
None, dropblock_prob=0.0, **kwargs):
super(SplAtConv2d, self).__init__()
padding = _pair(padding)
self.rectify = rectify and (padding[0] > 0 or padding[1] > 0)
self.rectify_avg = rectify_avg
inter_channels = max(in_channels * radix // reduction_factor, 32)
self.radix = radix
self.cardinality = groups
self.channels = channels
self.dropblock_prob = dropblock_prob
if self.rectify:
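            # NOTE (assumption): RFConv2d comes from the external `rfconv`
            # package in the original ResNeSt code; it is not imported in this
            # extract, so the rectify=True path raises NameError here.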
self.conv = RFConv2d(in_channels, channels * radix, kernel_size,
stride, padding, dilation, groups=groups * radix, bias=bias,
average_mode=rectify_avg, **kwargs)
else:
self.conv = Conv2d(in_channels, channels * radix, kernel_size,
stride, padding, dilation, groups=groups * radix, bias=bias,
**kwargs)
self.use_bn = norm_layer is not None
if self.use_bn:
self.bn0 = norm_layer(channels * radix)
self.relu = ReLU(inplace=True)
self.fc1 = Conv2d(channels, inter_channels, 1, groups=self.cardinality)
if self.use_bn:
self.bn1 = norm_layer(inter_channels)
self.fc2 = Conv2d(inter_channels, channels * radix, 1, groups=self.
cardinality)
if dropblock_prob > 0.0:
self.dropblock = DropBlock2D(dropblock_prob, 3)
self.rsoftmax = rSoftMax(radix, groups)
def forward(self, x):
x = self.conv(x)
if self.use_bn:
x = self.bn0(x)
if self.dropblock_prob > 0.0:
x = self.dropblock(x)
x = self.relu(x)
batch, rchannel = x.shape[:2]
if self.radix > 1:
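            # NOTE: comparing torch.__version__ as a string is fragile
            # (e.g. '1.10' < '1.5' is True); kept as in the original source.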
if torch.__version__ < '1.5':
splited = torch.split(x, int(rchannel // self.radix), dim=1)
else:
splited = torch.split(x, rchannel // self.radix, dim=1)
gap = sum(splited)
else:
gap = x
gap = F.adaptive_avg_pool2d(gap, 1)
gap = self.fc1(gap)
if self.use_bn:
gap = self.bn1(gap)
gap = self.relu(gap)
atten = self.fc2(gap)
atten = self.rsoftmax(atten).view(batch, -1, 1, 1)
if self.radix > 1:
if torch.__version__ < '1.5':
attens = torch.split(atten, int(rchannel // self.radix), dim=1)
else:
attens = torch.split(atten, rchannel // self.radix, dim=1)
out = sum([(att * split) for att, split in zip(attens, splited)])
else:
out = atten * x
return out.contiguous()
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'channels': 4, 'kernel_size': 4}]
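def _splat_demo():
    # Example (hedged): wiring get_init_inputs/get_inputs together. With
    # in_channels=4, channels=4, kernel_size=4 the 4x4 input collapses to 1x1.
    m = SplAtConv2d(in_channels=4, channels=4, kernel_size=4)
    x, = get_inputs()
    return m(x)  # -> shape (4, 4, 1, 1)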
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch.nn import Module
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Conv2d
from torch.nn import ReLU
from torch.nn.modules.utils import _pair
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_0(in_out_ptr0,
in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_mean_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 8 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (4 + x0 + 8 * x1), xmask)
tmp1 = 0.0
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp5 = 1.0
tmp6 = tmp4 / tmp5
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_2(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_3(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 4
x2 = xindex // 8
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 8 * x2), xmask, eviction_policy='evict_last'
)
tmp2 = tl.load(in_ptr0 + (4 + x0 + 8 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp4 = tmp0 - tmp3
tmp5 = tl_math.exp(tmp4)
tmp6 = tmp1 - tmp3
tmp7 = tl_math.exp(tmp6)
tmp8 = tmp2 - tmp3
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tmp5 / tmp10
tl.store(out_ptr0 + x3, tmp11, xmask)
@triton.jit
def triton_poi_fused_add_mul_5(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 8 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 8 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (4 + x0 + 8 * x1), xmask)
tmp6 = tl.load(in_ptr1 + (4 + x0 + 8 * x1), xmask)
tmp2 = tmp0 * tmp1
tmp3 = 0.0
tmp4 = tmp2 + tmp3
tmp7 = tmp5 * tmp6
tmp8 = tmp4 + tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (8, 2, 4, 4), (32, 16, 4, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (32, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (8, 32, 1, 1), (32, 1, 1, 1))
assert_size_stride(primals_7, (8,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=2, bias=None)
assert_size_stride(buf0, (4, 8, 1, 1), (8, 1, 1, 1))
buf1 = reinterpret_tensor(buf0, (4, 8, 1, 1), (8, 1, 32, 32), 0)
del buf0
buf9 = empty_strided_cuda((4, 8, 1, 1), (8, 1, 1, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_convolution_relu_threshold_backward_0[grid(32)](buf1,
primals_2, buf9, 32, XBLOCK=32, num_warps=1, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
triton_poi_fused_add_mean_1[grid(16)](buf1, buf2, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 32, 1, 1), (32, 1, 1, 1))
buf4 = buf3
del buf3
triton_poi_fused_convolution_relu_2[grid(128)](buf4, primals_5, 128,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_5
buf5 = extern_kernels.convolution(buf4, primals_6, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 8, 1, 1), (8, 1, 1, 1))
buf6 = buf5
del buf5
triton_poi_fused_convolution_3[grid(32)](buf6, primals_7, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_7
buf7 = empty_strided_cuda((4, 2, 1, 4), (8, 4, 4, 1), torch.float32)
triton_poi_fused__softmax_4[grid(32)](buf6, buf7, 32, XBLOCK=32,
num_warps=1, num_stages=1)
buf8 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
triton_poi_fused_add_mul_5[grid(16)](buf7, buf1, buf8, 16, XBLOCK=
16, num_warps=1, num_stages=1)
return (buf8, primals_1, primals_3, primals_4, primals_6,
reinterpret_tensor(buf1, (4, 4, 1, 1), (8, 1, 1, 1), 0),
reinterpret_tensor(buf1, (4, 4, 1, 1), (8, 1, 1, 1), 4), buf2, buf4,
buf6, reinterpret_tensor(buf7, (4, 4, 1, 1), (8, 1, 1, 1), 0),
reinterpret_tensor(buf7, (4, 4, 1, 1), (8, 1, 1, 1), 4), buf9)
class DropBlock2D(object):
def __init__(self, *args, **kwargs):
raise NotImplementedError
class rSoftMax(nn.Module):
def __init__(self, radix, cardinality):
super().__init__()
self.radix = radix
self.cardinality = cardinality
def forward(self, x):
batch = x.size(0)
if self.radix > 1:
x = x.view(batch, self.cardinality, self.radix, -1).transpose(1, 2)
x = F.softmax(x, dim=1)
x = x.reshape(batch, -1)
else:
x = torch.sigmoid(x)
return x
class SplAtConv2dNew(Module):
"""Split-Attention Conv2d
"""
def __init__(self, in_channels, channels, kernel_size, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), groups=1, bias=True, radix=2,
reduction_factor=4, rectify=False, rectify_avg=False, norm_layer=
None, dropblock_prob=0.0, **kwargs):
super(SplAtConv2dNew, self).__init__()
padding = _pair(padding)
self.rectify = rectify and (padding[0] > 0 or padding[1] > 0)
self.rectify_avg = rectify_avg
inter_channels = max(in_channels * radix // reduction_factor, 32)
self.radix = radix
self.cardinality = groups
self.channels = channels
self.dropblock_prob = dropblock_prob
if self.rectify:
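            # NOTE (assumption): RFConv2d comes from the external `rfconv`
            # package; it is not imported here, so rectify=True raises
            # NameError in this extract.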
self.conv = RFConv2d(in_channels, channels * radix, kernel_size,
stride, padding, dilation, groups=groups * radix, bias=bias,
average_mode=rectify_avg, **kwargs)
else:
self.conv = Conv2d(in_channels, channels * radix, kernel_size,
stride, padding, dilation, groups=groups * radix, bias=bias,
**kwargs)
self.use_bn = norm_layer is not None
if self.use_bn:
self.bn0 = norm_layer(channels * radix)
self.relu = ReLU(inplace=True)
self.fc1 = Conv2d(channels, inter_channels, 1, groups=self.cardinality)
if self.use_bn:
self.bn1 = norm_layer(inter_channels)
self.fc2 = Conv2d(inter_channels, channels * radix, 1, groups=self.
cardinality)
if dropblock_prob > 0.0:
self.dropblock = DropBlock2D(dropblock_prob, 3)
self.rsoftmax = rSoftMax(radix, groups)
def forward(self, input_0):
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_4 = self.fc1.weight
primals_5 = self.fc1.bias
primals_6 = self.fc2.weight
primals_7 = self.fc2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
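def _splat_new_demo():
    # Example (hedged): the compiled wrapper hard-codes CUDA device 0 and the
    # shapes asserted in call(), so inputs must match get_inputs() on GPU.
    m = SplAtConv2dNew(in_channels=4, channels=4, kernel_size=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    return m(x)  # -> shape (4, 4, 1, 1)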
|
Exdenta/torchsat
|
SplAtConv2d
| false | 13,662 |
[
"MIT"
] | 316 |
70ea3db758757104fb3ba618ddf7997f0f3a75b4
|
https://github.com/Exdenta/torchsat/tree/70ea3db758757104fb3ba618ddf7997f0f3a75b4
|
VitMlpHead
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/yy/cyya3js6wt64vdji3sfisvrqyfvqxwkwqq5mzg5bqjl2crzjs4t3.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# x_1 => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%select,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/2g/c2gw7362i2a6wsfdx2sxyywx4o6ronjg6goebvdn44w6gpjsxpbc.py
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.add, aten.tanh]
# Source node to ATen node mapping:
# x_1 => add
# x_2 => tanh
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_1, %primals_3), kwargs = {})
# %tanh : [num_users=2] = call_function[target=torch.ops.aten.tanh.default](args = (%add,), kwargs = {})
triton_poi_fused_add_tanh_1 = async_compile.triton('triton_poi_fused_add_tanh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_tanh_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + (x2), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(primals_1, buf0, 64, grid=grid(64), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten.add, aten.tanh]
triton_poi_fused_add_tanh_1.run(buf2, primals_3, 64, grid=grid(64), stream=stream0)
del primals_3
buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_5
return (reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf2, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import argparse
import torch
def get_args():
parser = argparse.ArgumentParser()
group = parser.add_argument_group(title='input data')
group.add_argument('--input', type=str, required=True, help=
'Path to input JSON')
group.add_argument('--json-keys', nargs='+', default=['text'], help=
'space separate listed of keys to extract from json')
group.add_argument('--split-sentences', action='store_true', help=
'Split documents into sentences.')
group.add_argument('--keep-newlines', action='store_true', help=
'Keep newlines between sentences when splitting.')
group = parser.add_argument_group(title='tokenizer')
group.add_argument('--tokenizer-type', type=str, required=True, choices
=['BertWordPieceLowerCase', 'BertWordPieceCase', 'GPT2BPETokenizer'
], help='What type of tokenizer to use.')
group.add_argument('--vocab-file', type=str, default=None, help=
'Path to the vocab file')
group.add_argument('--merge-file', type=str, default=None, help=
'Path to the BPE merge file (if necessary).')
group.add_argument('--append-eod', action='store_true', help=
'Append an <eod> token to the end of a document.')
group = parser.add_argument_group(title='output data')
group.add_argument('--output-prefix', type=str, required=True, help=
'Path to binary output file without suffix')
group.add_argument('--dataset-impl', type=str, default='mmap', choices=
['lazy', 'cached', 'mmap'])
group = parser.add_argument_group(title='runtime')
group.add_argument('--workers', type=int, default=1, help=
'Number of worker processes to launch')
group.add_argument('--log-interval', type=int, default=100, help=
'Interval between progress updates')
args = parser.parse_args()
args.keep_empty = False
if args.tokenizer_type.lower().startswith('bert'):
if not args.split_sentences:
            pass  # no-op: a warning print in the original Megatron script was stripped here
args.rank = 0
args.make_vocab_size_divisible_by = 128
args.tensor_model_parallel_size = 1
args.vocab_extra_ids = 0
return args
def init_method_normal(sigma):
"""Init method based on N(0, sigma)."""
def init_(tensor):
return torch.nn.init.normal_(tensor, mean=0.0, std=sigma)
return init_
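# NOTE (assumption): `mpu` used by MegatronModule below is provided by the
# Megatron package (`from megatron import mpu` in the original source); it is
# not defined in this extract.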
class MegatronModule(torch.nn.Module):
"""Megatron specific extensions of torch Module with support
for pipelining."""
def __init__(self, share_word_embeddings=True):
super(MegatronModule, self).__init__()
self.share_word_embeddings = share_word_embeddings
def state_dict_for_save_checkpoint(self, destination=None, prefix='',
keep_vars=False):
"""Use this function to override the state dict for
saving checkpoints."""
return self.state_dict(destination, prefix, keep_vars)
def word_embeddings_weight(self):
if mpu.is_pipeline_first_stage(ignore_virtual=True):
return self.language_model.embedding.word_embeddings.weight
if mpu.is_pipeline_last_stage(ignore_virtual=True):
if not self.share_word_embeddings:
raise Exception(
'word_embeddings_weight() called for last stage, but share_word_embeddings is false'
)
return self.word_embeddings.weight
raise Exception(
'word_embeddings_weight() should be called for first and last stage only'
)
def initialize_word_embeddings(self, init_method_normal):
args = get_args()
if not self.share_word_embeddings:
raise Exception(
'initialize_word_embeddings() was called but share_word_embeddings is false'
)
if args.pipeline_model_parallel_size == 1:
return
if mpu.is_pipeline_last_stage():
assert not mpu.is_pipeline_first_stage()
self._word_embeddings_for_head_key = 'word_embeddings_for_head'
self.word_embeddings = mpu.VocabParallelEmbedding(args.
padded_vocab_size, args.hidden_size, init_method=
init_method_normal(args.init_method_std))
self.word_embeddings.weight.data.fill_(0)
self.word_embeddings.weight.shared = True
if torch.distributed.is_initialized():
if mpu.is_pipeline_first_stage() or mpu.is_pipeline_last_stage():
torch.distributed.all_reduce(self.word_embeddings_weight().
data, group=mpu.get_embedding_group())
else:
            pass  # no-op: a warning print in the original Megatron source was stripped here
class VitMlpHead(MegatronModule):
"""Pooler layer.
Pool hidden states of a specific token (for example start of the
sequence) and add a linear transformation followed by a tanh.
Arguments:
hidden_size: hidden size
init_method: weight initialization method for the linear layer.
bias is set to zero.
"""
def __init__(self, hidden_size, num_classes):
super(VitMlpHead, self).__init__()
self.dense_in = torch.nn.Linear(hidden_size, hidden_size)
self.dense_out = torch.nn.Linear(hidden_size, num_classes)
torch.nn.init.constant_(self.dense_out.bias, -10)
def forward(self, hidden_states, sequence_index=0):
x = hidden_states[:, sequence_index, :]
x = self.dense_in(x)
x = torch.tanh(x)
x = self.dense_out(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'hidden_size': 4, 'num_classes': 4}]
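def _vit_head_demo():
    # Example (hedged): pool the hidden state at sequence_index=0, then
    # Linear -> tanh -> Linear.
    head = VitMlpHead(hidden_size=4, num_classes=4)
    x = torch.rand(4, 4, 4)  # (batch, seq, hidden)
    return head(x)  # -> shape (4, 4)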
|
import argparse
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
@triton.jit
def triton_poi_fused_add_tanh_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(in_out_ptr0 + x2, tmp3, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64)](primals_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
del buf1
triton_poi_fused_add_tanh_1[grid(64)](buf2, primals_3, 64, XBLOCK=
64, num_warps=1, num_stages=1)
del primals_3
buf3 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf2, (16, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf3)
del primals_5
return reinterpret_tensor(buf3, (4, 4, 4), (16, 4, 1), 0
), reinterpret_tensor(buf0, (16, 4), (4, 1), 0), buf2, primals_4
def get_args():
parser = argparse.ArgumentParser()
group = parser.add_argument_group(title='input data')
group.add_argument('--input', type=str, required=True, help=
'Path to input JSON')
group.add_argument('--json-keys', nargs='+', default=['text'], help=
'space separate listed of keys to extract from json')
group.add_argument('--split-sentences', action='store_true', help=
'Split documents into sentences.')
group.add_argument('--keep-newlines', action='store_true', help=
'Keep newlines between sentences when splitting.')
group = parser.add_argument_group(title='tokenizer')
group.add_argument('--tokenizer-type', type=str, required=True, choices
=['BertWordPieceLowerCase', 'BertWordPieceCase', 'GPT2BPETokenizer'
], help='What type of tokenizer to use.')
group.add_argument('--vocab-file', type=str, default=None, help=
'Path to the vocab file')
group.add_argument('--merge-file', type=str, default=None, help=
'Path to the BPE merge file (if necessary).')
group.add_argument('--append-eod', action='store_true', help=
'Append an <eod> token to the end of a document.')
group = parser.add_argument_group(title='output data')
group.add_argument('--output-prefix', type=str, required=True, help=
'Path to binary output file without suffix')
group.add_argument('--dataset-impl', type=str, default='mmap', choices=
['lazy', 'cached', 'mmap'])
group = parser.add_argument_group(title='runtime')
group.add_argument('--workers', type=int, default=1, help=
'Number of worker processes to launch')
group.add_argument('--log-interval', type=int, default=100, help=
'Interval between progress updates')
args = parser.parse_args()
args.keep_empty = False
if args.tokenizer_type.lower().startswith('bert'):
if not args.split_sentences:
            pass  # no-op: a warning print in the original Megatron script was stripped here
args.rank = 0
args.make_vocab_size_divisible_by = 128
args.tensor_model_parallel_size = 1
args.vocab_extra_ids = 0
return args
def init_method_normal(sigma):
"""Init method based on N(0, sigma)."""
def init_(tensor):
return torch.nn.init.normal_(tensor, mean=0.0, std=sigma)
return init_
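# NOTE (assumption): as above, `mpu` is expected from the Megatron package
# (`from megatron import mpu` in the original source) and is undefined here.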
class MegatronModule(torch.nn.Module):
"""Megatron specific extensions of torch Module with support
for pipelining."""
def __init__(self, share_word_embeddings=True):
super(MegatronModule, self).__init__()
self.share_word_embeddings = share_word_embeddings
def state_dict_for_save_checkpoint(self, destination=None, prefix='',
keep_vars=False):
"""Use this function to override the state dict for
saving checkpoints."""
return self.state_dict(destination, prefix, keep_vars)
def word_embeddings_weight(self):
if mpu.is_pipeline_first_stage(ignore_virtual=True):
return self.language_model.embedding.word_embeddings.weight
if mpu.is_pipeline_last_stage(ignore_virtual=True):
if not self.share_word_embeddings:
raise Exception(
'word_embeddings_weight() called for last stage, but share_word_embeddings is false'
)
return self.word_embeddings.weight
raise Exception(
'word_embeddings_weight() should be called for first and last stage only'
)
def initialize_word_embeddings(self, init_method_normal):
args = get_args()
if not self.share_word_embeddings:
raise Exception(
'initialize_word_embeddings() was called but share_word_embeddings is false'
)
if args.pipeline_model_parallel_size == 1:
return
if mpu.is_pipeline_last_stage():
assert not mpu.is_pipeline_first_stage()
self._word_embeddings_for_head_key = 'word_embeddings_for_head'
self.word_embeddings = mpu.VocabParallelEmbedding(args.
padded_vocab_size, args.hidden_size, init_method=
init_method_normal(args.init_method_std))
self.word_embeddings.weight.data.fill_(0)
self.word_embeddings.weight.shared = True
if torch.distributed.is_initialized():
if mpu.is_pipeline_first_stage() or mpu.is_pipeline_last_stage():
torch.distributed.all_reduce(self.word_embeddings_weight().
data, group=mpu.get_embedding_group())
else:
            pass  # no-op: a warning print in the original Megatron source was stripped here
class VitMlpHeadNew(MegatronModule):
"""Pooler layer.
Pool hidden states of a specific token (for example start of the
sequence) and add a linear transformation followed by a tanh.
Arguments:
hidden_size: hidden size
init_method: weight initialization method for the linear layer.
bias is set to zero.
"""
def __init__(self, hidden_size, num_classes):
super(VitMlpHeadNew, self).__init__()
self.dense_in = torch.nn.Linear(hidden_size, hidden_size)
self.dense_out = torch.nn.Linear(hidden_size, num_classes)
torch.nn.init.constant_(self.dense_out.bias, -10)
def forward(self, input_0):
primals_2 = self.dense_in.weight
primals_3 = self.dense_in.bias
primals_4 = self.dense_out.weight
primals_5 = self.dense_out.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
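def _check_vit_head():
    # Sanity check (hedged): compare the compiled head against the reference
    # math from the original forward, on CUDA tensors.
    head = VitMlpHeadNew(hidden_size=4, num_classes=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    ref = head.dense_out(torch.tanh(head.dense_in(x[:, 0, :])))
    return torch.allclose(head(x), ref, atol=1e-5)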
|
ExaSearch/Megatron-DeepSpeed
|
VitMlpHead
| false | 13,663 |
[
"MIT"
] | 71 |
215dcf9fd4d18d9efa1d15d06c3eb85572957bf3
|
https://github.com/ExaSearch/Megatron-DeepSpeed/tree/215dcf9fd4d18d9efa1d15d06c3eb85572957bf3
|
CapOnlyContrastiveLoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/rm/crmh7mrmycyydn6ei6x6slqutkmm4twir4dms47khv6pakfenttz.py
# Topologically Sorted Source Nodes: [add, sub, cost_s, sum_1], Original ATen: [aten.add, aten.sub, aten.clamp, aten.sum]
# Source node to ATen node mapping:
# add => add
# cost_s => clamp_min
# sub => sub
# sum_1 => sum_1
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm, 0), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %expand), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 0), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%clamp_min,), kwargs = {})
triton_per_fused_add_clamp_sub_sum_0 = async_compile.triton('triton_per_fused_add_clamp_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_clamp_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_clamp_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
r1 = (rindex // 4)
tmp0 = tl.load(in_ptr0 + (r2), None)
tmp3 = tl.load(in_ptr1 + (5*r1), None, eviction_policy='evict_last')
tmp1 = 0.0
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp5 = triton_helpers.maximum(tmp4, tmp1)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.sum(tmp6, 1)[:, None]
tl.store(out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp8, None)
''', device_str='cuda')
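# Reference (hedged sketch): for the margin=0 trace captured here, the kernel
# above computes (im @ ex_s.t() - diag(im @ s.t())[:, None]).clamp(min=0).sum().
# Documentation only; the generated wrapper never calls it.
def _cost_reference(im, s, ex_s):
    scores = im.mm(ex_s.t())
    d1 = im.mm(s.t()).diag().view(-1, 1)
    return (scores - d1).clamp(min=0).sum()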
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [scores], Original ATen: [aten.mm]
extern_kernels.mm(arg0_1, reinterpret_tensor(arg1_1, (4, 4), (1, 4), 0), out=buf0)
del arg1_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [scores_orig], Original ATen: [aten.mm]
extern_kernels.mm(arg0_1, reinterpret_tensor(arg2_1, (4, 4), (1, 4), 0), out=buf1)
del arg0_1
del arg2_1
buf2 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [add, sub, cost_s, sum_1], Original ATen: [aten.add, aten.sub, aten.clamp, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_add_clamp_sub_sum_0.run(buf0, buf1, buf2, 1, 16, grid=grid(1), stream=stream0)
del buf0
del buf1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.init
def cosine_sim(im, s):
"""Cosine similarity between all the image and sentence pairs
"""
return im.mm(s.t())
def order_sim(im, s):
"""Order embeddings similarity measure $max(0, s-im)$
"""
YmX = s.unsqueeze(1).expand(s.size(0), im.size(0), s.size(1)
) - im.unsqueeze(0).expand(s.size(0), im.size(0), s.size(1))
score = -YmX.clamp(min=0).pow(2).sum(2).sqrt().t()
return score
class CapOnlyContrastiveLoss(nn.Module):
"""
Compute contrastive loss
"""
def __init__(self, margin=0, measure=False, max_violation=False):
super(CapOnlyContrastiveLoss, self).__init__()
self.margin = margin
if measure == 'order':
self.sim = order_sim
else:
self.sim = cosine_sim
self.max_violation = max_violation
def forward(self, im, s, ex_s):
scores = self.sim(im, ex_s)
scores_orig = self.sim(im, s)
diagonal = scores_orig.diag().contiguous().view(im.size(0), 1)
d1 = diagonal.expand_as(scores)
cost_s = (self.margin + scores - d1).clamp(min=0)
if self.max_violation:
cost_s = cost_s.max(1)[0]
return cost_s.sum()
def __call__(self, *args, **kwargs):
return self.forward(*args, **kwargs)
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
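def _loss_demo():
    # Example (hedged): hinge of (margin + sim(im, ex_s) - diag(sim(im, s))).
    loss_fn = CapOnlyContrastiveLoss(margin=0.2)
    im, s, ex_s = torch.rand(4, 4), torch.rand(4, 4), torch.rand(4, 4)
    return loss_fn(im, s, ex_s)  # 0-dim tensor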
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_add_clamp_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r2 = rindex
r1 = rindex // 4
tmp0 = tl.load(in_ptr0 + r2, None)
tmp3 = tl.load(in_ptr1 + 5 * r1, None, eviction_policy='evict_last')
tmp1 = 0.0
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp5 = triton_helpers.maximum(tmp4, tmp1)
tmp6 = tl.broadcast_to(tmp5, [XBLOCK, RBLOCK])
tmp8 = tl.sum(tmp6, 1)[:, None]
tl.store(out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp8, None)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(arg0_1, reinterpret_tensor(arg1_1, (4, 4), (1, 4),
0), out=buf0)
del arg1_1
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.mm(arg0_1, reinterpret_tensor(arg2_1, (4, 4), (1, 4),
0), out=buf1)
del arg0_1
del arg2_1
buf2 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_add_clamp_sub_sum_0[grid(1)](buf0, buf1, buf2, 1,
16, XBLOCK=1, num_warps=2, num_stages=1)
del buf0
del buf1
return buf2,
def cosine_sim(im, s):
"""Cosine similarity between all the image and sentence pairs
"""
return im.mm(s.t())
def order_sim(im, s):
"""Order embeddings similarity measure $max(0, s-im)$
"""
YmX = s.unsqueeze(1).expand(s.size(0), im.size(0), s.size(1)
) - im.unsqueeze(0).expand(s.size(0), im.size(0), s.size(1))
score = -YmX.clamp(min=0).pow(2).sum(2).sqrt().t()
return score
class CapOnlyContrastiveLossNew(nn.Module):
"""
Compute contrastive loss
"""
def __init__(self, margin=0, measure=False, max_violation=False):
super(CapOnlyContrastiveLossNew, self).__init__()
self.margin = margin
if measure == 'order':
self.sim = order_sim
else:
self.sim = cosine_sim
self.max_violation = max_violation
def __call__(self, *args, **kwargs):
return self.forward(*args, **kwargs)
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
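# Hedged smoke test (assumes a CUDA device, since call() allocates CUDA
# buffers and is specialized to three 4x4 float32 inputs): checks that the
# fused module returns a finite scalar loss.
def _smoke_test_cap_only_new():
    m = CapOnlyContrastiveLossNew()
    out = m(torch.rand(4, 4, device='cuda'),
            torch.rand(4, 4, device='cuda'),
            torch.rand(4, 4, device='cuda'))
    assert out.ndim == 0 and torch.isfinite(out)
    return out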
|
ExplorerFreda/VSE-C
|
CapOnlyContrastiveLoss
| false | 13,664 |
[
"MIT"
] | 61 |
52d7742adfe017eacd74f36a5953ea2ace9f5fce
|
https://github.com/ExplorerFreda/VSE-C/tree/52d7742adfe017eacd74f36a5953ea2ace9f5fce
|
NormedLinear
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/c4/cc4mo2hxcdeykamlqh3edsugtiwa5qgnvsckmhygtvamwp2crsqf.py
# Topologically Sorted Source Nodes: [norm_1, pow_2, add_1, x_, x__1], Original ATen: [aten.linalg_vector_norm, aten.pow, aten.add, aten.div, aten.mul]
# Source node to ATen node mapping:
# add_1 => add_1
# norm_1 => pow_4, pow_5, sum_2
# pow_2 => pow_6
# x_ => div_1
# x__1 => mul
# Graph fragment:
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_2, 2), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_4, [1], True), kwargs = {})
# %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {})
# %pow_6 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%pow_5, 1.0), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_6, 1e-06), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_2, %add_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_1, 20), kwargs = {})
triton_poi_fused_add_div_linalg_vector_norm_mul_pow_0 = async_compile.triton('triton_poi_fused_add_div_linalg_vector_norm_mul_pow_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_linalg_vector_norm_mul_pow_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_linalg_vector_norm_mul_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-06
tmp14 = tmp12 + tmp13
tmp15 = tmp0 / tmp14
tmp16 = 20.0
tmp17 = tmp15 * tmp16
tl.store(out_ptr0 + (x3), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/gr/cgrujw4uhohajox4hmoi742gbs4qqn5s46hnwwj2orhjgu6tkgrn.py
# Topologically Sorted Source Nodes: [norm, pow_1, add, weight_], Original ATen: [aten.linalg_vector_norm, aten.pow, aten.add, aten.div]
# Source node to ATen node mapping:
# add => add
# norm => pow_1, pow_2, sum_1
# pow_1 => pow_3
# weight_ => div
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%pow_2, 1.0), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_3, 1e-06), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %add), kwargs = {})
triton_poi_fused_add_div_linalg_vector_norm_pow_1 = async_compile.triton('triton_poi_fused_add_div_linalg_vector_norm_pow_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_linalg_vector_norm_pow_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_linalg_vector_norm_pow_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-06
tmp14 = tmp12 + tmp13
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + (x2), tmp15, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [norm_1, pow_2, add_1, x_, x__1], Original ATen: [aten.linalg_vector_norm, aten.pow, aten.add, aten.div, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_linalg_vector_norm_mul_pow_0.run(primals_2, buf0, 256, grid=grid(256), stream=stream0)
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [norm, pow_1, add, weight_], Original ATen: [aten.linalg_vector_norm, aten.pow, aten.add, aten.div]
triton_poi_fused_add_div_linalg_vector_norm_pow_1.run(primals_1, buf1, 16, grid=grid(16), stream=stream0)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del buf1
del primals_3
return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), primals_1, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn.functional as F
import torch.nn as nn
class NormedLinear(nn.Linear):
"""Normalized Linear Layer.
Args:
tempeature (float, optional): Tempeature term. Default to 20.
power (int, optional): Power term. Default to 1.0.
eps (float, optional): The minimal value of divisor to
keep numerical stability. Default to 1e-6.
"""
def __init__(self, *args, tempearture=20, power=1.0, eps=1e-06, **kwargs):
super(NormedLinear, self).__init__(*args, **kwargs)
self.tempearture = tempearture
self.power = power
self.eps = eps
self.init_weights()
def init_weights(self):
nn.init.normal_(self.weight, mean=0, std=0.01)
if self.bias is not None:
nn.init.constant_(self.bias, 0)
def forward(self, x):
weight_ = self.weight / (self.weight.norm(dim=1, keepdim=True).pow(
self.power) + self.eps)
x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps)
x_ = x_ * self.tempearture
return F.linear(x_, weight_, self.bias)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_features': 4, 'out_features': 4}]
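# Hedged usage sketch (not part of the original source): because inputs are
# normalized along dim 1 before the linear map, the output is nearly
# invariant to rescaling the input (up to the eps term in the divisor).
def _demo_normed_linear():
    torch.manual_seed(0)
    layer = NormedLinear(4, 4)
    x = torch.rand(4, 4, 4, 4)
    out1 = layer(x)
    out2 = layer(3.0 * x)  # rescaling barely changes the normalized input
    assert torch.allclose(out1, out2, atol=1e-4)
    return out1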
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_add_div_linalg_vector_norm_mul_pow_0(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
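    # x0 spans the trailing 4x4 plane and x2 the batch dim; the four loads
    # below (offsets 0, 16, 32, 48) gather the dim-1 elements whose L2 norm
    # normalizes tmp0.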
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-06
tmp14 = tmp12 + tmp13
tmp15 = tmp0 / tmp14
tmp16 = 20.0
tmp17 = tmp15 * tmp16
tl.store(out_ptr0 + x3, tmp17, xmask)
@triton.jit
def triton_poi_fused_add_div_linalg_vector_norm_pow_1(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-06
tmp14 = tmp12 + tmp13
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x2, tmp15, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_linalg_vector_norm_mul_pow_0[grid(256)](
primals_2, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_div_linalg_vector_norm_pow_1[grid(16)](primals_1,
buf1, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_3, reinterpret_tensor(buf0, (64, 4), (
4, 1), 0), reinterpret_tensor(buf1, (4, 4), (1, 4), 0), alpha=1,
beta=1, out=buf2)
del buf1
del primals_3
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), primals_1, reinterpret_tensor(buf0, (64, 4), (4, 1), 0)
class NormedLinearNew(nn.Linear):
"""Normalized Linear Layer.
Args:
tempeature (float, optional): Tempeature term. Default to 20.
power (int, optional): Power term. Default to 1.0.
eps (float, optional): The minimal value of divisor to
keep numerical stability. Default to 1e-6.
"""
def __init__(self, *args, tempearture=20, power=1.0, eps=1e-06, **kwargs):
super(NormedLinearNew, self).__init__(*args, **kwargs)
self.tempearture = tempearture
self.power = power
self.eps = eps
self.init_weights()
def init_weights(self):
nn.init.normal_(self.weight, mean=0, std=0.01)
if self.bias is not None:
nn.init.constant_(self.bias, 0)
def forward(self, input_0):
primals_1 = self.weight
primals_3 = self.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
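# Hedged parity sketch (assumes a CUDA device; call() is specialized to a
# 4x4x4x4 float32 input with power=1.0 and tempearture=20, so the eager
# reference below omits the identity pow):
def _check_normed_linear_parity():
    torch.manual_seed(0)
    fused = NormedLinearNew(4, 4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    w = fused.weight / (fused.weight.norm(dim=1, keepdim=True) + 1e-06)
    x_ = x / (x.norm(dim=1, keepdim=True) + 1e-06) * 20
    eager = torch.nn.functional.linear(x_, w, fused.bias)
    assert torch.allclose(eager, fused(x), atol=1e-5)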
|
FMsunyh/mmdetection
|
NormedLinear
| false | 13,665 |
[
"Apache-2.0"
] | 240 |
d3683eb06d1041aa3d55f35ad81d8c37718a4c2d
|
https://github.com/FMsunyh/mmdetection/tree/d3683eb06d1041aa3d55f35ad81d8c37718a4c2d
|
Policy
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/md/cmd3ewacyhu5w5hausgbjbmtnt5rr66cgczh4ibdypq7dz6p4v7g.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/o4/co4ltgolfty6xbtjs454crc7imkotqguqwb6zvbpz2luzl3qkzin.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_3, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_3, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 8
x2 = (xindex // 32)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (32*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (8 + x0 + (32*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (16 + x0 + (32*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (24 + x0 + (32*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/2e/c2ei25xypczil2scpap6sg6cxhom5wssmh3azqrbkeq7nevkrhj7.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 8
x2 = (xindex // 32)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (32*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (8 + x0 + (32*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (16 + x0 + (32*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (24 + x0 + (32*x2)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (128, 4), (4, 1))
assert_size_stride(primals_2, (128, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (2, 128), (128, 1))
assert_size_stride(primals_5, (2, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0); del buf0 # reuse
buf5 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf5, 8192, grid=grid(8192), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
# Topologically Sorted Source Nodes: [action_scores], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 128), (128, 1), 0), reinterpret_tensor(primals_4, (128, 2), (1, 128), 0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf2, buf3, 128, grid=grid(128), stream=stream0)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 2), (32, 8, 2, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf3, buf4, 128, grid=grid(128), stream=stream0)
del buf3
return (buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 128), (128, 1), 0), buf4, primals_4, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((128, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((2, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((2, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Policy(nn.Module):
def __init__(self):
super(Policy, self).__init__()
self.affine1 = nn.Linear(4, 128)
self.affine2 = nn.Linear(128, 2)
self.saved_log_probs = []
self.rewards = []
def forward(self, x):
x = F.relu(self.affine1(x))
action_scores = self.affine2(x)
return F.softmax(action_scores, dim=1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
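# Hedged usage sketch (not part of the original source): the policy maps a
# 4-dim observation to a categorical distribution over 2 actions, in the
# style of the classic REINFORCE CartPole example this module resembles.
def _demo_policy_step():
    torch.manual_seed(0)
    policy = Policy()
    obs = torch.rand(1, 4)           # a single CartPole-like state
    probs = policy(obs)              # shape (1, 2); rows sum to 1
    dist = torch.distributions.Categorical(probs)
    action = dist.sample()
    policy.saved_log_probs.append(dist.log_prob(action))
    return action.item()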
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 8
x2 = xindex // 32
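    # x0 spans the trailing 4x2 plane and x2 the batch dim; offsets 0, 8, 16,
    # 24 below gather the four dim-1 logits reduced by the softmax max.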
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 32 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (8 + x0 + 32 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (16 + x0 + 32 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (24 + x0 + 32 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 8
x2 = xindex // 32
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 32 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (8 + x0 + 32 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (16 + x0 + 32 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (24 + x0 + 32 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (128, 4), (4, 1))
assert_size_stride(primals_2, (128,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (2, 128), (128, 1))
assert_size_stride(primals_5, (2,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 128), (128, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 128), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 128), (2048, 512, 128, 1), 0)
del buf0
buf5 = empty_strided_cuda((4, 4, 4, 128), (2048, 512, 128, 1),
torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(8192)](buf1,
primals_2, buf5, 8192, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 2), (2, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 128),
(128, 1), 0), reinterpret_tensor(primals_4, (128, 2), (1, 128),
0), alpha=1, beta=1, out=buf2)
del primals_5
buf3 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
triton_poi_fused__softmax_1[grid(128)](buf2, buf3, 128, XBLOCK=128,
num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf2, (4, 4, 4, 2), (32, 8, 2, 1), 0)
del buf2
triton_poi_fused__softmax_2[grid(128)](buf3, buf4, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del buf3
return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 128), (128, 1), 0
), buf4, primals_4, buf5
class PolicyNew(nn.Module):
def __init__(self):
super(PolicyNew, self).__init__()
self.affine1 = nn.Linear(4, 128)
self.affine2 = nn.Linear(128, 2)
self.saved_log_probs = []
self.rewards = []
def forward(self, input_0):
primals_1 = self.affine1.weight
primals_2 = self.affine1.bias
primals_4 = self.affine2.weight
primals_5 = self.affine2.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
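# Hedged parity sketch (assumes a CUDA device; call() is specialized to a
# 4x4x4x4 float32 input, with the softmax taken over dim 1 as in the eager
# module):
def _check_policy_parity():
    torch.manual_seed(0)
    fused = PolicyNew().cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    h = torch.relu(torch.nn.functional.linear(x, fused.affine1.weight,
                                              fused.affine1.bias))
    scores = torch.nn.functional.linear(h, fused.affine2.weight,
                                        fused.affine2.bias)
    eager = torch.softmax(scores, dim=1)
    assert torch.allclose(eager, fused(x), atol=1e-5)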
|
Eunjnnn/ignite
|
Policy
| false | 13,666 |
[
"BSD-3-Clause"
] | 4,119 |
743089705b2b252aa5e2a0f310da3a8724d6711e
|
https://github.com/Eunjnnn/ignite/tree/743089705b2b252aa5e2a0f310da3a8724d6711e
|
Return
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ow/cows2oj57woerizpg54kron4kaw2jyzstjpgyturomqwavaagjrw.py
# Topologically Sorted Source Nodes: [val, sub, mul, add], Original ATen: [aten.sigmoid, aten.sub, aten.mul, aten.add]
# Source node to ATen node mapping:
# add => add
# mul => mul
# sub => sub
# val => sigmoid
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%arg0_1,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg2_1, %arg1_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sigmoid, %sub), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg1_1, %mul), kwargs = {})
triton_poi_fused_add_mul_sigmoid_sub_0 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_sub_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (0))
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + (x0), xmask)
tmp4 = tl.load(in_ptr2 + (0))
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp3 = tl.sigmoid(tmp2)
tmp6 = tmp5 - tmp1
tmp7 = tmp3 * tmp6
tmp8 = tmp1 + tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (), ())
assert_size_stride(arg2_1, (), ())
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [val, sub, mul, add], Original ATen: [aten.sigmoid, aten.sub, aten.mul, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_sigmoid_sub_0.run(arg1_1, arg0_1, arg2_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((), (), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((), (), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import numpy as np
class Return(torch.nn.Module):
def __init__(self, discount_factor):
super().__init__()
assert 0 <= discount_factor < 1
self.coefficient = 1 / (1 - discount_factor)
self.min_reward = np.float32(-1)
self.max_reward = np.float32(1)
self._low = torch.nn.Parameter(torch.as_tensor(self.coefficient *
self.min_reward, dtype=torch.float32), requires_grad=False)
self._high = torch.nn.Parameter(torch.as_tensor(self.coefficient *
self.max_reward, dtype=torch.float32), requires_grad=False)
def forward(self, val):
val = torch.sigmoid(val)
return self._low + val * (self._high - self._low)
def record(self, values):
for val in values:
if val < self.min_reward:
self.min_reward = np.float32(val)
elif val > self.max_reward:
self.max_reward = np.float32(val)
def update(self):
self._update(self.min_reward, self.max_reward)
def _update(self, min_reward, max_reward):
self._low.data.copy_(torch.as_tensor(self.coefficient * min_reward,
dtype=torch.float32))
self._high.data.copy_(torch.as_tensor(self.coefficient * max_reward,
dtype=torch.float32))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'discount_factor': 0}]
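# Hedged usage sketch (not part of the original source): with
# discount_factor=0 the coefficient is 1, so outputs are squashed into
# (_low, _high) = (-1, 1); record()/update() then widen that range from
# observed rewards.
def _demo_return():
    torch.manual_seed(0)
    ret = Return(discount_factor=0)
    out = ret(torch.randn(4, 4, 4, 4) * 10)
    assert out.min() > -1 and out.max() < 1
    ret.record([-3.5, 2.0])  # observe rewards outside the initial range
    ret.update()             # _low/_high become -3.5 and 2.0
    return out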
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import numpy as np
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_sigmoid_sub_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK])
tmp2 = tl.load(in_ptr1 + x0, xmask)
tmp4 = tl.load(in_ptr2 + 0)
tmp5 = tl.broadcast_to(tmp4, [XBLOCK])
tmp3 = tl.sigmoid(tmp2)
tmp6 = tmp5 - tmp1
tmp7 = tmp3 * tmp6
tmp8 = tmp1 + tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (), ())
assert_size_stride(arg2_1, (), ())
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mul_sigmoid_sub_0[grid(256)](arg1_1, arg0_1,
arg2_1, buf0, 256, XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
return buf0,
class ReturnNew(torch.nn.Module):
def __init__(self, discount_factor):
super().__init__()
assert 0 <= discount_factor < 1
self.coefficient = 1 / (1 - discount_factor)
self.min_reward = np.float32(-1)
self.max_reward = np.float32(1)
self._low = torch.nn.Parameter(torch.as_tensor(self.coefficient *
self.min_reward, dtype=torch.float32), requires_grad=False)
self._high = torch.nn.Parameter(torch.as_tensor(self.coefficient *
self.max_reward, dtype=torch.float32), requires_grad=False)
def record(self, values):
for val in values:
if val < self.min_reward:
self.min_reward = np.float32(val)
elif val > self.max_reward:
self.max_reward = np.float32(val)
def update(self):
self._update(self.min_reward, self.max_reward)
def _update(self, min_reward, max_reward):
self._low.data.copy_(torch.as_tensor(self.coefficient * min_reward,
dtype=torch.float32))
self._high.data.copy_(torch.as_tensor(self.coefficient * max_reward,
dtype=torch.float32))
def forward(self, input_0):
arg1_1 = self._low
arg2_1 = self._high
arg0_1 = input_0
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
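# Hedged parity sketch (assumes a CUDA device; call() is specialized to a
# 4x4x4x4 float32 input and scalar _low/_high parameters):
def _check_return_parity():
    torch.manual_seed(0)
    fused = ReturnNew(discount_factor=0).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    eager = fused._low + torch.sigmoid(x) * (fused._high - fused._low)
    assert torch.allclose(eager, fused(x), atol=1e-6)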
|
Eyalcohenx/tonic
|
Return
| false | 13,667 |
[
"MIT"
] | 350 |
afc15c6fa23fed4f696f68f0acf961964b0172dc
|
https://github.com/Eyalcohenx/tonic/tree/afc15c6fa23fed4f696f68f0acf961964b0172dc
|
SegmentationLoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/a4/ca4guingxxyfvp2zwcpqsunkvad3znrec5ubmsxi4erqtjmnzsuu.py
# Topologically Sorted Source Nodes: [mul_10, signs, mul_11, error, sort, label_sorted, label_sum, cumsum, sub_9, cumsum_1, mul, sum_1, sum_2, sum_3], Original ATen: [aten.mul, aten.sub, aten.rsub, aten.sort, aten.index, aten.sum, aten.cumsum]
# Source node to ATen node mapping:
# cumsum => cumsum
# cumsum_1 => cumsum_1
# error => sub_7
# label_sorted => index
# label_sum => sum_5
# mul => mul
# mul_10 => mul_10
# mul_11 => mul_11
# signs => sub_6
# sort => sort
# sub_9 => sub_9
# sum_1 => sum_1
# sum_2 => sum_2
# sum_3 => sum_3
# Graph fragment:
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, 2.0), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_10, 1.0), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, %sub_6), kwargs = {})
# %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %mul_11), kwargs = {})
# %sort : [num_users=2] = call_function[target=torch.ops.aten.sort.default](args = (%sub_7, 0, True), kwargs = {})
# %index : [num_users=3] = call_function[target=torch.ops.aten.index.Tensor](args = (%view_3, [%getitem_1]), kwargs = {})
# %sum_5 : [num_users=2] = call_function[target=torch.ops.aten.sum.default](args = (%index,), kwargs = {})
# %cumsum : [num_users=1] = call_function[target=torch.ops.aten.cumsum.default](args = (%index, 0), kwargs = {})
# %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %index), kwargs = {})
# %cumsum_1 : [num_users=1] = call_function[target=torch.ops.aten.cumsum.default](args = (%sub_9, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view, %view_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul,), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%view,), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%view_1,), kwargs = {})
triton_per_fused_cumsum_index_mul_rsub_sort_sub_sum_0 = async_compile.triton('triton_per_fused_cumsum_index_mul_rsub_sort_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton.jit
def _triton_helper_fn_add0(arg0_0, arg1_0):
tmp0 = arg0_0 + arg1_0
return tmp0
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {9: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 10), equal_to_1=(9,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_cumsum_index_mul_rsub_sort_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_cumsum_index_mul_rsub_sort_sub_sum_0(in_ptr0, in_ptr1, out_ptr0, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tl.load(in_ptr1 + (r0), None)
tmp2 = 2.0
tmp3 = tmp1 * tmp2
tmp4 = 1.0
tmp5 = tmp3 - tmp4
tmp6 = tmp0 * tmp5
tmp7 = tmp4 - tmp6
tmp8 = r0
tmp9 = tmp8.to(tl.int16)
tmp10 = tl.broadcast_to(tmp7, [RBLOCK])
tmp11 = tl.broadcast_to(tmp9, [RBLOCK])
tmp12, tmp13, = triton_helpers.sort_with_index(tmp10, tmp11, None, 0, stable=False, descending=True)
tmp14 = tmp0 * tmp1
tmp15 = tl.broadcast_to(tmp14, [RBLOCK])
tmp17 = triton_helpers.promote_to_tensor(tl.sum(tmp15, 0))
tmp18 = tl.broadcast_to(tmp0, [RBLOCK])
tmp20 = triton_helpers.promote_to_tensor(tl.sum(tmp18, 0))
tmp21 = tl.broadcast_to(tmp1, [RBLOCK])
tmp23 = triton_helpers.promote_to_tensor(tl.sum(tmp21, 0))
tmp24 = tmp13.to(tl.int64)
tmp25 = tl.full([RBLOCK], 256, tl.int32)
tmp26 = tmp24 + tmp25
tmp27 = tmp24 < 0
tmp28 = tl.where(tmp27, tmp26, tmp24)
tl.device_assert((0 <= tmp28) & (tmp28 < 256), "index out of bounds: 0 <= tmp28 < 256")
tmp30 = tl.load(in_ptr1 + (tmp28), None, eviction_policy='evict_last')
tmp31 = tl.broadcast_to(tmp30, [RBLOCK])
tmp33 = triton_helpers.promote_to_tensor(tl.sum(tmp31, 0))
tmp34 = tmp30.to(tl.float32)
tmp35 = tl.broadcast_to(tmp34, [RBLOCK])
tmp36, = tl.associative_scan((tmp35,), 0, _triton_helper_fn_add0)
tmp37 = tmp4 - tmp30
tmp38 = tmp37.to(tl.float32)
tmp39 = tl.broadcast_to(tmp38, [RBLOCK])
tmp40, = tl.associative_scan((tmp39,), 0, _triton_helper_fn_add0)
tl.store(out_ptr0 + (tl.broadcast_to(r0, [RBLOCK])), tmp12, None)
tl.store(out_ptr6 + (tl.broadcast_to(r0, [RBLOCK])), tmp36, None)
tl.store(out_ptr7 + (tl.broadcast_to(r0, [RBLOCK])), tmp40, None)
tl.store(out_ptr2 + (tl.full([1], 0, tl.int32)), tmp17, None)
tl.store(out_ptr3 + (tl.full([1], 0, tl.int32)), tmp20, None)
tl.store(out_ptr4 + (tl.full([1], 0, tl.int32)), tmp23, None)
tl.store(out_ptr5 + (tl.full([1], 0, tl.int32)), tmp33, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/pd/cpdhpdorbxdcg45pirfazewdxiopgjlazao2bvn3ya4mqocpclwm.py
# Topologically Sorted Source Nodes: [mul_5, sub_3, sub_4, mul_6, focal_factor, sub_5, pow_1, clamp, log, mul_3, sub_1, sub_2, clamp_1, log_1, mul_4, add_3, binary_cross_entropy_loss, mul_7, mul_8, sum_4, loss_1], Original ATen: [aten.mul, aten.rsub, aten.add, aten.pow, aten.clamp, aten.log, aten.neg, aten.sum, aten.mean]
# Source node to ATen node mapping:
# add_3 => add_3
# binary_cross_entropy_loss => neg
# clamp => clamp_min
# clamp_1 => clamp_min_1
# focal_factor => add_4
# log => log
# log_1 => log_1
# loss_1 => mean
# mul_3 => mul_3
# mul_4 => mul_4
# mul_5 => mul_5
# mul_6 => mul_6
# mul_7 => mul_7
# mul_8 => mul_8
# pow_1 => pow_1
# sub_1 => sub_1
# sub_2 => sub_2
# sub_3 => sub_3
# sub_4 => sub_4
# sub_5 => sub_5
# sum_4 => sum_4
# Graph fragment:
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg0_1, %arg1_1), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %arg0_1), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %arg1_1), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %sub_4), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_5, %mul_6), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %add_4), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_5, 2.0), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%arg0_1, 1e-12), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%clamp_min,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %log), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %arg1_1), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %arg0_1), kwargs = {})
# %clamp_min_1 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_2, 1e-12), kwargs = {})
# %log_1 : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%clamp_min_1,), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %log_1), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %mul_4), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%add_3,), kwargs = {})
# %mul_7 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%pow_1, %neg), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_7, 0.25), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_8, [1]), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_4,), kwargs = {})
triton_per_fused_add_clamp_log_mean_mul_neg_pow_rsub_sum_1 = async_compile.triton('triton_per_fused_add_clamp_log_mean_mul_neg_pow_rsub_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_clamp_log_mean_mul_neg_pow_rsub_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_clamp_log_mean_mul_neg_pow_rsub_sum_1(in_ptr0, in_ptr1, out_ptr1, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = (rindex // 16)
r2 = rindex
tmp0 = tl.load(in_ptr0 + (r0 + (64*r1)), None)
tmp1 = tl.load(in_ptr1 + (r0 + (64*r1)), None)
tmp22 = tl.load(in_ptr0 + (16 + r0 + (64*r1)), None)
tmp23 = tl.load(in_ptr1 + (16 + r0 + (64*r1)), None)
tmp42 = tl.load(in_ptr0 + (32 + r0 + (64*r1)), None)
tmp43 = tl.load(in_ptr1 + (32 + r0 + (64*r1)), None)
tmp62 = tl.load(in_ptr0 + (48 + r0 + (64*r1)), None)
tmp63 = tl.load(in_ptr1 + (48 + r0 + (64*r1)), None)
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp0
tmp5 = tmp3 - tmp1
tmp6 = tmp4 * tmp5
tmp7 = tmp2 + tmp6
tmp8 = tmp3 - tmp7
tmp9 = tmp8 * tmp8
tmp10 = 1e-12
tmp11 = triton_helpers.maximum(tmp0, tmp10)
tmp12 = tl_math.log(tmp11)
tmp13 = tmp1 * tmp12
tmp14 = triton_helpers.maximum(tmp4, tmp10)
tmp15 = tl_math.log(tmp14)
tmp16 = tmp5 * tmp15
tmp17 = tmp13 + tmp16
tmp18 = -tmp17
tmp19 = tmp9 * tmp18
tmp20 = 0.25
tmp21 = tmp19 * tmp20
tmp24 = tmp22 * tmp23
tmp25 = tmp3 - tmp22
tmp26 = tmp3 - tmp23
tmp27 = tmp25 * tmp26
tmp28 = tmp24 + tmp27
tmp29 = tmp3 - tmp28
tmp30 = tmp29 * tmp29
tmp31 = triton_helpers.maximum(tmp22, tmp10)
tmp32 = tl_math.log(tmp31)
tmp33 = tmp23 * tmp32
tmp34 = triton_helpers.maximum(tmp25, tmp10)
tmp35 = tl_math.log(tmp34)
tmp36 = tmp26 * tmp35
tmp37 = tmp33 + tmp36
tmp38 = -tmp37
tmp39 = tmp30 * tmp38
tmp40 = tmp39 * tmp20
tmp41 = tmp21 + tmp40
tmp44 = tmp42 * tmp43
tmp45 = tmp3 - tmp42
tmp46 = tmp3 - tmp43
tmp47 = tmp45 * tmp46
tmp48 = tmp44 + tmp47
tmp49 = tmp3 - tmp48
tmp50 = tmp49 * tmp49
tmp51 = triton_helpers.maximum(tmp42, tmp10)
tmp52 = tl_math.log(tmp51)
tmp53 = tmp43 * tmp52
tmp54 = triton_helpers.maximum(tmp45, tmp10)
tmp55 = tl_math.log(tmp54)
tmp56 = tmp46 * tmp55
tmp57 = tmp53 + tmp56
tmp58 = -tmp57
tmp59 = tmp50 * tmp58
tmp60 = tmp59 * tmp20
tmp61 = tmp41 + tmp60
tmp64 = tmp62 * tmp63
tmp65 = tmp3 - tmp62
tmp66 = tmp3 - tmp63
tmp67 = tmp65 * tmp66
tmp68 = tmp64 + tmp67
tmp69 = tmp3 - tmp68
tmp70 = tmp69 * tmp69
tmp71 = triton_helpers.maximum(tmp62, tmp10)
tmp72 = tl_math.log(tmp71)
tmp73 = tmp63 * tmp72
tmp74 = triton_helpers.maximum(tmp65, tmp10)
tmp75 = tl_math.log(tmp74)
tmp76 = tmp66 * tmp75
tmp77 = tmp73 + tmp76
tmp78 = -tmp77
tmp79 = tmp70 * tmp78
tmp80 = tmp79 * tmp20
tmp81 = tmp61 + tmp80
tmp82 = tl.broadcast_to(tmp81, [XBLOCK, RBLOCK])
tmp84 = tl.sum(tmp82, 1)[:, None]
tl.store(out_ptr1 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp84, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/6m/c6mt24bg4otmnpljfp5gsz737s7p7tnwnu67u7ne6vi3k3cs6a6v.py
# Topologically Sorted Source Nodes: [intersection, union, truediv_1, iou, tensor, mul_1, add, add_1, add_2, truediv, loss, mul_2, loss_1, mul_9, add_5, relu, sub_11, loss_2, mul_12, add_7], Original ATen: [aten.sub, aten.add, aten.div, aten.rsub, aten.lift_fresh, aten.mul, aten.mean, aten.relu, aten.dot]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# add_5 => add_5
# add_7 => add_7
# intersection => sub_8
# iou => sub_10
# loss => sub
# loss_1 => mean
# loss_2 => mul_12, sum_6
# mul_1 => mul_1
# mul_12 => mul_13
# mul_2 => mul_2
# mul_9 => mul_9
# relu => relu
# sub_11 => sub_11
# tensor => full_default
# truediv => div
# truediv_1 => div_1
# union => add_6
# Graph fragment:
# %sub_8 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sum_5, %cumsum), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_5, %cumsum_1), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_8, %add_6), kwargs = {})
# %sub_10 : [num_users=4] = call_function[target=torch.ops.aten.sub.Tensor](args = (1.0, %div_1), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 1.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_1, 2.0), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, 1.0), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_2, %sum_3), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_1, 1.0), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add, %add_2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%full_default, %div), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, 1.0), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.default](args = (%sum_4,), kwargs = {})
# %mul_9 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mean, 0.2), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %mul_9), kwargs = {})
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%getitem,), kwargs = {})
# %sub_11 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_1, %slice_2), kwargs = {})
# %slice_scatter_default : [num_users=1] = call_function[target=torch.ops.aten.slice_scatter.default](args = (%sub_10, %sub_11, 0, 1, 9223372036854775807), kwargs = {})
# %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%relu, %slice_scatter_default), kwargs = {})
# %sum_6 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_12,), kwargs = {})
# %mul_13 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sum_6, 0.0), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, %mul_13), kwargs = {})
triton_per_fused_add_div_dot_lift_fresh_mean_mul_relu_rsub_sub_2 = async_compile.triton('triton_per_fused_add_div_dot_lift_fresh_mean_mul_relu_rsub_sub_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {8: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 9), equal_to_1=(8,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_dot_lift_fresh_mean_mul_relu_rsub_sub_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 12, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_dot_lift_fresh_mean_mul_relu_rsub_sub_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp6 = tl.load(in_out_ptr0 + (0))
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp24 = tl.load(in_ptr1 + (r0), None)
tmp26 = tl.load(in_ptr2 + (r0), None)
tmp35 = tl.load(in_ptr3 + (0))
tmp36 = tl.broadcast_to(tmp35, [1])
tmp40 = tl.load(in_ptr4 + (0))
tmp41 = tl.broadcast_to(tmp40, [1])
tmp42 = tl.load(in_ptr5 + (0))
tmp43 = tl.broadcast_to(tmp42, [1])
tmp49 = tl.load(in_ptr6 + (0))
tmp50 = tl.broadcast_to(tmp49, [1])
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = r0
tmp4 = tl.full([1], 1, tl.int64)
tmp5 = tmp3 >= tmp4
tmp8 = tl.load(in_ptr1 + (tl.broadcast_to(r0, [RBLOCK])), tmp5, other=0.0)
tmp9 = tmp7 - tmp8
tmp10 = tl.load(in_ptr2 + (tl.broadcast_to(r0, [RBLOCK])), tmp5, other=0.0)
tmp11 = tmp7 + tmp10
tmp12 = tmp9 / tmp11
tmp13 = 1.0
tmp14 = tmp13 - tmp12
tmp15 = tl.load(in_ptr1 + (tl.broadcast_to((-1) + r0, [RBLOCK])), tmp5, other=0.0)
tmp16 = tmp7 - tmp15
tmp17 = tl.load(in_ptr2 + (tl.broadcast_to((-1) + r0, [RBLOCK])), tmp5, other=0.0)
tmp18 = tmp7 + tmp17
tmp19 = tmp16 / tmp18
tmp20 = tmp13 - tmp19
tmp21 = tmp14 - tmp20
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp5, tmp21, tmp22)
tmp25 = tmp7 - tmp24
tmp27 = tmp7 + tmp26
tmp28 = tmp25 / tmp27
tmp29 = tmp13 - tmp28
tmp30 = tl.where(tmp5, tmp23, tmp29)
tmp31 = tmp2 * tmp30
tmp32 = tl.broadcast_to(tmp31, [RBLOCK])
tmp34 = triton_helpers.promote_to_tensor(tl.sum(tmp32, 0))
tmp37 = 2.0
tmp38 = tmp36 * tmp37
tmp39 = tmp38 + tmp13
tmp44 = tmp41 + tmp43
tmp45 = tmp44 + tmp13
tmp46 = tmp39 / tmp45
tmp47 = tmp13 - tmp46
tmp48 = tmp47 * tmp13
tmp51 = 64.0
tmp52 = tmp50 / tmp51
tmp53 = 0.2
tmp54 = tmp52 * tmp53
tmp55 = tmp48 + tmp54
tmp56 = 0.0
tmp57 = tmp34 * tmp56
tmp58 = tmp55 + tmp57
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp58, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((256, ), (1, ), torch.float32)
buf5 = empty_strided_cuda((), (), torch.float32)
buf6 = empty_strided_cuda((), (), torch.float32)
buf7 = empty_strided_cuda((), (), torch.float32)
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = empty_strided_cuda((256, ), (1, ), torch.float32)
buf4 = empty_strided_cuda((256, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [mul_10, signs, mul_11, error, sort, label_sorted, label_sum, cumsum, sub_9, cumsum_1, mul, sum_1, sum_2, sum_3], Original ATen: [aten.mul, aten.sub, aten.rsub, aten.sort, aten.index, aten.sum, aten.cumsum]
stream0 = get_raw_stream(0)
triton_per_fused_cumsum_index_mul_rsub_sort_sub_sum_0.run(arg0_1, arg1_1, buf0, buf5, buf6, buf7, buf2, buf3, buf4, 1, 256, grid=grid(1), stream=stream0)
buf9 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [mul_5, sub_3, sub_4, mul_6, focal_factor, sub_5, pow_1, clamp, log, mul_3, sub_1, sub_2, clamp_1, log_1, mul_4, add_3, binary_cross_entropy_loss, mul_7, mul_8, sum_4, loss_1], Original ATen: [aten.mul, aten.rsub, aten.add, aten.pow, aten.clamp, aten.log, aten.neg, aten.sum, aten.mean]
triton_per_fused_add_clamp_log_mean_mul_neg_pow_rsub_sum_1.run(arg0_1, arg1_1, buf9, 1, 64, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
buf10 = buf2; del buf2 # reuse
buf11 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [intersection, union, truediv_1, iou, tensor, mul_1, add, add_1, add_2, truediv, loss, mul_2, loss_1, mul_9, add_5, relu, sub_11, loss_2, mul_12, add_7], Original ATen: [aten.sub, aten.add, aten.div, aten.rsub, aten.lift_fresh, aten.mul, aten.mean, aten.relu, aten.dot]
triton_per_fused_add_div_dot_lift_fresh_mean_mul_relu_rsub_sub_2.run(buf11, buf0, buf3, buf4, buf5, buf6, buf7, buf9, 1, 256, grid=grid(1), stream=stream0)
del buf0
del buf3
del buf4
del buf5
del buf6
del buf7
del buf9
return (buf11, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class LovaszHingeLoss(nn.Module):
"""
    This class implements the Lovász hinge loss, a continuous surrogate of the IoU for binary segmentation.
Source: https://github.com/bermanmaxim/LovaszSoftmax
"""
def __init__(self) ->None:
"""
Constructor method
"""
super(LovaszHingeLoss, self).__init__()
def _calc_grad(self, label_sorted: 'torch.Tensor') ->torch.Tensor:
"""
Method computes the gradients of the sorted and flattened label
:param label_sorted: (torch.Tensor) Sorted and flattened label of shape [n]
:return: (torch.Tensor) Gradient tensor
"""
label_sum = label_sorted.sum()
intersection = label_sum - label_sorted.cumsum(dim=0)
union = label_sum + (1 - label_sorted).cumsum(dim=0)
iou = 1.0 - intersection / union
iou[1:] = iou[1:] - iou[0:-1]
return iou
def forward(self, prediction: 'torch.Tensor', label: 'torch.Tensor'
) ->torch.Tensor:
"""
        Forward pass computes the Lovász hinge loss
        :param prediction: (torch.Tensor) Prediction
        :param label: (torch.Tensor) Label
        :return: (torch.Tensor) Lovász hinge loss value
"""
prediction = prediction.flatten(start_dim=0)
label = label.flatten(start_dim=0)
signs = 2.0 * label - 1.0
error = 1.0 - prediction * signs
errors_sorted, permutation = torch.sort(error, dim=0, descending=True)
label_sorted = label[permutation]
grad = self._calc_grad(label_sorted)
loss = torch.dot(F.relu(errors_sorted), grad)
return loss
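# Illustrative sanity check for _calc_grad (added for exposition; the
# `_demo_*` names are not part of the original repo). For
# label_sorted = [1, 1, 0, 1]: label_sum = 3, so
#   intersection = 3 - cumsum([1, 1, 0, 1]) = [2, 1, 1, 0]
#   union        = 3 + cumsum([0, 0, 1, 0]) = [3, 3, 4, 4]
#   iou          = 1 - intersection / union = [1/3, 2/3, 3/4, 1]
# and first-differencing yields the Jaccard gradient [1/3, 1/3, 1/12, 1/4].
_demo_grad = LovaszHingeLoss()._calc_grad(torch.tensor([1.0, 1.0, 0.0, 1.0]))
assert torch.allclose(_demo_grad, torch.tensor([1 / 3, 1 / 3, 1 / 12, 1 / 4]))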
class DiceLoss(nn.Module):
"""
This class implements the dice loss for multiple instances
"""
def __init__(self, smooth_factor: 'float'=1.0) ->None:
super(DiceLoss, self).__init__()
self.smooth_factor = smooth_factor
def __repr__(self):
"""
Get representation of the loss module
:return: (str) String including information
"""
        return '{}, smooth factor={}'.format(self.__class__.__name__,
            self.smooth_factor)
def forward(self, prediction: 'torch.Tensor', label: 'torch.Tensor'
) ->torch.Tensor:
"""
Forward pass computes the dice loss
:param prediction: (torch.Tensor) Prediction
:param label: (torch.Tensor) Label
:return: (torch.Tensor) Dice loss value
"""
prediction = prediction.flatten(start_dim=0)
label = label.flatten(start_dim=0)
        loss = torch.tensor(1.0, dtype=torch.float32, device=prediction.device) - (
            2.0 * torch.sum(torch.mul(prediction, label)) + self.smooth_factor
        ) / (torch.sum(prediction) + torch.sum(label) + self.smooth_factor)
return loss
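# Illustrative worked example (added for exposition; `_demo_ones` is not part
# of the original repo): a perfect prediction gives zero loss, since
# 1 - (2 * 4 + 1) / (4 + 4 + 1) = 0 with the default smooth_factor of 1.0.
_demo_ones = torch.ones(4)
assert torch.isclose(DiceLoss()(_demo_ones, _demo_ones), torch.tensor(0.0))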
class FocalLoss(nn.Module):
"""
This class implements the segmentation focal loss.
https://arxiv.org/abs/1708.02002
"""
def __init__(self, alpha: 'float'=0.25, gamma: 'float'=2.0) ->None:
"""
Constructor method
:param alpha: (float) Alpha constant
:param gamma: (float) Gamma constant (see paper)
"""
super(FocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
def __repr__(self):
"""
Get representation of the loss module
:return: (str) String including information
"""
return '{}, alpha={}, gamma={}'.format(self.__class__.__name__,
self.alpha, self.gamma)
def forward(self, prediction: 'torch.Tensor', label: 'torch.Tensor'
) ->torch.Tensor:
"""
Forward pass computes the binary cross entropy loss of segmentation masks
:param prediction: (torch.Tensor) Prediction probability
:param label: (torch.Tensor) Label one-hot encoded
:return: (torch.Tensor) Loss value
"""
        binary_cross_entropy_loss = -(label *
            torch.log(prediction.clamp(min=1e-12)) + (1.0 - label) *
            torch.log((1.0 - prediction).clamp(min=1e-12)))
focal_factor = prediction * label + (1.0 - prediction) * (1.0 - label)
loss = ((1.0 - focal_factor) ** self.gamma *
binary_cross_entropy_loss * self.alpha).sum(dim=1).mean()
return loss
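# Illustrative worked example (added for exposition, not part of the original
# repo): for p = 0.5 on a positive label, BCE = log 2 and focal_factor = 0.5,
# so with alpha = 0.25 and gamma = 2 the loss is 0.25 * 0.5 ** 2 * log 2
# = log(2) / 16.
assert torch.isclose(FocalLoss()(torch.full((1, 1), 0.5), torch.ones(1, 1)),
    torch.log(torch.tensor(2.0)) / 16.0)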
class SegmentationLoss(nn.Module):
"""
    This class implements the segmentation loss.
"""
    def __init__(self, dice_loss: 'nn.Module'=DiceLoss(),
            focal_loss: 'nn.Module'=FocalLoss(),
            lovasz_hinge_loss: 'nn.Module'=LovaszHingeLoss(),
            w_dice: 'float'=1.0, w_focal: 'float'=0.2,
            w_lovasz_hinge: 'float'=0.0) ->None:
super(SegmentationLoss, self).__init__()
self.dice_loss = dice_loss
self.focal_loss = focal_loss
self.lovasz_hinge_loss = lovasz_hinge_loss
self.w_dice = w_dice
self.w_focal = w_focal
self.w_lovasz_hinge = w_lovasz_hinge
def __repr__(self):
"""
Get representation of the loss module
:return: (str) String including information
"""
        return ('{}, {}, w_dice={}, {}, w_focal={}, {}, w_lovasz_hinge={}'
            .format(self.__class__.__name__,
            self.dice_loss.__class__.__name__, self.w_dice,
            self.focal_loss.__class__.__name__, self.w_focal,
            self.lovasz_hinge_loss.__class__.__name__, self.w_lovasz_hinge))
def forward(self, prediction: 'torch.Tensor', label: 'torch.Tensor'
) ->torch.Tensor:
"""
Forward pass computes the segmentation loss
:param prediction: (torch.Tensor) Prediction
:param label: (torch.Tensor) Label
:return: (torch.Tensor) Loss value
"""
return self.w_dice * self.dice_loss(prediction, label
) + self.w_focal * self.focal_loss(prediction, label
) + self.w_lovasz_hinge * self.lovasz_hinge_loss(prediction, label)
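# Illustrative usage sketch (added for exposition; the `_demo_*` names are not
# part of the original repo): with the default weights the objective is
# 1.0 * dice + 0.2 * focal + 0.0 * lovasz_hinge, matching the constants
# (1.0, 0.2, 0.0) folded into the fused Triton kernels above.
_demo_prediction = torch.rand(4, 4, 4, 4)
_demo_label = torch.randint(0, 2, (4, 4, 4, 4)).float()
_demo_loss = SegmentationLoss()(_demo_prediction, _demo_label)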
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def _triton_helper_fn_add0(arg0_0, arg1_0):
tmp0 = arg0_0 + arg1_0
return tmp0
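# The kernel below fuses the Lovász preprocessing with the dice reductions:
# it forms the hinge errors 1 - prediction * (2 * label - 1), sorts them
# descending together with their indices, gathers the labels in sorted order,
# and emits cumsum(label_sorted) and cumsum(1 - label_sorted) for the IoU
# gradient plus sum(pred * label), sum(pred) and sum(label) for the dice term.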
@triton.jit
def triton_per_fused_cumsum_index_mul_rsub_sort_sub_sum_0(in_ptr0, in_ptr1,
out_ptr0, out_ptr2, out_ptr3, out_ptr4, out_ptr5, out_ptr6, out_ptr7,
xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tl.load(in_ptr1 + r0, None)
tmp2 = 2.0
tmp3 = tmp1 * tmp2
tmp4 = 1.0
tmp5 = tmp3 - tmp4
tmp6 = tmp0 * tmp5
tmp7 = tmp4 - tmp6
tmp8 = r0
tmp9 = tmp8.to(tl.int16)
tmp10 = tl.broadcast_to(tmp7, [RBLOCK])
tmp11 = tl.broadcast_to(tmp9, [RBLOCK])
tmp12, tmp13 = triton_helpers.sort_with_index(tmp10, tmp11, None, 0,
stable=False, descending=True)
tmp14 = tmp0 * tmp1
tmp15 = tl.broadcast_to(tmp14, [RBLOCK])
tmp17 = triton_helpers.promote_to_tensor(tl.sum(tmp15, 0))
tmp18 = tl.broadcast_to(tmp0, [RBLOCK])
tmp20 = triton_helpers.promote_to_tensor(tl.sum(tmp18, 0))
tmp21 = tl.broadcast_to(tmp1, [RBLOCK])
tmp23 = triton_helpers.promote_to_tensor(tl.sum(tmp21, 0))
tmp24 = tmp13.to(tl.int64)
tmp25 = tl.full([RBLOCK], 256, tl.int32)
tmp26 = tmp24 + tmp25
tmp27 = tmp24 < 0
tmp28 = tl.where(tmp27, tmp26, tmp24)
tl.device_assert((0 <= tmp28) & (tmp28 < 256),
'index out of bounds: 0 <= tmp28 < 256')
tmp30 = tl.load(in_ptr1 + tmp28, None, eviction_policy='evict_last')
tmp31 = tl.broadcast_to(tmp30, [RBLOCK])
tmp33 = triton_helpers.promote_to_tensor(tl.sum(tmp31, 0))
tmp34 = tmp30.to(tl.float32)
tmp35 = tl.broadcast_to(tmp34, [RBLOCK])
tmp36, = tl.associative_scan((tmp35,), 0, _triton_helper_fn_add0)
tmp37 = tmp4 - tmp30
tmp38 = tmp37.to(tl.float32)
tmp39 = tl.broadcast_to(tmp38, [RBLOCK])
tmp40, = tl.associative_scan((tmp39,), 0, _triton_helper_fn_add0)
tl.store(out_ptr0 + tl.broadcast_to(r0, [RBLOCK]), tmp12, None)
tl.store(out_ptr6 + tl.broadcast_to(r0, [RBLOCK]), tmp36, None)
tl.store(out_ptr7 + tl.broadcast_to(r0, [RBLOCK]), tmp40, None)
tl.store(out_ptr2 + tl.full([1], 0, tl.int32), tmp17, None)
tl.store(out_ptr3 + tl.full([1], 0, tl.int32), tmp20, None)
tl.store(out_ptr4 + tl.full([1], 0, tl.int32), tmp23, None)
tl.store(out_ptr5 + tl.full([1], 0, tl.int32), tmp33, None)
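# The kernel below evaluates the focal loss: the dim=1 reduction over the
# (4, 4, 4, 4) input is unrolled into four channel slices (offsets 0, 16, 32,
# 48), with alpha = 0.25 and gamma = 2 (e.g. tmp9 = tmp8 * tmp8 realizes
# (1 - focal_factor) ** 2 for the first slice).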
@triton.jit
def triton_per_fused_add_clamp_log_mean_mul_neg_pow_rsub_sum_1(in_ptr0,
in_ptr1, out_ptr1, xnumel, rnumel, XBLOCK: tl.constexpr):
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r0 = rindex % 16
r1 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r0 + 64 * r1), None)
tmp1 = tl.load(in_ptr1 + (r0 + 64 * r1), None)
tmp22 = tl.load(in_ptr0 + (16 + r0 + 64 * r1), None)
tmp23 = tl.load(in_ptr1 + (16 + r0 + 64 * r1), None)
tmp42 = tl.load(in_ptr0 + (32 + r0 + 64 * r1), None)
tmp43 = tl.load(in_ptr1 + (32 + r0 + 64 * r1), None)
tmp62 = tl.load(in_ptr0 + (48 + r0 + 64 * r1), None)
tmp63 = tl.load(in_ptr1 + (48 + r0 + 64 * r1), None)
tmp2 = tmp0 * tmp1
tmp3 = 1.0
tmp4 = tmp3 - tmp0
tmp5 = tmp3 - tmp1
tmp6 = tmp4 * tmp5
tmp7 = tmp2 + tmp6
tmp8 = tmp3 - tmp7
tmp9 = tmp8 * tmp8
tmp10 = 1e-12
tmp11 = triton_helpers.maximum(tmp0, tmp10)
tmp12 = tl_math.log(tmp11)
tmp13 = tmp1 * tmp12
tmp14 = triton_helpers.maximum(tmp4, tmp10)
tmp15 = tl_math.log(tmp14)
tmp16 = tmp5 * tmp15
tmp17 = tmp13 + tmp16
tmp18 = -tmp17
tmp19 = tmp9 * tmp18
tmp20 = 0.25
tmp21 = tmp19 * tmp20
tmp24 = tmp22 * tmp23
tmp25 = tmp3 - tmp22
tmp26 = tmp3 - tmp23
tmp27 = tmp25 * tmp26
tmp28 = tmp24 + tmp27
tmp29 = tmp3 - tmp28
tmp30 = tmp29 * tmp29
tmp31 = triton_helpers.maximum(tmp22, tmp10)
tmp32 = tl_math.log(tmp31)
tmp33 = tmp23 * tmp32
tmp34 = triton_helpers.maximum(tmp25, tmp10)
tmp35 = tl_math.log(tmp34)
tmp36 = tmp26 * tmp35
tmp37 = tmp33 + tmp36
tmp38 = -tmp37
tmp39 = tmp30 * tmp38
tmp40 = tmp39 * tmp20
tmp41 = tmp21 + tmp40
tmp44 = tmp42 * tmp43
tmp45 = tmp3 - tmp42
tmp46 = tmp3 - tmp43
tmp47 = tmp45 * tmp46
tmp48 = tmp44 + tmp47
tmp49 = tmp3 - tmp48
tmp50 = tmp49 * tmp49
tmp51 = triton_helpers.maximum(tmp42, tmp10)
tmp52 = tl_math.log(tmp51)
tmp53 = tmp43 * tmp52
tmp54 = triton_helpers.maximum(tmp45, tmp10)
tmp55 = tl_math.log(tmp54)
tmp56 = tmp46 * tmp55
tmp57 = tmp53 + tmp56
tmp58 = -tmp57
tmp59 = tmp50 * tmp58
tmp60 = tmp59 * tmp20
tmp61 = tmp41 + tmp60
tmp64 = tmp62 * tmp63
tmp65 = tmp3 - tmp62
tmp66 = tmp3 - tmp63
tmp67 = tmp65 * tmp66
tmp68 = tmp64 + tmp67
tmp69 = tmp3 - tmp68
tmp70 = tmp69 * tmp69
tmp71 = triton_helpers.maximum(tmp62, tmp10)
tmp72 = tl_math.log(tmp71)
tmp73 = tmp63 * tmp72
tmp74 = triton_helpers.maximum(tmp65, tmp10)
tmp75 = tl_math.log(tmp74)
tmp76 = tmp66 * tmp75
tmp77 = tmp73 + tmp76
tmp78 = -tmp77
tmp79 = tmp70 * tmp78
tmp80 = tmp79 * tmp20
tmp81 = tmp61 + tmp80
tmp82 = tl.broadcast_to(tmp81, [XBLOCK, RBLOCK])
tmp84 = tl.sum(tmp82, 1)[:, None]
tl.store(out_ptr1 + tl.full([XBLOCK, 1], 0, tl.int32), tmp84, None)
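# The final fused reduction below reconstructs the Lovász IoU gradient from
# the cumulative sums, dots it with relu(errors_sorted), and combines the
# terms as 1.0 * dice + 0.2 * focal + 0.0 * lovasz_hinge (the default
# w_lovasz_hinge = 0.0 appears as the `tmp34 * 0.0` term).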
@triton.jit
def triton_per_fused_add_div_dot_lift_fresh_mean_mul_relu_rsub_sub_2(
in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5,
in_ptr6, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp6 = tl.load(in_out_ptr0 + 0)
tmp7 = tl.broadcast_to(tmp6, [RBLOCK])
tmp24 = tl.load(in_ptr1 + r0, None)
tmp26 = tl.load(in_ptr2 + r0, None)
tmp35 = tl.load(in_ptr3 + 0)
tmp36 = tl.broadcast_to(tmp35, [1])
tmp40 = tl.load(in_ptr4 + 0)
tmp41 = tl.broadcast_to(tmp40, [1])
tmp42 = tl.load(in_ptr5 + 0)
tmp43 = tl.broadcast_to(tmp42, [1])
tmp49 = tl.load(in_ptr6 + 0)
tmp50 = tl.broadcast_to(tmp49, [1])
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp3 = r0
tmp4 = tl.full([1], 1, tl.int64)
tmp5 = tmp3 >= tmp4
tmp8 = tl.load(in_ptr1 + tl.broadcast_to(r0, [RBLOCK]), tmp5, other=0.0)
tmp9 = tmp7 - tmp8
tmp10 = tl.load(in_ptr2 + tl.broadcast_to(r0, [RBLOCK]), tmp5, other=0.0)
tmp11 = tmp7 + tmp10
tmp12 = tmp9 / tmp11
tmp13 = 1.0
tmp14 = tmp13 - tmp12
tmp15 = tl.load(in_ptr1 + tl.broadcast_to(-1 + r0, [RBLOCK]), tmp5,
other=0.0)
tmp16 = tmp7 - tmp15
tmp17 = tl.load(in_ptr2 + tl.broadcast_to(-1 + r0, [RBLOCK]), tmp5,
other=0.0)
tmp18 = tmp7 + tmp17
tmp19 = tmp16 / tmp18
tmp20 = tmp13 - tmp19
tmp21 = tmp14 - tmp20
tmp22 = tl.full(tmp21.shape, 0.0, tmp21.dtype)
tmp23 = tl.where(tmp5, tmp21, tmp22)
tmp25 = tmp7 - tmp24
tmp27 = tmp7 + tmp26
tmp28 = tmp25 / tmp27
tmp29 = tmp13 - tmp28
tmp30 = tl.where(tmp5, tmp23, tmp29)
tmp31 = tmp2 * tmp30
tmp32 = tl.broadcast_to(tmp31, [RBLOCK])
tmp34 = triton_helpers.promote_to_tensor(tl.sum(tmp32, 0))
tmp37 = 2.0
tmp38 = tmp36 * tmp37
tmp39 = tmp38 + tmp13
tmp44 = tmp41 + tmp43
tmp45 = tmp44 + tmp13
tmp46 = tmp39 / tmp45
tmp47 = tmp13 - tmp46
tmp48 = tmp47 * tmp13
tmp51 = 64.0
tmp52 = tmp50 / tmp51
tmp53 = 0.2
tmp54 = tmp52 * tmp53
tmp55 = tmp48 + tmp54
tmp56 = 0.0
tmp57 = tmp34 * tmp56
tmp58 = tmp55 + tmp57
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp58, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((256,), (1,), torch.float32)
buf5 = empty_strided_cuda((), (), torch.float32)
buf6 = empty_strided_cuda((), (), torch.float32)
buf7 = empty_strided_cuda((), (), torch.float32)
buf2 = empty_strided_cuda((), (), torch.float32)
buf3 = empty_strided_cuda((256,), (1,), torch.float32)
buf4 = empty_strided_cuda((256,), (1,), torch.float32)
get_raw_stream(0)
triton_per_fused_cumsum_index_mul_rsub_sort_sub_sum_0[grid(1)](arg0_1,
arg1_1, buf0, buf5, buf6, buf7, buf2, buf3, buf4, 1, 256,
num_warps=2, num_stages=1)
buf9 = empty_strided_cuda((), (), torch.float32)
triton_per_fused_add_clamp_log_mean_mul_neg_pow_rsub_sum_1[grid(1)](
arg0_1, arg1_1, buf9, 1, 64, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
buf10 = buf2
del buf2
buf11 = buf10
del buf10
triton_per_fused_add_div_dot_lift_fresh_mean_mul_relu_rsub_sub_2[grid
(1)](buf11, buf0, buf3, buf4, buf5, buf6, buf7, buf9, 1, 256,
num_warps=2, num_stages=1)
del buf0
del buf3
del buf4
del buf5
del buf6
del buf7
del buf9
return buf11,
class LovaszHingeLoss(nn.Module):
"""
    This class implements the Lovász hinge loss, a continuous surrogate of the IoU for binary segmentation.
Source: https://github.com/bermanmaxim/LovaszSoftmax
"""
def __init__(self) ->None:
"""
Constructor method
"""
super(LovaszHingeLoss, self).__init__()
def _calc_grad(self, label_sorted: 'torch.Tensor') ->torch.Tensor:
"""
Method computes the gradients of the sorted and flattened label
:param label_sorted: (torch.Tensor) Sorted and flattened label of shape [n]
:return: (torch.Tensor) Gradient tensor
"""
label_sum = label_sorted.sum()
intersection = label_sum - label_sorted.cumsum(dim=0)
union = label_sum + (1 - label_sorted).cumsum(dim=0)
iou = 1.0 - intersection / union
iou[1:] = iou[1:] - iou[0:-1]
return iou
def forward(self, prediction: 'torch.Tensor', label: 'torch.Tensor'
) ->torch.Tensor:
"""
        Forward pass computes the Lovász hinge loss
        :param prediction: (torch.Tensor) Prediction
        :param label: (torch.Tensor) Label
        :return: (torch.Tensor) Lovász hinge loss value
"""
prediction = prediction.flatten(start_dim=0)
label = label.flatten(start_dim=0)
signs = 2.0 * label - 1.0
error = 1.0 - prediction * signs
errors_sorted, permutation = torch.sort(error, dim=0, descending=True)
label_sorted = label[permutation]
grad = self._calc_grad(label_sorted)
loss = torch.dot(F.relu(errors_sorted), grad)
return loss
class DiceLoss(nn.Module):
"""
This class implements the dice loss for multiple instances
"""
def __init__(self, smooth_factor: 'float'=1.0) ->None:
super(DiceLoss, self).__init__()
self.smooth_factor = smooth_factor
def __repr__(self):
"""
Get representation of the loss module
:return: (str) String including information
"""
        return '{}, smooth factor={}'.format(self.__class__.__name__,
            self.smooth_factor)
def forward(self, prediction: 'torch.Tensor', label: 'torch.Tensor'
) ->torch.Tensor:
"""
Forward pass computes the dice loss
:param prediction: (torch.Tensor) Prediction
:param label: (torch.Tensor) Label
:return: (torch.Tensor) Dice loss value
"""
prediction = prediction.flatten(start_dim=0)
label = label.flatten(start_dim=0)
        loss = torch.tensor(1.0, dtype=torch.float32, device=prediction.device) - (
            2.0 * torch.sum(torch.mul(prediction, label)) + self.smooth_factor
        ) / (torch.sum(prediction) + torch.sum(label) + self.smooth_factor)
return loss
class FocalLoss(nn.Module):
"""
This class implements the segmentation focal loss.
https://arxiv.org/abs/1708.02002
"""
def __init__(self, alpha: 'float'=0.25, gamma: 'float'=2.0) ->None:
"""
Constructor method
:param alpha: (float) Alpha constant
:param gamma: (float) Gamma constant (see paper)
"""
super(FocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
def __repr__(self):
"""
Get representation of the loss module
:return: (str) String including information
"""
return '{}, alpha={}, gamma={}'.format(self.__class__.__name__,
self.alpha, self.gamma)
def forward(self, prediction: 'torch.Tensor', label: 'torch.Tensor'
) ->torch.Tensor:
"""
Forward pass computes the binary cross entropy loss of segmentation masks
:param prediction: (torch.Tensor) Prediction probability
:param label: (torch.Tensor) Label one-hot encoded
:return: (torch.Tensor) Loss value
"""
        binary_cross_entropy_loss = -(label *
            torch.log(prediction.clamp(min=1e-12)) + (1.0 - label) *
            torch.log((1.0 - prediction).clamp(min=1e-12)))
focal_factor = prediction * label + (1.0 - prediction) * (1.0 - label)
loss = ((1.0 - focal_factor) ** self.gamma *
binary_cross_entropy_loss * self.alpha).sum(dim=1).mean()
return loss
class SegmentationLossNew(nn.Module):
"""
    This class implements the segmentation loss.
"""
    def __init__(self, dice_loss: 'nn.Module'=DiceLoss(),
            focal_loss: 'nn.Module'=FocalLoss(),
            lovasz_hinge_loss: 'nn.Module'=LovaszHingeLoss(),
            w_dice: 'float'=1.0, w_focal: 'float'=0.2,
            w_lovasz_hinge: 'float'=0.0) ->None:
super(SegmentationLossNew, self).__init__()
self.dice_loss = dice_loss
self.focal_loss = focal_loss
self.lovasz_hinge_loss = lovasz_hinge_loss
self.w_dice = w_dice
self.w_focal = w_focal
self.w_lovasz_hinge = w_lovasz_hinge
def __repr__(self):
"""
Get representation of the loss module
:return: (str) String including information
"""
        return ('{}, {}, w_dice={}, {}, w_focal={}, {}, w_lovasz_hinge={}'
            .format(self.__class__.__name__,
            self.dice_loss.__class__.__name__, self.w_dice,
            self.focal_loss.__class__.__name__, self.w_focal,
            self.lovasz_hinge_loss.__class__.__name__, self.w_lovasz_hinge))
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
ChristophReich1996/Cell-DETR
|
SegmentationLoss
| false | 13,668 |
[
"MIT"
] | 55 |
4d0c3a2d3ffd19184c8443e5b3a6dccc053c77ea
|
https://github.com/ChristophReich1996/Cell-DETR/tree/4d0c3a2d3ffd19184c8443e5b3a6dccc053c77ea
|
SobLoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/yd/cydw4qoacv5kdxqi5det4u6ra3jt2jfmrzlritr23tnqktdeu44o.py
# Topologically Sorted Source Nodes: [hdiff, norm], Original ATen: [aten.sub, aten.linalg_vector_norm]
# Source node to ATen node mapping:
# hdiff => sub
# norm => pow_1, pow_2, sum_1
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%slice_1, %slice_2), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 4), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, None), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.25), kwargs = {})
triton_per_fused_linalg_vector_norm_sub_0 = async_compile.triton('triton_per_fused_linalg_vector_norm_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_linalg_vector_norm_sub_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_linalg_vector_norm_sub_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 1
rnumel = 192
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = rindex < rnumel
r0 = rindex
tmp0 = tl.load(in_ptr0 + (64 + r0), rmask, other=0.0)
tmp1 = tl.load(in_ptr0 + (r0), rmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tmp3 * tmp3
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp7 = tl.where(rmask, tmp5, 0)
tmp8 = tl.sum(tmp7, 1)[:, None]
tmp9 = 0.25
tmp10 = libdevice.pow(tmp8, tmp9)
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([XBLOCK, 1], 0, tl.int32)), tmp10, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [hdiff, norm], Original ATen: [aten.sub, aten.linalg_vector_norm]
stream0 = get_raw_stream(0)
triton_per_fused_linalg_vector_norm_sub_0.run(buf1, arg0_1, 1, 192, grid=grid(1), stream=stream0)
del arg0_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
class SobLoss(torch.nn.Module):
"""
Sobolev norm penalty on function
    (sum_i |x_{i} - x_{i+1}|^p)^{1/p}
    parameters:
    p - order of the norm
"""
def __init__(self, p):
super(SobLoss, self).__init__()
self.p = p
def forward(self, beta):
hdiff = beta[1:] - beta[:-1]
return torch.norm(hdiff, p=self.p)
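# Illustrative worked example (added for exposition, not part of the original
# repo): for beta = [0., 1., 3.] the forward differences are [1., 2.], so the
# p = 4 penalty is (1 ** 4 + 2 ** 4) ** (1 / 4) = 17 ** 0.25.
assert torch.isclose(SobLoss(4)(torch.tensor([0.0, 1.0, 3.0])),
    torch.tensor(17.0 ** 0.25))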
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'p': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
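# The kernel below computes norm(beta[1:] - beta[:-1], p=4) in one pass: for
# the (4, 4, 4, 4) input the difference along dim 0 has 3 * 64 = 192 elements,
# realized as in_ptr0[64 + r0] - in_ptr0[r0] for r0 in [0, 192), followed by
# sum(diff ** 4) ** 0.25.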
@triton.jit
def triton_per_fused_linalg_vector_norm_sub_0(in_out_ptr0, in_ptr0, xnumel,
rnumel, XBLOCK: tl.constexpr):
rnumel = 192
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[None, :]
rmask = rindex < rnumel
r0 = rindex
tmp0 = tl.load(in_ptr0 + (64 + r0), rmask, other=0.0)
tmp1 = tl.load(in_ptr0 + r0, rmask, other=0.0)
tmp2 = tmp0 - tmp1
tmp3 = tmp2 * tmp2
tmp4 = tmp3 * tmp3
tmp5 = tl.broadcast_to(tmp4, [XBLOCK, RBLOCK])
tmp7 = tl.where(rmask, tmp5, 0)
tmp8 = tl.sum(tmp7, 1)[:, None]
tmp9 = 0.25
tmp10 = libdevice.pow(tmp8, tmp9)
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([XBLOCK, 1], 0, tl.int32), tmp10, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_linalg_vector_norm_sub_0[grid(1)](buf1, arg0_1, 1,
192, XBLOCK=1, num_warps=2, num_stages=1)
del arg0_1
return buf1,
class SobLossNew(torch.nn.Module):
"""
Sobolev norm penalty on function
    (sum_i |x_{i} - x_{i+1}|^p)^{1/p}
    parameters:
    p - order of the norm
"""
def __init__(self, p):
super(SobLossNew, self).__init__()
self.p = p
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Filco306/TopologyLayer
|
SobLoss
| false | 13,669 |
[
"MIT"
] | 250 |
1d6261017a80cff0ee06bb896ded40777b0989b4
|
https://github.com/Filco306/TopologyLayer/tree/1d6261017a80cff0ee06bb896ded40777b0989b4
|
ShiftedConv
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/bi/cbirdtqu75ekpesssmflsmp3pefzvdh6ju6mdlels6qajyoqabvh.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# x_1 => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%full_default, %permute], 2), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 112
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 7
x1 = (xindex // 7) % 4
x2 = (xindex // 28)
x3 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 3, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 0.0
tmp6 = tl.full(tmp5.shape, 0.0, tmp5.dtype)
tmp7 = tl.where(tmp4, tmp5, tmp6)
tmp8 = tmp0 >= tmp3
tmp9 = tl.full([1], 7, tl.int64)
tmp10 = tmp0 < tmp9
tmp11 = tl.load(in_ptr0 + (x1 + (4*((-3) + x0)) + (16*x2)), tmp8 & xmask, eviction_policy='evict_last', other=0.0)
tmp12 = tl.where(tmp4, tmp7, tmp11)
tl.store(out_ptr0 + (x3), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/2v/c2vq3pqmx7c3oxfp7p5dhdjlsuc23intbs2hzh7vg3guwwm2uicn.py
# Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.mul]
# Source node to ATen node mapping:
# x_2 => convolution
# x_3 => mul
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.3535533905932738), kwargs = {})
triton_poi_fused_convolution_mul_1 = async_compile.triton('triton_poi_fused_convolution_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_mul_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_mul_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.3535533905932738
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 7), (28, 7, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, buf0, 112, grid=grid(112), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4), (16, 4, 1))
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [x_2, x_3], Original ATen: [aten.convolution, aten.mul]
triton_poi_fused_convolution_mul_1.run(buf2, primals_3, 64, grid=grid(64), stream=stream0)
del primals_3
return (reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), primals_2, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import math
import torch
import torch.nn as nn
from numpy import prod
def getLayerNormalizationFactor(x):
"""
Get He's constant for the given layer
https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf
"""
size = x.weight.size()
fan_in = prod(size[1:])
return math.sqrt(2.0 / fan_in)
class ConstrainedLayer(nn.Module):
"""
A handy refactor that allows the user to:
- initialize one layer's bias to zero
- apply He's initialization at runtime
"""
def __init__(self, module, equalized=True, lrMul=1.0, initBiasToZero=True):
"""
equalized (bool): if true, the layer's weight should evolve within
the range (-1, 1)
initBiasToZero (bool): if true, bias will be initialized to zero
"""
super(ConstrainedLayer, self).__init__()
self.module = module
self.equalized = equalized
if initBiasToZero and module.bias is not None:
self.module.bias.data.fill_(0)
if self.equalized:
self.module.weight.data.normal_(0, 1)
self.weight = getLayerNormalizationFactor(self.module) * lrMul
def forward(self, x):
x = self.module(x)
if self.equalized:
x *= self.weight
return x
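# Illustrative sanity check (added for exposition, not part of the original
# repo): for nn.Conv1d(4, 4, 4) the fan-in is prod((4, 4)) = 16, so He's
# constant is sqrt(2 / 16) ~= 0.3535533905932738, exactly the scale folded
# into triton_poi_fused_convolution_mul_1 above.
assert abs(getLayerNormalizationFactor(nn.Conv1d(4, 4, 4)) -
    2.0 ** 0.5 / 4.0) < 1e-12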
class EqualizedConv1d(ConstrainedLayer):
def __init__(self, nChannelsPrevious, nChannels, kernelSize, padding=0,
bias=True, stride=1, **kwargs):
"""
        A nn.Conv1d module with specific constraints
Args:
nChannelsPrevious (int): number of channels in the previous layer
nChannels (int): number of channels of the current layer
kernelSize (int): size of the convolutional kernel
padding (int): convolution's padding
bias (bool): with bias ?
"""
        ConstrainedLayer.__init__(self, nn.Conv1d(nChannelsPrevious,
            nChannels, kernelSize, padding=padding, bias=bias,
            stride=stride), **kwargs)
class ShiftedConv(nn.Module):
def __init__(self, dimOutputAR, dimOutputEncoder, kernelSize):
super(ShiftedConv, self).__init__()
self.module = EqualizedConv1d(dimOutputAR, dimOutputEncoder,
kernelSize, equalized=True, padding=0)
self.kernelSize = kernelSize
def forward(self, x):
N, _S, C = x.size()
x = x.permute(0, 2, 1)
padding = torch.zeros(N, C, self.kernelSize - 1, device=x.device)
x = torch.cat([padding, x], dim=2)
x = self.module(x)
x = x.permute(0, 2, 1)
return x
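# Illustrative shape check (added for exposition; `_demo_out` is not part of
# the original repo): left-padding with kernelSize - 1 zeros makes the
# convolution causal while preserving the sequence length.
_demo_out = ShiftedConv(4, 4, 4)(torch.rand(2, 8, 4))
assert _demo_out.shape == (2, 8, 4)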
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'dimOutputAR': 4, 'dimOutputEncoder': 4, 'kernelSize': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import math
import torch.nn as nn
from numpy import prod
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
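# triton_poi_fused_cat_0 below materializes the permute plus causal padding in
# one pass: for each output position x0 in [0, 7) it writes 0.0 for the first
# kernelSize - 1 = 3 slots and otherwise reads the permuted input at
# x1 + 4 * (x0 - 3) + 16 * x2.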
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 112
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 7
x1 = xindex // 7 % 4
x2 = xindex // 28
x3 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 3, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = 0.0
tmp6 = tl.full(tmp5.shape, 0.0, tmp5.dtype)
tmp7 = tl.where(tmp4, tmp5, tmp6)
tmp8 = tmp0 >= tmp3
tl.full([1], 7, tl.int64)
tmp11 = tl.load(in_ptr0 + (x1 + 4 * (-3 + x0) + 16 * x2), tmp8 & xmask,
eviction_policy='evict_last', other=0.0)
tmp12 = tl.where(tmp4, tmp7, tmp11)
tl.store(out_ptr0 + x3, tmp12, xmask)
@triton.jit
def triton_poi_fused_convolution_mul_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.3535533905932738
tmp4 = tmp2 * tmp3
tl.store(in_out_ptr0 + x3, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 7), (28, 7, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(112)](primals_1, buf0, 112, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4), (16, 4, 1))
buf2 = buf1
del buf1
triton_poi_fused_convolution_mul_1[grid(64)](buf2, primals_3, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
return reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), primals_2, buf0
def getLayerNormalizationFactor(x):
"""
Get He's constant for the given layer
https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf
"""
size = x.weight.size()
fan_in = prod(size[1:])
return math.sqrt(2.0 / fan_in)
class ConstrainedLayer(nn.Module):
"""
A handy refactor that allows the user to:
- initialize one layer's bias to zero
- apply He's initialization at runtime
"""
def __init__(self, module, equalized=True, lrMul=1.0, initBiasToZero=True):
"""
equalized (bool): if true, the layer's weight should evolve within
the range (-1, 1)
initBiasToZero (bool): if true, bias will be initialized to zero
"""
super(ConstrainedLayer, self).__init__()
self.module = module
self.equalized = equalized
if initBiasToZero and module.bias is not None:
self.module.bias.data.fill_(0)
if self.equalized:
self.module.weight.data.normal_(0, 1)
self.weight = getLayerNormalizationFactor(self.module) * lrMul
def forward(self, x):
x = self.module(x)
if self.equalized:
x *= self.weight
return x
class EqualizedConv1d(ConstrainedLayer):
def __init__(self, nChannelsPrevious, nChannels, kernelSize, padding=0,
bias=True, stride=1, **kwargs):
"""
        A nn.Conv1d module with specific constraints
Args:
nChannelsPrevious (int): number of channels in the previous layer
nChannels (int): number of channels of the current layer
kernelSize (int): size of the convolutional kernel
padding (int): convolution's padding
bias (bool): with bias ?
"""
        ConstrainedLayer.__init__(self, nn.Conv1d(nChannelsPrevious,
            nChannels, kernelSize, padding=padding, bias=bias,
            stride=stride), **kwargs)
class ShiftedConvNew(nn.Module):
def __init__(self, dimOutputAR, dimOutputEncoder, kernelSize):
super(ShiftedConvNew, self).__init__()
self.module = EqualizedConv1d(dimOutputAR, dimOutputEncoder,
kernelSize, equalized=True, padding=0)
self.kernelSize = kernelSize
def forward(self, input_0):
primals_1 = self.module.module.weight
primals_3 = self.module.module.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
EyalSel/CPC_audio
|
ShiftedConv
| false | 13,670 |
[
"MIT"
] | 260 |
b98a1bdf1fe9ea219816db7a6c28115d404a3510
|
https://github.com/EyalSel/CPC_audio/tree/b98a1bdf1fe9ea219816db7a6c28115d404a3510
|
LogisticCumulativeLink
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/cy/ccy7yru2zdy7ixr5e4xphx2c7ryepqm2xt3yzpntkputvlz2y3ua.py
# Topologically Sorted Source Nodes: [getitem_2], Original ATen: [aten.lift_fresh]
# Source node to ATen node mapping:
# getitem_2 => full_default
# Graph fragment:
# %full_default : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([1], 0), kwargs = {dtype: torch.int64, layout: torch.strided, device: cuda:0, pin_memory: False})
triton_poi_fused_lift_fresh_0 = async_compile.triton('triton_poi_fused_lift_fresh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {1: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=(1,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_lift_fresh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_lift_fresh_0(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
tmp0 = tl.full([1], 0, tl.int64)
tl.store(out_ptr0 + (tl.full([XBLOCK], 0, tl.int32)), tmp0, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ge/cgeru34cvt6nnywd3qmie5evkuyzszcwjdamafkyhj6upvphaydq.py
# Topologically Sorted Source Nodes: [getitem_3], Original ATen: [aten.lift_fresh]
# Source node to ATen node mapping:
# getitem_3 => full_default_1
# Graph fragment:
# %full_default_1 : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([1], -1), kwargs = {dtype: torch.int64, layout: torch.strided, device: cuda:0, pin_memory: False})
triton_poi_fused_lift_fresh_1 = async_compile.triton('triton_poi_fused_lift_fresh_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {1: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=(1,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_lift_fresh_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_lift_fresh_1(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
tmp0 = tl.full([1], -1, tl.int64)
tl.store(out_ptr0 + (tl.full([XBLOCK], 0, tl.int32)), tmp0, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ub/cubbhho3d4olluhydb6hq6avsqsjszp5c44cbvmmno7salnl32jt.py
# Topologically Sorted Source Nodes: [link_mat_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# link_mat_1 => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%index, %sub_1, %sub_2], 1), kwargs = {})
triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 240
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = (xindex // 12) % 5
x0 = xindex % 3
x3 = (xindex // 60)
x4 = xindex % 12
x5 = xindex
tmp0 = x2
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr1 + (x4 + (48*x3)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tmp5 - tmp6
tmp8 = tl.sigmoid(tmp7)
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp4, tmp8, tmp9)
tmp11 = tmp0 >= tmp3
tmp12 = tl.full([1], 4, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr0 + (x0), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tl.load(in_ptr1 + (12 + x4 + (12*((-1) + x2)) + (48*x3)), tmp14 & xmask, other=0.0)
tmp17 = tmp15 - tmp16
tmp18 = tl.sigmoid(tmp17)
tmp19 = tl.load(in_ptr1 + (x4 + (12*((-1) + x2)) + (48*x3)), tmp14 & xmask, other=0.0)
tmp20 = tmp15 - tmp19
tmp21 = tl.sigmoid(tmp20)
tmp22 = tmp18 - tmp21
tmp23 = tl.full(tmp22.shape, 0.0, tmp22.dtype)
tmp24 = tl.where(tmp14, tmp22, tmp23)
tmp25 = tmp0 >= tmp12
tmp26 = tl.full([1], 5, tl.int64)
tmp27 = tmp0 < tmp26
tmp28 = tl.load(in_ptr0 + (x0), tmp25 & xmask, eviction_policy='evict_last', other=0.0)
tmp29 = tl.load(in_ptr1 + (36 + x4 + (48*x3)), tmp25 & xmask, eviction_policy='evict_last', other=0.0)
tmp30 = tmp28 - tmp29
tmp31 = tl.sigmoid(tmp30)
tmp32 = 1.0
tmp33 = tmp32 - tmp31
tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
tmp35 = tl.where(tmp25, tmp33, tmp34)
tmp36 = tl.where(tmp14, tmp24, tmp35)
tmp37 = tl.where(tmp4, tmp10, tmp36)
tl.store(out_ptr0 + (x5), tmp37, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (3, ), (1, ))
assert_size_stride(primals_2, (4, 4, 4, 3), (48, 12, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [getitem_2], Original ATen: [aten.lift_fresh]
stream0 = get_raw_stream(0)
triton_poi_fused_lift_fresh_0.run(buf0, 1, grid=grid(1), stream=stream0)
buf1 = empty_strided_cuda((1, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [getitem_3], Original ATen: [aten.lift_fresh]
triton_poi_fused_lift_fresh_1.run(buf1, 1, grid=grid(1), stream=stream0)
buf2 = empty_strided_cuda((4, 5, 4, 3), (60, 12, 3, 1), torch.float32)
# Topologically Sorted Source Nodes: [link_mat_1], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(primals_1, primals_2, buf2, 240, grid=grid(240), stream=stream0)
return (buf2, primals_1, primals_2, buf0, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 3), (48, 12, 3, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch import nn
class LogisticCumulativeLink(nn.Module):
"""
Converts a single number to the proportional odds of belonging to a class.
Parameters
----------
num_classes : int
Number of ordered classes to partition the odds into.
init_cutpoints : str (default='ordered')
How to initialize the cutpoints of the model. Valid values are
- ordered : cutpoints are initialized to halfway between each class.
- random : cutpoints are initialized with random values.
"""
    def __init__(self, num_classes: 'int', init_cutpoints: 'str'='ordered') ->None:
assert num_classes > 2, 'Only use this model if you have 3 or more classes'
super().__init__()
self.num_classes = num_classes
self.init_cutpoints = init_cutpoints
if init_cutpoints == 'ordered':
num_cutpoints = self.num_classes - 1
cutpoints = torch.arange(num_cutpoints).float() - num_cutpoints / 2
self.cutpoints = nn.Parameter(cutpoints)
elif init_cutpoints == 'random':
cutpoints = torch.rand(self.num_classes - 1).sort()[0]
self.cutpoints = nn.Parameter(cutpoints)
else:
raise ValueError(
f'{init_cutpoints} is not a valid init_cutpoints type')
def forward(self, X: 'torch.Tensor') ->torch.Tensor:
"""
Equation (11) from
"On the consistency of ordinal regression methods", Pedregosa et. al.
"""
sigmoids = torch.sigmoid(self.cutpoints - X)
link_mat = sigmoids[:, 1:] - sigmoids[:, :-1]
        link_mat = torch.cat((sigmoids[:, [0]], link_mat,
            1 - sigmoids[:, [-1]]), dim=1)
return link_mat
def get_inputs():
return [torch.rand([4, 4, 4, 3])]
def get_init_inputs():
return [[], {'num_classes': 4}]
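# Illustrative sanity check (an addition, not part of the original source):
# with the canonical (batch, 1) score input, each row of link_mat telescopes
# to a probability distribution over the num_classes ordered classes.
def _check_link_mat_sums_to_one():
    m = LogisticCumulativeLink(num_classes=4)
    X = torch.randn(8, 1)
    p = m(X)
    assert p.shape == (8, 4)
    assert torch.allclose(p.sum(dim=1), torch.ones(8), atol=1e-06)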
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_lift_fresh_0(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
tmp0 = tl.full([1], 0, tl.int64)
tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp0, None)
@triton.jit
def triton_poi_fused_lift_fresh_1(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
tmp0 = tl.full([1], -1, tl.int64)
tl.store(out_ptr0 + tl.full([XBLOCK], 0, tl.int32), tmp0, None)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 240
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex // 12 % 5
x0 = xindex % 3
x3 = xindex // 60
x4 = xindex % 12
x5 = xindex
tmp0 = x2
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x0, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = tl.load(in_ptr1 + (x4 + 48 * x3), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp7 = tmp5 - tmp6
tmp8 = tl.sigmoid(tmp7)
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp4, tmp8, tmp9)
tmp11 = tmp0 >= tmp3
tmp12 = tl.full([1], 4, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr0 + x0, tmp14 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp16 = tl.load(in_ptr1 + (12 + x4 + 12 * (-1 + x2) + 48 * x3), tmp14 &
xmask, other=0.0)
tmp17 = tmp15 - tmp16
tmp18 = tl.sigmoid(tmp17)
tmp19 = tl.load(in_ptr1 + (x4 + 12 * (-1 + x2) + 48 * x3), tmp14 &
xmask, other=0.0)
tmp20 = tmp15 - tmp19
tmp21 = tl.sigmoid(tmp20)
tmp22 = tmp18 - tmp21
tmp23 = tl.full(tmp22.shape, 0.0, tmp22.dtype)
tmp24 = tl.where(tmp14, tmp22, tmp23)
tmp25 = tmp0 >= tmp12
tl.full([1], 5, tl.int64)
tmp28 = tl.load(in_ptr0 + x0, tmp25 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp29 = tl.load(in_ptr1 + (36 + x4 + 48 * x3), tmp25 & xmask,
eviction_policy='evict_last', other=0.0)
tmp30 = tmp28 - tmp29
tmp31 = tl.sigmoid(tmp30)
tmp32 = 1.0
tmp33 = tmp32 - tmp31
tmp34 = tl.full(tmp33.shape, 0.0, tmp33.dtype)
tmp35 = tl.where(tmp25, tmp33, tmp34)
tmp36 = tl.where(tmp14, tmp24, tmp35)
tmp37 = tl.where(tmp4, tmp10, tmp36)
tl.store(out_ptr0 + x5, tmp37, xmask)
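# Added commentary (not in the original source): x2 indexes the 5 entries of
# dim 1 in the (4, 5, 4, 3) output, reproducing the torch.cat from the eager
# forward. x2 == 0 stores sigmoid(cutpoints - X[:, 0]) (the first class),
# 1 <= x2 < 4 stores the difference of adjacent sigmoids, and x2 == 4 stores
# 1 - sigmoid(cutpoints - X[:, -1]) (the last class).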
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (3,), (1,))
assert_size_stride(primals_2, (4, 4, 4, 3), (48, 12, 3, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1,), (1,), torch.int64)
get_raw_stream(0)
        triton_poi_fused_lift_fresh_0[grid(1)](buf0, 1, XBLOCK=1,
            num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((1,), (1,), torch.int64)
        triton_poi_fused_lift_fresh_1[grid(1)](buf1, 1, XBLOCK=1,
            num_warps=1, num_stages=1)
buf2 = empty_strided_cuda((4, 5, 4, 3), (60, 12, 3, 1), torch.float32)
triton_poi_fused_cat_2[grid(240)](primals_1, primals_2, buf2, 240,
XBLOCK=256, num_warps=4, num_stages=1)
return buf2, primals_1, primals_2, buf0, buf1
class LogisticCumulativeLinkNew(nn.Module):
"""
Converts a single number to the proportional odds of belonging to a class.
Parameters
----------
num_classes : int
Number of ordered classes to partition the odds into.
init_cutpoints : str (default='ordered')
How to initialize the cutpoints of the model. Valid values are
- ordered : cutpoints are initialized to halfway between each class.
- random : cutpoints are initialized with random values.
"""
    def __init__(self, num_classes: 'int', init_cutpoints: 'str'='ordered') ->None:
assert num_classes > 2, 'Only use this model if you have 3 or more classes'
super().__init__()
self.num_classes = num_classes
self.init_cutpoints = init_cutpoints
if init_cutpoints == 'ordered':
num_cutpoints = self.num_classes - 1
cutpoints = torch.arange(num_cutpoints).float() - num_cutpoints / 2
self.cutpoints = nn.Parameter(cutpoints)
elif init_cutpoints == 'random':
cutpoints = torch.rand(self.num_classes - 1).sort()[0]
self.cutpoints = nn.Parameter(cutpoints)
else:
raise ValueError(
f'{init_cutpoints} is not a valid init_cutpoints type')
def forward(self, input_0):
primals_1 = self.cutpoints
primals_2 = input_0
output = call([primals_1, primals_2])
return output[0]
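# Added commentary (not in the original source): unlike the eager module
# above, this compiled wrapper is specialized to the exact (4, 4, 4, 3)
# input shape and strides asserted in call(); any other shape would require
# recompilation by the inductor.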
|
EthanRosenthal/medallion
|
LogisticCumulativeLink
| false | 13,671 |
[
"MIT"
] | 74 |
063fe875f5122063e6f616512cffd9ffa4df1974
|
https://github.com/EthanRosenthal/medallion/tree/063fe875f5122063e6f616512cffd9ffa4df1974
|
NormedConv2d
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/qs/cqsb5uykgs6dxtfkaqn2dtogrggqadkzu2etk6n3k2ycbuu3j5xj.py
# Topologically Sorted Source Nodes: [norm, pow_1, add, weight_], Original ATen: [aten.linalg_vector_norm, aten.pow, aten.add, aten.div]
# Source node to ATen node mapping:
# add => add
# norm => pow_1, pow_2, sum_1
# pow_1 => pow_3
# weight_ => div
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, [1], True), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.5), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%pow_2, 1.0), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_3, 1e-06), kwargs = {})
# %div : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %add), kwargs = {})
triton_poi_fused_add_div_linalg_vector_norm_pow_0 = async_compile.triton('triton_poi_fused_add_div_linalg_vector_norm_pow_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_linalg_vector_norm_pow_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_linalg_vector_norm_pow_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-06
tmp14 = tmp12 + tmp13
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + (x3), tmp15, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/fb/cfb2mhtqkqxdf4at5lwvqvxh4rnjehaoepwqh6tscuxefrmmf3in.py
# Topologically Sorted Source Nodes: [norm_1, pow_2, add_1, x_, x__1], Original ATen: [aten.linalg_vector_norm, aten.pow, aten.add, aten.div, aten.mul]
# Source node to ATen node mapping:
# add_1 => add_1
# norm_1 => pow_4, pow_5, sum_2
# pow_2 => pow_6
# x_ => div_1
# x__1 => mul
# Graph fragment:
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_2, 2), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_4, [1], True), kwargs = {})
# %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_2, 0.5), kwargs = {})
# %pow_6 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%pow_5, 1.0), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_6, 1e-06), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_2, %add_1), kwargs = {})
# %mul : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div_1, 20), kwargs = {})
triton_poi_fused_add_div_linalg_vector_norm_mul_pow_1 = async_compile.triton('triton_poi_fused_add_div_linalg_vector_norm_mul_pow_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_linalg_vector_norm_mul_pow_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_linalg_vector_norm_mul_pow_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-06
tmp14 = tmp12 + tmp13
tmp15 = tmp0 / tmp14
tmp16 = 20.0
tmp17 = tmp15 * tmp16
tl.store(out_ptr0 + (x3), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/k2/ck2mamkqpmuzem4n3p4ij6fmfpy2bcbblg6sx6wwslgqwuqq5ifh.py
# Topologically Sorted Source Nodes: [x__2], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x__2 => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%mul, %div, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [norm, pow_1, add, weight_], Original ATen: [aten.linalg_vector_norm, aten.pow, aten.add, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_linalg_vector_norm_pow_0.run(primals_1, buf0, 256, grid=grid(256), stream=stream0)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [norm_1, pow_2, add_1, x_, x__1], Original ATen: [aten.linalg_vector_norm, aten.pow, aten.add, aten.div, aten.mul]
triton_poi_fused_add_div_linalg_vector_norm_mul_pow_1.run(primals_2, buf1, 256, grid=grid(256), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [x__2], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, buf0, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [x__2], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf3, primals_3, 16, grid=grid(16), stream=stream0)
del primals_3
return (buf3, primals_1, buf0, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class NormedConv2d(nn.Conv2d):
"""Normalized Conv2d Layer.
Args:
        tempearture (float, optional): Temperature term. Defaults to 20.
        power (float, optional): Power term. Defaults to 1.0.
        eps (float, optional): Minimal value of the divisor, to keep
            numerical stability. Defaults to 1e-6.
norm_over_kernel (bool, optional): Normalize over kernel.
Default to False.
"""
def __init__(self, *args, tempearture=20, power=1.0, eps=1e-06,
norm_over_kernel=False, **kwargs):
super(NormedConv2d, self).__init__(*args, **kwargs)
self.tempearture = tempearture
self.power = power
self.norm_over_kernel = norm_over_kernel
self.eps = eps
def forward(self, x):
if not self.norm_over_kernel:
            weight_ = self.weight / (self.weight.norm(dim=1, keepdim=True)
                .pow(self.power) + self.eps)
else:
            weight_ = self.weight / (self.weight.view(self.weight.size(0), -1)
                .norm(dim=1, keepdim=True).pow(self.power)[..., None, None] +
                self.eps)
x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps)
x_ = x_ * self.tempearture
if hasattr(self, 'conv2d_forward'):
x_ = self.conv2d_forward(x_, weight_)
elif torch.__version__ >= '1.8':
x_ = self._conv_forward(x_, weight_, self.bias)
else:
x_ = self._conv_forward(x_, weight_)
return x_
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'kernel_size': 4}]
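# Illustrative usage sketch (an addition, not part of the original source;
# assumes torch >= 2.0 so the version check above picks the
# _conv_forward(x_, weight_, self.bias) path). The weight is L2-normalized
# over its input-channel dim at every kernel position, the input is
# L2-normalized over channels and scaled by the temperature, then the usual
# conv2d runs.
def _normed_conv2d_demo():
    conv = NormedConv2d(in_channels=4, out_channels=4, kernel_size=4)
    x = torch.rand(4, 4, 4, 4)
    y = conv(x)
    assert y.shape == (4, 4, 1, 1)
    return y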
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_linalg_vector_norm_pow_0(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-06
tmp14 = tmp12 + tmp13
tmp15 = tmp0 / tmp14
tl.store(out_ptr0 + x3, tmp15, xmask)
@triton.jit
def triton_poi_fused_add_div_linalg_vector_norm_mul_pow_1(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = 1e-06
tmp14 = tmp12 + tmp13
tmp15 = tmp0 / tmp14
tmp16 = 20.0
tmp17 = tmp15 * tmp16
tl.store(out_ptr0 + x3, tmp17, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
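# Added commentary (not in the original source): in call() below, the
# convolution itself runs through the extern (cuDNN) kernel with bias=None;
# the small kernel above then adds the per-output-channel bias in place
# (x0 = xindex % 4 selects the channel of the (4, 4, 1, 1) result).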
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_linalg_vector_norm_pow_0[grid(256)](primals_1,
buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_div_linalg_vector_norm_mul_pow_1[grid(256)](
primals_2, buf1, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
buf2 = extern_kernels.convolution(buf1, buf0, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_2[grid(16)](buf3, primals_3, 16,
XBLOCK=16, num_warps=1, num_stages=1)
del primals_3
return buf3, primals_1, buf0, buf1
class NormedConv2dNew(nn.Conv2d):
"""Normalized Conv2d Layer.
Args:
        tempearture (float, optional): Temperature term. Defaults to 20.
        power (float, optional): Power term. Defaults to 1.0.
        eps (float, optional): Minimal value of the divisor, to keep
            numerical stability. Defaults to 1e-6.
norm_over_kernel (bool, optional): Normalize over kernel.
Default to False.
"""
def __init__(self, *args, tempearture=20, power=1.0, eps=1e-06,
norm_over_kernel=False, **kwargs):
super(NormedConv2dNew, self).__init__(*args, **kwargs)
self.tempearture = tempearture
self.power = power
self.norm_over_kernel = norm_over_kernel
self.eps = eps
def forward(self, input_0):
primals_1 = self.weight
primals_3 = self.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
FMsunyh/mmdetection
|
NormedConv2d
| false | 13,672 |
[
"Apache-2.0"
] | 240 |
d3683eb06d1041aa3d55f35ad81d8c37718a4c2d
|
https://github.com/FMsunyh/mmdetection/tree/d3683eb06d1041aa3d55f35ad81d8c37718a4c2d
|
TemperatureTanh
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ec/cecjvjp2acumgry4drsgyc6jvkjne457wloc3c5e2pdgcu7yu33t.py
# Topologically Sorted Source Nodes: [truediv, tanh], Original ATen: [aten.div, aten.tanh]
# Source node to ATen node mapping:
# tanh => tanh
# truediv => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, 1.0), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%div,), kwargs = {})
triton_poi_fused_div_tanh_0 = async_compile.triton('triton_poi_fused_div_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_tanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(out_ptr0 + (x0), tmp3, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [truediv, tanh], Original ATen: [aten.div, aten.tanh]
stream0 = get_raw_stream(0)
triton_poi_fused_div_tanh_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch import Tensor
from torch.functional import Tensor
from torch import nn as nn
class TemperatureTanh(nn.Module):
def __init__(self, temperature: 'float'=1.0) ->None:
"""The hyperbolic tangent with an optional temperature."""
super().__init__()
assert temperature != 0.0, 'temperature must be nonzero.'
self._T = temperature
self.tanh = torch.nn.Tanh()
def forward(self, x: 'Tensor') ->Tensor:
return self.tanh(x / self._T)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
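# Illustrative check (an addition, not part of the original source): with the
# default temperature of 1.0 the module is exactly tanh, while a larger
# temperature flattens the response by shrinking the pre-activation.
def _temperature_tanh_demo():
    x = torch.rand(4, 4, 4, 4)
    assert torch.allclose(TemperatureTanh()(x), torch.tanh(x))
    assert torch.all(TemperatureTanh(temperature=2.0)(x) <= torch.tanh(x))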
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_div_tanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = libdevice.tanh(tmp2)
tl.store(out_ptr0 + x0, tmp3, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
        triton_poi_fused_div_tanh_0[grid(256)](arg0_1, buf0, 256,
            XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class TemperatureTanhNew(nn.Module):
def __init__(self, temperature: 'float'=1.0) ->None:
"""The hyperbolic tangent with an optional temperature."""
super().__init__()
assert temperature != 0.0, 'temperature must be nonzero.'
self._T = temperature
self.tanh = torch.nn.Tanh()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Felix2048/VLN-CE
|
TemperatureTanh
| false | 13,673 |
[
"MIT"
] | 106 |
4ea21f2af0d869ae65dd6677a53e788233f93761
|
https://github.com/Felix2048/VLN-CE/tree/4ea21f2af0d869ae65dd6677a53e788233f93761
|
Net
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/cy/ccy75molnj4pqcxpogtedljmbw6x3iqcylupg4qp2msv7iy3fvjt.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 18816
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 784) % 6
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/v7/cv7qi7gg3bpfwb3hj7zgy5jlgh7x7wdgqsfsodkjsoverxdjlf6z.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x => getitem, getitem_1
# Graph fragment:
# %getitem : [num_users=2] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 0), kwargs = {})
# %getitem_1 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_1 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i8', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4704
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 14
x3 = (xindex // 14)
x2 = (xindex // 1176)
x4 = xindex % 1176
tmp0 = tl.load(in_ptr0 + ((2*x0) + (56*x3)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (56*x3)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (28 + (2*x0) + (56*x3)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (29 + (2*x0) + (56*x3)), xmask, eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x4 + (1184*x2)), tmp6, xmask)
tl.store(out_ptr1 + (x4 + (1280*x2)), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/fd/cfd3q6x464cl7vept5hkzz2av6ogtk5up7qocwljfaq5nbponjso.py
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%getitem, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 100) % 16
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/tn/ctnw4tbgfy47ppke77vu7rtiz7dl5o3ahickx4p64n7c5rmrrix6.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
# Source node to ATen node mapping:
# x_1 => _low_memory_max_pool2d_with_offsets_1, getitem_3
# Graph fragment:
# %_low_memory_max_pool2d_with_offsets_1 : [num_users=2] = call_function[target=torch.ops.prims._low_memory_max_pool2d_with_offsets.default](args = (%convolution_1, [2, 2], [2, 2], [0, 0], [1, 1], False), kwargs = {})
# %getitem_3 : [num_users=1] = call_function[target=operator.getitem](args = (%_low_memory_max_pool2d_with_offsets_1, 1), kwargs = {})
triton_poi_fused_max_pool2d_with_indices_3 = async_compile.triton('triton_poi_fused_max_pool2d_with_indices_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i8', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_max_pool2d_with_indices_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = (xindex // 5)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (20*x1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (20*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (10 + (2*x0) + (20*x1)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (11 + (2*x0) + (20*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + (x2), tmp15, xmask)
tl.store(out_ptr1 + (x2), tmp16, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (6, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (6, ), (1, ))
assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1))
assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1))
assert_size_stride(primals_5, (16, ), (1, ))
assert_size_stride(primals_6, (120, 400), (400, 1))
assert_size_stride(primals_7, (120, ), (1, ))
assert_size_stride(primals_8, (84, 120), (120, 1))
assert_size_stride(primals_9, (84, ), (1, ))
assert_size_stride(primals_10, (10, 84), (84, 1))
assert_size_stride(primals_11, (10, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 6, 28, 28), (4704, 784, 28, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(buf1, primals_2, 18816, grid=grid(18816), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((4, 6, 14, 14), (1184, 196, 14, 1), torch.float32)
buf3 = empty_strided_cuda((4, 6, 14, 14), (1280, 196, 14, 1), torch.int8)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_1.run(buf1, buf2, buf3, 4704, grid=grid(4704), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 10, 10), (1600, 100, 10, 1))
buf5 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf5, primals_5, 6400, grid=grid(6400), stream=stream0)
del primals_5
buf6 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.int8)
buf7 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.max_pool2d_with_indices]
triton_poi_fused_max_pool2d_with_indices_3.run(buf5, buf6, buf7, 1600, grid=grid(1600), stream=stream0)
buf8 = empty_strided_cuda((4, 120), (120, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf7, (4, 400), (400, 1), 0), reinterpret_tensor(primals_6, (400, 120), (1, 400), 0), alpha=1, beta=1, out=buf8)
del primals_7
buf9 = empty_strided_cuda((4, 84), (84, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_9, buf8, reinterpret_tensor(primals_8, (120, 84), (1, 120), 0), alpha=1, beta=1, out=buf9)
del primals_9
buf10 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, buf9, reinterpret_tensor(primals_10, (84, 10), (1, 84), 0), alpha=1, beta=1, out=buf10)
del primals_11
return (buf10, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5, buf6, reinterpret_tensor(buf7, (4, 400), (400, 1), 0), buf8, buf9, primals_10, primals_8, primals_6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((6, 3, 5, 5), (75, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((6, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 3, 32, 32), (3072, 1024, 32, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((16, 6, 5, 5), (150, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((16, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((120, 400), (400, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((120, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((84, 120), (120, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((84, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((10, 84), (84, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((10, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as tnn
class Net(tnn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = tnn.Conv2d(3, 6, 5)
self.pool = tnn.MaxPool2d(2, 2)
self.conv2 = tnn.Conv2d(6, 16, 5)
self.fc1 = tnn.Linear(16 * 5 * 5, 120)
self.fc2 = tnn.Linear(120, 84)
self.fc3 = tnn.Linear(84, 10)
def forward(self, x):
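        # NOTE: no activation functions are applied between layers; the
        # compiled graph above accordingly contains only convolution,
        # pooling, and addmm kernels.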
x = self.pool(self.conv1(x))
x = self.pool(self.conv2(x))
x = x.view(-1, 16 * 5 * 5)
x = self.fc1(x)
x = self.fc2(x)
x = self.fc3(x)
return x
def get_inputs():
return [torch.rand([4, 3, 32, 32])]
def get_init_inputs():
return [[], {}]
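# Quick smoke test (a sketch, not part of the original dump): build the
# model from the exported helpers and check the output shape.
args, kwargs = get_init_inputs()
model = Net(*args, **kwargs)
out = model(*get_inputs())
print(out.shape)  # torch.Size([4, 10])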
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as tnn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 18816
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 784 % 6
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4704
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 14
x3 = xindex // 14
x2 = xindex // 1176
x4 = xindex % 1176
tmp0 = tl.load(in_ptr0 + (2 * x0 + 56 * x3), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 56 * x3), xmask, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (28 + 2 * x0 + 56 * x3), xmask,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (29 + 2 * x0 + 56 * x3), xmask,
eviction_policy='evict_last')
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp6 = triton_helpers.maximum(tmp5, tmp4)
tmp7 = tmp1 > tmp0
tmp8 = tl.full([1], 1, tl.int8)
tmp9 = tl.full([1], 0, tl.int8)
tmp10 = tl.where(tmp7, tmp8, tmp9)
tmp11 = tmp3 > tmp2
tmp12 = tl.full([1], 2, tl.int8)
tmp13 = tl.where(tmp11, tmp12, tmp10)
tmp14 = tmp5 > tmp4
tmp15 = tl.full([1], 3, tl.int8)
tmp16 = tl.where(tmp14, tmp15, tmp13)
tl.store(out_ptr0 + (x4 + 1184 * x2), tmp6, xmask)
tl.store(out_ptr1 + (x4 + 1280 * x2), tmp16, xmask)
@triton.jit
def triton_poi_fused_convolution_2(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 6400
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 100 % 16
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
@triton.jit
def triton_poi_fused_max_pool2d_with_indices_3(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 1600
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 5
x1 = xindex // 5
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 20 * x1), xmask, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 20 * x1), xmask, eviction_policy
='evict_last')
tmp7 = tl.load(in_ptr0 + (10 + 2 * x0 + 20 * x1), xmask,
eviction_policy='evict_last')
tmp12 = tl.load(in_ptr0 + (11 + 2 * x0 + 20 * x1), xmask,
eviction_policy='evict_last')
tmp2 = tmp1 > tmp0
tmp3 = tl.full([1], 1, tl.int8)
tmp4 = tl.full([1], 0, tl.int8)
tmp5 = tl.where(tmp2, tmp3, tmp4)
tmp6 = triton_helpers.maximum(tmp1, tmp0)
tmp8 = tmp7 > tmp6
tmp9 = tl.full([1], 2, tl.int8)
tmp10 = tl.where(tmp8, tmp9, tmp5)
tmp11 = triton_helpers.maximum(tmp7, tmp6)
tmp13 = tmp12 > tmp11
tmp14 = tl.full([1], 3, tl.int8)
tmp15 = tl.where(tmp13, tmp14, tmp10)
tmp16 = triton_helpers.maximum(tmp12, tmp11)
tl.store(out_ptr0 + x2, tmp15, xmask)
tl.store(out_ptr1 + x2, tmp16, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (6, 3, 5, 5), (75, 25, 5, 1))
assert_size_stride(primals_2, (6,), (1,))
assert_size_stride(primals_3, (4, 3, 32, 32), (3072, 1024, 32, 1))
assert_size_stride(primals_4, (16, 6, 5, 5), (150, 25, 5, 1))
assert_size_stride(primals_5, (16,), (1,))
assert_size_stride(primals_6, (120, 400), (400, 1))
assert_size_stride(primals_7, (120,), (1,))
assert_size_stride(primals_8, (84, 120), (120, 1))
assert_size_stride(primals_9, (84,), (1,))
assert_size_stride(primals_10, (10, 84), (84, 1))
assert_size_stride(primals_11, (10,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 6, 28, 28), (4704, 784, 28, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(18816)](buf1, primals_2, 18816,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((4, 6, 14, 14), (1184, 196, 14, 1), torch
.float32)
buf3 = empty_strided_cuda((4, 6, 14, 14), (1280, 196, 14, 1), torch
.int8)
triton_poi_fused_max_pool2d_with_indices_1[grid(4704)](buf1, buf2,
buf3, 4704, XBLOCK=256, num_warps=4, num_stages=1)
buf4 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 16, 10, 10), (1600, 100, 10, 1))
buf5 = buf4
del buf4
triton_poi_fused_convolution_2[grid(6400)](buf5, primals_5, 6400,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.int8)
buf7 = empty_strided_cuda((4, 16, 5, 5), (400, 25, 5, 1), torch.float32
)
triton_poi_fused_max_pool2d_with_indices_3[grid(1600)](buf5, buf6,
buf7, 1600, XBLOCK=128, num_warps=4, num_stages=1)
buf8 = empty_strided_cuda((4, 120), (120, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf7, (4, 400),
(400, 1), 0), reinterpret_tensor(primals_6, (400, 120), (1, 400
), 0), alpha=1, beta=1, out=buf8)
del primals_7
buf9 = empty_strided_cuda((4, 84), (84, 1), torch.float32)
extern_kernels.addmm(primals_9, buf8, reinterpret_tensor(primals_8,
(120, 84), (1, 120), 0), alpha=1, beta=1, out=buf9)
del primals_9
buf10 = empty_strided_cuda((4, 10), (10, 1), torch.float32)
extern_kernels.addmm(primals_11, buf9, reinterpret_tensor(
primals_10, (84, 10), (1, 84), 0), alpha=1, beta=1, out=buf10)
del primals_11
return (buf10, primals_1, primals_3, primals_4, buf1, buf2, buf3, buf5,
buf6, reinterpret_tensor(buf7, (4, 400), (400, 1), 0), buf8, buf9,
primals_10, primals_8, primals_6)
class NetNew(tnn.Module):
def __init__(self):
super(NetNew, self).__init__()
self.conv1 = tnn.Conv2d(3, 6, 5)
self.pool = tnn.MaxPool2d(2, 2)
self.conv2 = tnn.Conv2d(6, 16, 5)
self.fc1 = tnn.Linear(16 * 5 * 5, 120)
self.fc2 = tnn.Linear(120, 84)
self.fc3 = tnn.Linear(84, 10)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.fc1.weight
primals_7 = self.fc1.bias
primals_8 = self.fc2.weight
primals_9 = self.fc2.bias
primals_10 = self.fc3.weight
primals_11 = self.fc3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
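# Equivalence sketch (an assumption, not part of the original dump): with
# shared weights, NetNew should reproduce the eager Net defined earlier in
# this record up to float tolerance. Requires a CUDA device, since call()
# launches CUDA kernels.
def _check_net_equivalence():
    ref = Net().cuda()
    opt = NetNew().cuda()
    opt.load_state_dict(ref.state_dict())
    x = torch.rand(4, 3, 32, 32, device='cuda')
    assert torch.allclose(ref(x), opt(x), atol=1e-5)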
|
Exusial/jittor
|
Net
| false | 13,674 |
[
"Apache-2.0"
] | 2,571 |
eca21d5bba5098bce4f492fa44908677b6e76588
|
https://github.com/Exusial/jittor/tree/eca21d5bba5098bce4f492fa44908677b6e76588
|
Attention
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ay/caylcn737p2wwjm32cacv462xdgdut6ho32ptwxfu34t3i2tr75z.py
# Topologically Sorted Source Nodes: [dot_products], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# dot_products => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%expand_1,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16) % 4
x3 = (xindex // 64)
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1) + (64*x3)), xmask)
tl.store(out_ptr0 + (x4), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/eh/ceheq5ns3kg3p6tebb47gdy475c5v4keklf245jl4fgbpvugznm5.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => exp
# Graph fragment:
# %mul_tensor : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_2, 1), kwargs = {})
# %amax_default : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor, [-1], True), kwargs = {})
# %sub_tensor : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor, %amax_default), kwargs = {})
# %div_tensor : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor, 2.0), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + (x2), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/3f/c3fx6bzkalkw7u7askqdnz4rzlcoyqiec4r434sjc5x3axxgkrmr.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => div_1, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [dot_products], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [dot_products], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), out=buf1)
del arg1_1
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf1, buf2, 256, grid=grid(256), stream=stream0)
buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf2, buf3, 256, grid=grid(256), stream=stream0)
buf4 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1), 0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf4)
del arg2_1
del buf3
return (reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import math
import torch
from torch import nn
from torch.nn import functional as F
import torch.utils.data
# Large finite constant used for masking below. It is defined at module
# level in the original decaNLP source but was dropped from this snippet.
INF = 1e10
def matmul(x, y):
if x.dim() == y.dim():
return x @ y
if x.dim() == y.dim() - 1:
return (x.unsqueeze(-2) @ y).squeeze(-2)
return (x @ y.unsqueeze(-2)).squeeze(-2)
class Attention(nn.Module):
def __init__(self, d_key, dropout_ratio, causal):
super().__init__()
self.scale = math.sqrt(d_key)
self.dropout = nn.Dropout(dropout_ratio)
self.causal = causal
def forward(self, query, key, value, padding=None):
dot_products = matmul(query, key.transpose(1, 2))
if query.dim() == 3 and self.causal:
tri = key.new_ones((key.size(1), key.size(1))).triu(1) * INF
dot_products.sub_(tri.unsqueeze(0))
if padding is not None:
dot_products.masked_fill_(padding.unsqueeze(1).expand_as(
dot_products), -INF)
return matmul(self.dropout(F.softmax(dot_products / self.scale, dim
=-1)), value)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_key': 4, 'dropout_ratio': 0.5, 'causal': 4}]
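# Sanity check (a sketch, not part of the original source): with
# scale = sqrt(d_key) = 2.0, the fused kernels in the compiled code
# implement softmax(x / 2.0) in the numerically stable form
# exp((x - max) / 2.0) / sum, which is algebraically identical.
_x = torch.randn(4, 4)
_stable = torch.exp((_x - _x.max(-1, keepdim=True).values) / 2.0)
_stable = _stable / _stable.sum(-1, keepdim=True)
assert torch.allclose(_stable, torch.softmax(_x / 2.0, dim=-1), atol=1e-6)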
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
from torch import nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16 % 4
x3 = xindex // 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1 + 64 * x3), xmask)
tl.store(out_ptr0 + x4, tmp0, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x2, tmp17, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, arg1_1, arg2_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((16, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(arg1_1, (16, 4, 4), (16, 4, 1
), 0), reinterpret_tensor(buf0, (16, 4, 4), (16, 4, 1), 0), out
=buf1)
del arg1_1
buf2 = buf0
del buf0
triton_poi_fused__softmax_1[grid(256)](buf1, buf2, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf3 = reinterpret_tensor(buf1, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf1
triton_poi_fused__softmax_2[grid(256)](buf2, buf3, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf4 = reinterpret_tensor(buf2, (16, 4, 4), (16, 4, 1), 0)
del buf2
extern_kernels.bmm(reinterpret_tensor(buf3, (16, 4, 4), (16, 4, 1),
0), reinterpret_tensor(arg2_1, (16, 4, 4), (16, 4, 1), 0), out=buf4
)
del arg2_1
del buf3
return reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0),
def matmul(x, y):
if x.dim() == y.dim():
return x @ y
if x.dim() == y.dim() - 1:
return (x.unsqueeze(-2) @ y).squeeze(-2)
return (x @ y.unsqueeze(-2)).squeeze(-2)
class AttentionNew(nn.Module):
def __init__(self, d_key, dropout_ratio, causal):
super().__init__()
self.scale = math.sqrt(d_key)
self.dropout = nn.Dropout(dropout_ratio)
self.causal = causal
def forward(self, input_0, input_1, input_2):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
output = call([arg0_1, arg1_1, arg2_1])
return output[0]
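# Minimal smoke test (a sketch; assumes a CUDA device). The compiled graph
# is inference-only: the dropout of the original Attention module has been
# folded away, so outputs correspond to eval-mode behaviour.
m = AttentionNew(d_key=4, dropout_ratio=0.5, causal=False)
q, k, v = (torch.rand(4, 4, 4, 4, device='cuda') for _ in range(3))
print(m(q, k, v).shape)  # torch.Size([4, 4, 4, 4])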
|
FGDBTKD/decaNLP
|
Attention
| false | 13,675 |
[
"BSD-3-Clause"
] | 2,361 |
ff2d7e18cc226197bb8fe5fe796c4b8bc0395e86
|
https://github.com/FGDBTKD/decaNLP/tree/ff2d7e18cc226197bb8fe5fe796c4b8bc0395e86
|
ChannelNorm
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/pd/cpdkd7oy3g2qlxint6tsd7foiux4zykryk7wd4pgiuuqhuclbvui.py
# Topologically Sorted Source Nodes: [cumMean, cumVar, sub, add, rsqrt, x, mul_1, x_1], Original ATen: [aten.mean, aten.var, aten.sub, aten.add, aten.rsqrt, aten.mul]
# Source node to ATen node mapping:
# add => add
# cumMean => mean
# cumVar => var
# mul_1 => mul_1
# rsqrt => rsqrt
# sub => sub
# x => mul
# x_1 => add_1
# Graph fragment:
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [1], True), kwargs = {})
# %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%primals_1, [1]), kwargs = {correction: 1, keepdim: True})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%primals_1, %mean), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%var, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_3), kwargs = {})
triton_poi_fused_add_mean_mul_rsqrt_sub_var_0 = async_compile.triton('triton_poi_fused_add_mean_mul_rsqrt_sub_var_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_mul_rsqrt_sub_var_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mean_mul_rsqrt_sub_var_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x3 = (xindex // 64)
x5 = xindex % 16
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_ptr0 + (x4), xmask)
tmp1 = tl.load(in_ptr0 + (x5 + (64*x3)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x5 + (64*x3)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x5 + (64*x3)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x5 + (64*x3)), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tmp11 = tmp1 - tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp2 - tmp9
tmp14 = tmp13 * tmp13
tmp15 = tmp12 + tmp14
tmp16 = tmp4 - tmp9
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp6 - tmp9
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = 3.0
tmp23 = tmp21 / tmp22
tmp24 = 1e-05
tmp25 = tmp23 + tmp24
tmp26 = libdevice.rsqrt(tmp25)
tmp27 = tmp10 * tmp26
tmp29 = tmp27 * tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + (x4), tmp31, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (1, 4, 1), (4, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [cumMean, cumVar, sub, add, rsqrt, x, mul_1, x_1], Original ATen: [aten.mean, aten.var, aten.sub, aten.add, aten.rsqrt, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mean_mul_rsqrt_sub_var_0.run(primals_1, primals_2, primals_3, buf0, 256, grid=grid(256), stream=stream0)
del primals_2
del primals_3
return (buf0, primals_1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class ChannelNorm(nn.Module):
def __init__(self, numFeatures, epsilon=1e-05, affine=True):
super(ChannelNorm, self).__init__()
if affine:
self.weight = nn.parameter.Parameter(torch.Tensor(1,
numFeatures, 1))
self.bias = nn.parameter.Parameter(torch.Tensor(1, numFeatures, 1))
else:
self.weight = None
self.bias = None
self.epsilon = epsilon
self.p = 0
self.affine = affine
self.reset_parameters()
def reset_parameters(self):
if self.affine:
torch.nn.init.ones_(self.weight)
torch.nn.init.zeros_(self.bias)
def forward(self, x):
cumMean = x.mean(dim=1, keepdim=True)
cumVar = x.var(dim=1, keepdim=True)
x = (x - cumMean) * torch.rsqrt(cumVar + self.epsilon)
if self.weight is not None:
x = x * self.weight + self.bias
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'numFeatures': 4}]
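# Note on the statistics (a sketch, not part of the source): the fused
# kernel hard-codes 4 channels, taking the mean as sum / 4.0 and dividing
# squared deviations by 3.0 -- exactly torch.var's default unbiased
# estimate (correction=1, so N - 1 = 3).
_x = torch.rand(4, 4, 4, 4)
_v = ((_x - _x.mean(1, keepdim=True)) ** 2).sum(1, keepdim=True) / 3.0
assert torch.allclose(_v, _x.var(dim=1, keepdim=True), atol=1e-6)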
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mean_mul_rsqrt_sub_var_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex
x3 = xindex // 64
x5 = xindex % 16
x1 = xindex // 4 % 4
tmp0 = tl.load(in_ptr0 + x4, xmask)
tmp1 = tl.load(in_ptr0 + (x5 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x5 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x5 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x5 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp28 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = 4.0
tmp9 = tmp7 / tmp8
tmp10 = tmp0 - tmp9
tmp11 = tmp1 - tmp9
tmp12 = tmp11 * tmp11
tmp13 = tmp2 - tmp9
tmp14 = tmp13 * tmp13
tmp15 = tmp12 + tmp14
tmp16 = tmp4 - tmp9
tmp17 = tmp16 * tmp16
tmp18 = tmp15 + tmp17
tmp19 = tmp6 - tmp9
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = 3.0
tmp23 = tmp21 / tmp22
tmp24 = 1e-05
tmp25 = tmp23 + tmp24
tmp26 = libdevice.rsqrt(tmp25)
tmp27 = tmp10 * tmp26
tmp29 = tmp27 * tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + x4, tmp31, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (1, 4, 1), (4, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_mean_mul_rsqrt_sub_var_0[grid(256)](primals_1,
primals_2, primals_3, buf0, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_2
del primals_3
return buf0, primals_1
class ChannelNormNew(nn.Module):
def __init__(self, numFeatures, epsilon=1e-05, affine=True):
super(ChannelNormNew, self).__init__()
if affine:
self.weight = nn.parameter.Parameter(torch.Tensor(1,
numFeatures, 1))
self.bias = nn.parameter.Parameter(torch.Tensor(1, numFeatures, 1))
else:
self.weight = None
self.bias = None
self.epsilon = epsilon
self.p = 0
self.affine = affine
self.reset_parameters()
def reset_parameters(self):
if self.affine:
torch.nn.init.ones_(self.weight)
torch.nn.init.zeros_(self.bias)
def forward(self, input_0):
primals_2 = self.weight
primals_3 = self.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
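# Equivalence sketch (assumes a CUDA device and the eager ChannelNorm from
# this record): reset_parameters() sets weight to ones and bias to zeros,
# so two freshly constructed instances are directly comparable.
ref = ChannelNorm(4).cuda()
opt = ChannelNormNew(4).cuda()
x = torch.rand(4, 4, 4, 4, device='cuda')
assert torch.allclose(ref(x), opt(x), atol=1e-5)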
|
EyalSel/CPC_audio
|
ChannelNorm
| false | 13,676 |
[
"MIT"
] | 260 |
b98a1bdf1fe9ea219816db7a6c28115d404a3510
|
https://github.com/EyalSel/CPC_audio/tree/b98a1bdf1fe9ea219816db7a6c28115d404a3510
|
Conv
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/iu/ciuxern2omgit5ovksuiwlddxkww6e3pkid4q2h3sauzn5rbd35z.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_1 => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/au/cau4pihcaptiev5y2ewn2o2nvrwhk7hogc72cofmmtbyv4rxc2oy.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# x_1 => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(primals_1, buf0, 16, 4, grid=grid(16, 4), stream=stream0)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4), (16, 4, 1))
del buf0
buf2 = buf1; del buf1 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(buf2, primals_3, 64, grid=grid(64), stream=stream0)
del primals_3
return (reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0), primals_2, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class Conv(nn.Module):
"""
Convolution Module
"""
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=0, dilation=1, bias=True, w_init='linear'):
"""
:param in_channels: dimension of input
:param out_channels: dimension of output
:param kernel_size: size of kernel
:param stride: size of stride
:param padding: size of padding
:param dilation: dilation rate
:param bias: boolean. if True, bias is included.
        :param w_init: str. weight-initialization scheme (Xavier gain); accepted but not applied in this implementation.
"""
super(Conv, self).__init__()
self.conv = nn.Conv1d(in_channels, out_channels, kernel_size=
kernel_size, stride=stride, padding=padding, dilation=dilation,
bias=bias)
def forward(self, x):
x = x.contiguous().transpose(1, 2)
x = self.conv(x)
x = x.contiguous().transpose(1, 2)
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4}]
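# With the default kernel_size=1 used here, the transpose -> Conv1d ->
# transpose round trip is just a per-position linear map over the last
# dimension. Eager check (a sketch, not part of the source):
_conv = Conv(4, 4)
_x = torch.rand(4, 4, 4)
_lin = torch.nn.functional.linear(_x, _conv.conv.weight.squeeze(-1),
                                  _conv.conv.bias)
assert torch.allclose(_conv(_x), _lin, atol=1e-6)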
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl
.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,),
padding=(0,), dilation=(1,), transposed=False, output_padding=(
0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 4), (16, 4, 1))
del buf0
buf2 = buf1
del buf1
triton_poi_fused_convolution_1[grid(64)](buf2, primals_3, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_3
return reinterpret_tensor(buf2, (4, 4, 4), (16, 1, 4), 0
), primals_2, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0)
class ConvNew(nn.Module):
"""
Convolution Module
"""
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=0, dilation=1, bias=True, w_init='linear'):
"""
:param in_channels: dimension of input
:param out_channels: dimension of output
:param kernel_size: size of kernel
:param stride: size of stride
:param padding: size of padding
:param dilation: dilation rate
:param bias: boolean. if True, bias is included.
        :param w_init: str. weight-initialization scheme (Xavier gain); accepted but not applied in this implementation.
"""
super(ConvNew, self).__init__()
self.conv = nn.Conv1d(in_channels, out_channels, kernel_size=
kernel_size, stride=stride, padding=padding, dilation=dilation,
bias=bias)
def forward(self, input_0):
primals_2 = self.conv.weight
primals_3 = self.conv.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
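# Equivalence sketch (assumes a CUDA device and the eager Conv from this
# record): copy the Conv1d weights across, then compare. call() returns a
# transposed-stride view (strides (16, 1, 4)), matching the layout of the
# eager module's final .transpose(1, 2).
ref = Conv(4, 4)
opt = ConvNew(4, 4)
opt.conv.load_state_dict(ref.conv.state_dict())
ref, opt = ref.cuda(), opt.cuda()
x = torch.rand(4, 4, 4, device='cuda')
assert torch.allclose(ref(x), opt(x), atol=1e-5)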
|
FarisHijazi/klaam
|
Conv
| false | 13,677 |
[
"MIT"
] | 119 |
380b3cbf167bd4288cf5f3476e51f0939dff9e2c
|
https://github.com/FarisHijazi/klaam/tree/380b3cbf167bd4288cf5f3476e51f0939dff9e2c
|
LinearFeedforward
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_3, buf3, 256, grid=grid(256), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf2)
del primals_5
return (reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4, buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch import nn
import torch.utils.data
class Linear(nn.Linear):
def forward(self, x):
size = x.size()
return super().forward(x.contiguous().view(-1, size[-1])).view(*
size[:-1], -1)
class Feedforward(nn.Module):
def __init__(self, d_in, d_out, activation=None, bias=True, dropout=0.2):
super().__init__()
if activation is not None:
self.activation = getattr(torch, activation)
else:
self.activation = lambda x: x
self.linear = Linear(d_in, d_out, bias=bias)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
return self.activation(self.linear(self.dropout(x)))
class LinearFeedforward(nn.Module):
def __init__(self, d_in, d_hid, d_out, activation='relu'):
super().__init__()
self.feedforward = Feedforward(d_in, d_hid, activation=activation)
self.linear = Linear(d_hid, d_out)
self.dropout = nn.Dropout(0.2)
def forward(self, x):
return self.dropout(self.linear(self.feedforward(x)))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'d_in': 4, 'd_hid': 4, 'd_out': 4}]
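# A minimal sketch (not part of the original repo) of how a harness could
# consume the two helpers above: get_init_inputs() supplies constructor
# args/kwargs and get_inputs() supplies forward-pass tensors.
if __name__ == "__main__":
    init_args, init_kwargs = get_init_inputs()
    module = LinearFeedforward(*init_args, **init_kwargs)
    out = module(*get_inputs())
    print(out.shape)  # expected: torch.Size([4, 4, 4, 4])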
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
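# Fused kernel for relu(x + bias): adds the bias (indexed by x0, the last
# dimension of size 4), applies relu, writes the activated values back into
# in_out_ptr0 in place, and stores the boolean mask (activation <= 0) to
# out_ptr0 for reuse by the autograd threshold_backward pass.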
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_3, buf3, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf2)
del primals_5
return reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0
), reinterpret_tensor(primals_1, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_4, buf3
class Linear(nn.Linear):
def forward(self, x):
size = x.size()
return super().forward(x.contiguous().view(-1, size[-1])).view(*
size[:-1], -1)
class Feedforward(nn.Module):
def __init__(self, d_in, d_out, activation=None, bias=True, dropout=0.2):
super().__init__()
if activation is not None:
self.activation = getattr(torch, activation)
else:
self.activation = lambda x: x
self.linear = Linear(d_in, d_out, bias=bias)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
return self.activation(self.linear(self.dropout(x)))
class LinearFeedforwardNew(nn.Module):
def __init__(self, d_in, d_hid, d_out, activation='relu'):
super().__init__()
self.feedforward = Feedforward(d_in, d_hid, activation=activation)
self.linear = Linear(d_hid, d_out)
self.dropout = nn.Dropout(0.2)
def forward(self, input_0):
primals_2 = self.feedforward.linear.weight
primals_3 = self.feedforward.linear.bias
primals_4 = self.linear.weight
primals_5 = self.linear.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
|
FGDBTKD/decaNLP
|
LinearFeedforward
| false | 13,678 |
[
"BSD-3-Clause"
] | 2,361 |
ff2d7e18cc226197bb8fe5fe796c4b8bc0395e86
|
https://github.com/FGDBTKD/decaNLP/tree/ff2d7e18cc226197bb8fe5fe796c4b8bc0395e86
|
EncoderImagePrecomp
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/xa/cxakusasdtllfgw2fyeffneg3plpnkfny7w34qfgqsdm2rut2e74.py
# Topologically Sorted Source Nodes: [X], Original ATen: [aten.div]
# Source node to ATen node mapping:
# X => div
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%addmm, %expand), kwargs = {})
triton_poi_fused_div_0 = async_compile.triton('triton_poi_fused_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tmp0 / tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [features], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, primals_3, reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [X], Original ATen: [aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_div_0.run(buf0, buf1, 16, grid=grid(16), stream=stream0)
return (buf1, primals_3, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import numpy as np
from collections import OrderedDict
import torch.nn as nn
import torch.nn.init
def l2norm(X):
"""L2-normalize columns of X
"""
norm = torch.pow(X, 2).sum(dim=1).sqrt().view(X.size(0), -1)
X = torch.div(X, norm.expand_as(X))
return X
class EncoderImagePrecomp(nn.Module):
def __init__(self, img_dim, embed_size, use_abs=False, no_imgnorm=False):
super(EncoderImagePrecomp, self).__init__()
self.embed_size = embed_size
self.no_imgnorm = no_imgnorm
self.use_abs = use_abs
self.fc = nn.Linear(img_dim, embed_size)
self.init_weights()
def init_weights(self):
"""Xavier initialization for the fully connected layer
"""
r = np.sqrt(6.0) / np.sqrt(self.fc.in_features + self.fc.out_features)
self.fc.weight.data.uniform_(-r, r)
self.fc.bias.data.fill_(0)
def forward(self, images):
"""Extract image feature vectors."""
features = self.fc(images)
if not self.no_imgnorm:
features = l2norm(features)
if self.use_abs:
features = torch.abs(features)
return features
def load_state_dict(self, state_dict):
"""Copies parameters. overwritting the default one to
accept state_dict from Full model
"""
own_state = self.state_dict()
new_state = OrderedDict()
for name, param in list(state_dict.items()):
if name in own_state:
new_state[name] = param
super(EncoderImagePrecomp, self).load_state_dict(new_state)
def __call__(self, images):
return self.forward(images)
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'img_dim': 4, 'embed_size': 4}]
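# Illustrative identity (not from the original repo): l2norm above is
# equivalent to dividing by the per-row L2 norm.
if __name__ == "__main__":
    X = torch.rand(4, 4)
    ref = X / X.norm(p=2, dim=1, keepdim=True)
    assert torch.allclose(l2norm(X), ref, atol=1e-6)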
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import numpy as np
from collections import OrderedDict
import torch.nn as nn
import torch.nn.init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
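# Fused kernel for the l2norm division: for each of the 4 rows (x1) it
# reloads the row's 4 elements, accumulates the sum of squares, takes the
# square root, and divides the current element (x2) by that row norm.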
@triton.jit
def triton_poi_fused_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp1
tmp4 = tmp3 * tmp3
tmp5 = tmp2 + tmp4
tmp7 = tmp6 * tmp6
tmp8 = tmp5 + tmp7
tmp10 = tmp9 * tmp9
tmp11 = tmp8 + tmp10
tmp12 = libdevice.sqrt(tmp11)
tmp13 = tmp0 / tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, primals_3, reinterpret_tensor(
primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_div_0[grid(16)](buf0, buf1, 16, XBLOCK=16,
num_warps=1, num_stages=1)
return buf1, primals_3, buf0
def l2norm(X):
"""L2-normalize columns of X
"""
norm = torch.pow(X, 2).sum(dim=1).sqrt().view(X.size(0), -1)
X = torch.div(X, norm.expand_as(X))
return X
class EncoderImagePrecompNew(nn.Module):
def __init__(self, img_dim, embed_size, use_abs=False, no_imgnorm=False):
super(EncoderImagePrecompNew, self).__init__()
self.embed_size = embed_size
self.no_imgnorm = no_imgnorm
self.use_abs = use_abs
self.fc = nn.Linear(img_dim, embed_size)
self.init_weights()
def init_weights(self):
"""Xavier initialization for the fully connected layer
"""
r = np.sqrt(6.0) / np.sqrt(self.fc.in_features + self.fc.out_features)
self.fc.weight.data.uniform_(-r, r)
self.fc.bias.data.fill_(0)
def load_state_dict(self, state_dict):
"""Copies parameters. overwritting the default one to
accept state_dict from Full model
"""
own_state = self.state_dict()
new_state = OrderedDict()
for name, param in list(state_dict.items()):
if name in own_state:
new_state[name] = param
super(EncoderImagePrecompNew, self).load_state_dict(new_state)
def __call__(self, images):
return self.forward(images)
def forward(self, input_0):
primals_1 = self.fc.weight
primals_2 = self.fc.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
ExplorerFreda/VSE-C
|
EncoderImagePrecomp
| false | 13,679 |
[
"MIT"
] | 61 |
52d7742adfe017eacd74f36a5953ea2ace9f5fce
|
https://github.com/ExplorerFreda/VSE-C/tree/52d7742adfe017eacd74f36a5953ea2ace9f5fce
|
MultiHead
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/fz/cfzmg4qtw6jgry4nhlwopodzjz62ll3n3ykfox77hwd2crdnlh2w.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => exp
# Graph fragment:
# %mul_tensor_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, 1), kwargs = {})
# %amax_default_3 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_3, [-1], True), kwargs = {})
# %sub_tensor_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_3, %amax_default_3), kwargs = {})
# %div_tensor_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_3, 2.0), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_3,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + (x2), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/kj/ckjtlefzavjukjsytvkak6ek26zmzexpcbnlwelx4k5kascjxlf3.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => div_1, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/mk/cmkim2hc4ksxhatli3y5cu7hoqofxcbzqrrxvnlhmswdt4cgww25.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%bmm_1, %bmm_3, %bmm_5, %bmm_7], -1), kwargs = {})
triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x1), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + (x1), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tmp0 >= tmp12
tmp17 = tl.full([1], 4, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tl.load(in_ptr3 + (x1), tmp16 & xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tl.where(tmp14, tmp15, tmp19)
tmp21 = tl.where(tmp9, tmp10, tmp20)
tmp22 = tl.where(tmp4, tmp5, tmp21)
tl.store(out_ptr0 + (x2), tmp22, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_6, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_5, (16, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [dot_products], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 0), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(buf3, buf4, 64, grid=grid(64), stream=stream0)
buf5 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf4, buf5, 64, grid=grid(64), stream=stream0)
buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm]
extern_kernels.bmm(buf5, reinterpret_tensor(buf2, (4, 4, 1), (16, 4, 1), 0), out=buf6)
buf7 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [dot_products_1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 1), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 1), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_0.run(buf7, buf8, 64, grid=grid(64), stream=stream0)
buf9 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [softmax_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf8, buf9, 64, grid=grid(64), stream=stream0)
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_3], Original ATen: [aten.bmm]
extern_kernels.bmm(buf9, reinterpret_tensor(buf2, (4, 4, 1), (16, 4, 1), 1), out=buf10)
buf11 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [dot_products_2], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 2), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 2), out=buf11)
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_2], Original ATen: [aten._softmax]
triton_poi_fused__softmax_0.run(buf11, buf12, 64, grid=grid(64), stream=stream0)
buf13 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [softmax_2], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf12, buf13, 64, grid=grid(64), stream=stream0)
buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_5], Original ATen: [aten.bmm]
extern_kernels.bmm(buf13, reinterpret_tensor(buf2, (4, 4, 1), (16, 4, 1), 2), out=buf14)
buf15 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [dot_products_3], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 3), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 3), out=buf15)
buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_3], Original ATen: [aten._softmax]
triton_poi_fused__softmax_0.run(buf15, buf16, 64, grid=grid(64), stream=stream0)
buf17 = buf15; del buf15 # reuse
# Topologically Sorted Source Nodes: [softmax_3], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf16, buf17, 64, grid=grid(64), stream=stream0)
buf18 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_7], Original ATen: [aten.bmm]
extern_kernels.bmm(buf17, reinterpret_tensor(buf2, (4, 4, 1), (16, 4, 1), 3), out=buf18)
buf19 = buf16; del buf16 # reuse
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(buf6, buf10, buf14, buf18, buf19, 64, grid=grid(64), stream=stream0)
del buf10
del buf14
del buf18
del buf6
return (buf19, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_5, (16, 4), (4, 1), 0), buf5, buf9, buf13, buf17, reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 3), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 2), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 1), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import math
import torch
from torch import nn
from torch.nn import functional as F
import torch.utils.data
INF = 1e10  # assumed masking constant; the original decaNLP source defines INF = 1e10
def matmul(x, y):
    # Rank-dispatching matmul: when ranks differ by one, unsqueeze the
    # lower-rank operand, multiply, and squeeze the extra dim back out.
if x.dim() == y.dim():
return x @ y
if x.dim() == y.dim() - 1:
return (x.unsqueeze(-2) @ y).squeeze(-2)
return (x @ y.unsqueeze(-2)).squeeze(-2)
class Linear(nn.Linear):
def forward(self, x):
size = x.size()
return super().forward(x.contiguous().view(-1, size[-1])).view(*
size[:-1], -1)
class Attention(nn.Module):
def __init__(self, d_key, dropout_ratio, causal):
super().__init__()
self.scale = math.sqrt(d_key)
self.dropout = nn.Dropout(dropout_ratio)
self.causal = causal
def forward(self, query, key, value, padding=None):
dot_products = matmul(query, key.transpose(1, 2))
if query.dim() == 3 and self.causal:
tri = key.new_ones((key.size(1), key.size(1))).triu(1) * INF
dot_products.sub_(tri.unsqueeze(0))
if padding is not None:
dot_products.masked_fill_(padding.unsqueeze(1).expand_as(
dot_products), -INF)
return matmul(self.dropout(F.softmax(dot_products / self.scale, dim
=-1)), value)
class MultiHead(nn.Module):
def __init__(self, d_key, d_value, n_heads, dropout_ratio, causal=False):
super().__init__()
self.attention = Attention(d_key, dropout_ratio, causal=causal)
self.wq = Linear(d_key, d_key, bias=False)
self.wk = Linear(d_key, d_key, bias=False)
self.wv = Linear(d_value, d_value, bias=False)
self.n_heads = n_heads
def forward(self, query, key, value, padding=None):
query, key, value = self.wq(query), self.wk(key), self.wv(value)
query, key, value = (x.chunk(self.n_heads, -1) for x in (query, key,
value))
return torch.cat([self.attention(q, k, v, padding=padding) for q, k,
v in zip(query, key, value)], -1)
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4, 4])
]
def get_init_inputs():
return [[], {'d_key': 4, 'd_value': 4, 'n_heads': 4, 'dropout_ratio': 0.5}]
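# A minimal usage sketch (hypothetical, not from the original repo): with
# n_heads=4 and d_key=4 each head attends over a 1-dim slice, and the
# concatenated output keeps the input's (batch, seq, d) shape.
if __name__ == "__main__":
    init_args, init_kwargs = get_init_inputs()
    mh = MultiHead(*init_args, **init_kwargs).eval()  # eval() disables dropout
    q, k, v = get_inputs()
    print(mh(q, k, v).shape)  # expected: torch.Size([4, 4, 4])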
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import math
from torch import nn
from torch.nn import functional as F
import torch.utils.data
INF = 1e10  # assumed masking constant used by Attention.forward below (original decaNLP defines INF = 1e10)
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
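# The softmax over attention scores is split into two pointwise kernels:
# kernel 0 computes exp((x - rowmax) / scale) with scale = sqrt(d_key) =
# 2.0 (hence the 0.5 multiplier), and kernel 1 divides each element by its
# row sum.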
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x2, tmp17, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
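# Concatenation kernel: x0 (0..3) selects which of the four per-head bmm
# outputs supplies element x1, with the chained tl.where picking the
# matching input. The bare tl.full(...) calls below are leftover bound
# checks for the first and last branches whose comparisons are always true
# over the masked range.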
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + x1, tmp9 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + x1, tmp14 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp16 = tmp0 >= tmp12
tl.full([1], 4, tl.int64)
tmp19 = tl.load(in_ptr3 + x1, tmp16 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp20 = tl.where(tmp14, tmp15, tmp19)
tmp21 = tl.where(tmp9, tmp10, tmp20)
tmp22 = tl.where(tmp4, tmp5, tmp21)
tl.store(out_ptr0 + x2, tmp22, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_6, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf1)
del primals_4
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_5, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), out=buf2)
del primals_6
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1),
0), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf3, buf4, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf5 = buf3
del buf3
triton_poi_fused__softmax_1[grid(64)](buf4, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf5, reinterpret_tensor(buf2, (4, 4, 1), (16, 4,
1), 0), out=buf6)
buf7 = buf4
del buf4
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1),
1), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 1), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_0[grid(64)](buf7, buf8, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf9 = buf7
del buf7
triton_poi_fused__softmax_1[grid(64)](buf8, buf9, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf9, reinterpret_tensor(buf2, (4, 4, 1), (16, 4,
1), 1), out=buf10)
buf11 = buf8
del buf8
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1),
2), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 2), out=buf11)
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_0[grid(64)](buf11, buf12, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf13 = buf11
del buf11
triton_poi_fused__softmax_1[grid(64)](buf12, buf13, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf13, reinterpret_tensor(buf2, (4, 4, 1), (16,
4, 1), 2), out=buf14)
buf15 = buf12
del buf12
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1),
3), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 3), out=buf15)
buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_0[grid(64)](buf15, buf16, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf17 = buf15
del buf15
triton_poi_fused__softmax_1[grid(64)](buf16, buf17, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf18 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf17, reinterpret_tensor(buf2, (4, 4, 1), (16,
4, 1), 3), out=buf18)
buf19 = buf16
del buf16
triton_poi_fused_cat_2[grid(64)](buf6, buf10, buf14, buf18, buf19,
64, XBLOCK=64, num_warps=1, num_stages=1)
del buf10
del buf14
del buf18
del buf6
return buf19, reinterpret_tensor(primals_1, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_3, (16, 4), (4, 1), 0
), reinterpret_tensor(primals_5, (16, 4), (4, 1), 0
), buf5, buf9, buf13, buf17, reinterpret_tensor(buf2, (4, 1, 4), (
16, 1, 4), 3), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 3
), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 3
), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 2
), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 2
), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 2
), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 1
), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 1
), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 1
), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 0
), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 0
), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 0)
def matmul(x, y):
    # Rank-dispatching matmul (identical to the helper defined earlier).
if x.dim() == y.dim():
return x @ y
if x.dim() == y.dim() - 1:
return (x.unsqueeze(-2) @ y).squeeze(-2)
return (x @ y.unsqueeze(-2)).squeeze(-2)
class Linear(nn.Linear):
def forward(self, x):
size = x.size()
return super().forward(x.contiguous().view(-1, size[-1])).view(*
size[:-1], -1)
class Attention(nn.Module):
def __init__(self, d_key, dropout_ratio, causal):
super().__init__()
self.scale = math.sqrt(d_key)
self.dropout = nn.Dropout(dropout_ratio)
self.causal = causal
def forward(self, query, key, value, padding=None):
dot_products = matmul(query, key.transpose(1, 2))
if query.dim() == 3 and self.causal:
tri = key.new_ones((key.size(1), key.size(1))).triu(1) * INF
dot_products.sub_(tri.unsqueeze(0))
if padding is not None:
dot_products.masked_fill_(padding.unsqueeze(1).expand_as(
dot_products), -INF)
return matmul(self.dropout(F.softmax(dot_products / self.scale, dim
=-1)), value)
class MultiHeadNew(nn.Module):
def __init__(self, d_key, d_value, n_heads, dropout_ratio, causal=False):
super().__init__()
self.attention = Attention(d_key, dropout_ratio, causal=causal)
self.wq = Linear(d_key, d_key, bias=False)
self.wk = Linear(d_key, d_key, bias=False)
self.wv = Linear(d_value, d_value, bias=False)
self.n_heads = n_heads
def forward(self, input_0, input_1, input_2):
primals_2 = self.wq.weight
primals_4 = self.wk.weight
primals_6 = self.wv.weight
primals_1 = input_0
primals_3 = input_1
primals_5 = input_2
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
|
FGDBTKD/decaNLP
|
MultiHead
| false | 13,680 |
[
"BSD-3-Clause"
] | 2,361 |
ff2d7e18cc226197bb8fe5fe796c4b8bc0395e86
|
https://github.com/FGDBTKD/decaNLP/tree/ff2d7e18cc226197bb8fe5fe796c4b8bc0395e86
|
NormLoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/lh/clhwt6v3a5lppxa444xcegkpbyd2ik5hm2mrr3tdmmbjsrqtimrv.py
# Topologically Sorted Source Nodes: [norm], Original ATen: [aten.linalg_vector_norm]
# Source node to ATen node mapping:
# norm => pow_1, pow_2, sum_1
# Graph fragment:
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg0_1, 4), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%pow_1, None), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sum_1, 0.25), kwargs = {})
triton_per_fused_linalg_vector_norm_0 = async_compile.triton('triton_per_fused_linalg_vector_norm_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_linalg_vector_norm_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_linalg_vector_norm_0(in_out_ptr0, in_ptr0, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp1 = tmp0 * tmp0
tmp2 = tmp1 * tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = 0.25
tmp7 = libdevice.pow(tmp5, tmp6)
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp7, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [norm], Original ATen: [aten.linalg_vector_norm]
stream0 = get_raw_stream(0)
triton_per_fused_linalg_vector_norm_0.run(buf1, arg0_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
class NormLoss(torch.nn.Module):
"""
Norm penalty on function
parameters:
        p - order of the norm (the p in the L_p norm)
"""
def __init__(self, p):
super(NormLoss, self).__init__()
self.p = p
def forward(self, beta):
return torch.norm(beta, p=self.p)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'p': 4}]
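# Illustrative identity (not from the original repo) matching the Triton
# kernel above: for p=4, torch.norm computes (sum(|x|**4)) ** 0.25 over the
# flattened tensor.
if __name__ == "__main__":
    beta = torch.rand(4, 4, 4, 4)
    loss = NormLoss(4)(beta)
    assert torch.allclose(loss, beta.pow(4).sum().pow(0.25), atol=1e-5)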
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
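# Persistent reduction over all 256 elements: squares each value twice to
# get x**4, sums across the block, and raises the sum to the 0.25 power,
# i.e. the vector 4-norm, written to a 0-dim output buffer.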
@triton.jit
def triton_per_fused_linalg_vector_norm_0(in_out_ptr0, in_ptr0, xnumel, rnumel
):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp1 = tmp0 * tmp0
tmp2 = tmp1 * tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = 0.25
tmp7 = libdevice.pow(tmp5, tmp6)
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp7, None)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_linalg_vector_norm_0[grid(1)](buf1, arg0_1, 1, 256,
num_warps=2, num_stages=1)
del arg0_1
return buf1,
class NormLossNew(torch.nn.Module):
"""
Norm penalty on function
parameters:
        p - order of the norm (the p in the L_p norm)
"""
def __init__(self, p):
super(NormLossNew, self).__init__()
self.p = p
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Filco306/TopologyLayer
|
NormLoss
| false | 13,681 |
[
"MIT"
] | 250 |
1d6261017a80cff0ee06bb896ded40777b0989b4
|
https://github.com/Filco306/TopologyLayer/tree/1d6261017a80cff0ee06bb896ded40777b0989b4
|
BoundaryDiscriminator
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/x5/cx5x3ctvlxfngzqxjhckinfe5o47op62tt3oe6cizqbvhu2jadcg.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x => gt, mul, where
# Graph fragment:
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.2), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 278784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + (x0), tmp2, xmask)
tl.store(out_ptr1 + (x0), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/j6/cj6preby6v25qpri36zdc64okaiku4g3n2g3ntdegoyuutlffwkx.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x_1 => gt_1, mul_1, where_1
# Graph fragment:
# %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 0.2), kwargs = {})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution_1, %mul_1), kwargs = {})
triton_poi_fused_leaky_relu_1 = async_compile.triton('triton_poi_fused_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 147968
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + (x0), tmp2, xmask)
tl.store(out_ptr1 + (x0), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/24/c24d6loxuicc53yfdbmizoqhkrh5m2bpnt3a4ffeuex7f6y3vnth.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x_2 => gt_2, mul_2, where_2
# Graph fragment:
# %gt_2 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_2, 0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, 0.2), kwargs = {})
# %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %convolution_2, %mul_2), kwargs = {})
triton_poi_fused_leaky_relu_2 = async_compile.triton('triton_poi_fused_leaky_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 82944
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + (x0), tmp2, xmask)
tl.store(out_ptr1 + (x0), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/f7/cf7lcprivgof4fqsrblqrk334iqoref57wus34pdpmrc65sotzf7.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x_3 => gt_3, mul_3, where_3
# Graph fragment:
# %gt_3 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_3, 0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_3, 0.2), kwargs = {})
# %where_3 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %convolution_3, %mul_3), kwargs = {})
triton_poi_fused_leaky_relu_3 = async_compile.triton('triton_poi_fused_leaky_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 51200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), None)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + (x0), tmp2, None)
tl.store(out_ptr1 + (x0), tmp5, None)
''', device_str='cuda')
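# Note on triton_poi_fused_leaky_relu_3 above: its bounds mask is constant
# True (tl.full([XBLOCK], True, tl.int1)) rather than `xindex < xnumel`.
# That is safe here because xnumel = 4*512*5*5 = 51200 and the standalone
# wrapper later in this file launches the kernel with XBLOCK=256, which
# divides 51200 exactly, so every block is full. A quick check of that
# divisibility assumption:
assert 4 * 512 * 5 * 5 == 51200 and 51200 % 256 == 0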
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (64, 1, 4, 4), (16, 16, 4, 1))
assert_size_stride(primals_2, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_3, (128, 64, 4, 4), (1024, 16, 4, 1))
assert_size_stride(primals_4, (256, 128, 4, 4), (2048, 16, 4, 1))
assert_size_stride(primals_5, (512, 256, 4, 4), (4096, 16, 4, 1))
assert_size_stride(primals_6, (1, 512, 4, 4), (8192, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 33, 33), (69696, 1089, 33, 1))
buf1 = empty_strided_cuda((4, 64, 33, 33), (69696, 1089, 33, 1), torch.bool)
buf2 = empty_strided_cuda((4, 64, 33, 33), (69696, 1089, 33, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_leaky_relu_0.run(buf0, buf1, buf2, 278784, grid=grid(278784), stream=stream0)
del buf0
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_3, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 128, 17, 17), (36992, 289, 17, 1))
buf4 = empty_strided_cuda((4, 128, 17, 17), (36992, 289, 17, 1), torch.bool)
buf5 = empty_strided_cuda((4, 128, 17, 17), (36992, 289, 17, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_1.run(buf3, buf4, buf5, 147968, grid=grid(147968), stream=stream0)
del buf3
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_4, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 256, 9, 9), (20736, 81, 9, 1))
buf7 = empty_strided_cuda((4, 256, 9, 9), (20736, 81, 9, 1), torch.bool)
buf8 = empty_strided_cuda((4, 256, 9, 9), (20736, 81, 9, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_2.run(buf6, buf7, buf8, 82944, grid=grid(82944), stream=stream0)
del buf6
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(buf8, primals_5, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 512, 5, 5), (12800, 25, 5, 1))
buf10 = empty_strided_cuda((4, 512, 5, 5), (12800, 25, 5, 1), torch.bool)
buf11 = empty_strided_cuda((4, 512, 5, 5), (12800, 25, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_3.run(buf9, buf10, buf11, 51200, grid=grid(51200), stream=stream0)
del buf9
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf11, primals_6, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 1, 3, 3), (9, 9, 3, 1))
return (buf12, primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, buf1, buf2, buf4, buf5, buf7, buf8, buf10, buf11, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 1, 4, 4), (16, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 1, 64, 64), (4096, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((128, 64, 4, 4), (1024, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((256, 128, 4, 4), (2048, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((512, 256, 4, 4), (4096, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 512, 4, 4), (8192, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
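
# The spatial sizes asserted in call() follow the usual convolution formula
# out = floor((in + 2*pad - k) / stride) + 1 with k=4, stride=2, pad=2 at
# every stage. A small self-check of the 64 -> 33 -> 17 -> 9 -> 5 -> 3 chain
# (illustrative only):
def _conv_out(size, k=4, stride=2, pad=2):
    return (size + 2 * pad - k) // stride + 1

_sizes = [64]
for _ in range(5):
    _sizes.append(_conv_out(_sizes[-1]))
assert _sizes == [64, 33, 17, 9, 5, 3]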
|
import torch
import torch.nn as nn
class BoundaryDiscriminator(nn.Module):
def __init__(self):
super(BoundaryDiscriminator, self).__init__()
filter_num_list = [64, 128, 256, 512, 1]
        self.conv1 = nn.Conv2d(1, filter_num_list[0], kernel_size=4,
            stride=2, padding=2, bias=False)
        self.conv2 = nn.Conv2d(filter_num_list[0], filter_num_list[1],
            kernel_size=4, stride=2, padding=2, bias=False)
        self.conv3 = nn.Conv2d(filter_num_list[1], filter_num_list[2],
            kernel_size=4, stride=2, padding=2, bias=False)
        self.conv4 = nn.Conv2d(filter_num_list[2], filter_num_list[3],
            kernel_size=4, stride=2, padding=2, bias=False)
        self.conv5 = nn.Conv2d(filter_num_list[3], filter_num_list[4],
            kernel_size=4, stride=2, padding=2, bias=False)
self.leakyrelu = nn.LeakyReLU(negative_slope=0.2)
self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
m.weight.data.normal_(0.0, 0.02)
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x):
x = self.leakyrelu(self.conv1(x))
x = self.leakyrelu(self.conv2(x))
x = self.leakyrelu(self.conv3(x))
x = self.leakyrelu(self.conv4(x))
x = self.conv5(x)
return x
def get_inputs():
return [torch.rand([4, 1, 64, 64])]
def get_init_inputs():
return [[], {}]
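# A minimal usage sketch (illustrative, mirroring get_inputs() above): the
# discriminator maps a single-channel 64x64 map to a 3x3 patch of logits.
if __name__ == "__main__":
    _model = BoundaryDiscriminator()
    _logits = _model(torch.rand(4, 1, 64, 64))
    assert _logits.shape == (4, 1, 3, 3)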
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 278784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr1 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 147968
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr1 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_2(in_ptr0, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 82944
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr1 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_3(in_ptr0, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, None)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + x0, tmp2, None)
tl.store(out_ptr1 + x0, tmp5, None)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (64, 1, 4, 4), (16, 16, 4, 1))
assert_size_stride(primals_2, (4, 1, 64, 64), (4096, 4096, 64, 1))
assert_size_stride(primals_3, (128, 64, 4, 4), (1024, 16, 4, 1))
assert_size_stride(primals_4, (256, 128, 4, 4), (2048, 16, 4, 1))
assert_size_stride(primals_5, (512, 256, 4, 4), (4096, 16, 4, 1))
assert_size_stride(primals_6, (1, 512, 4, 4), (8192, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(2,
2), padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 33, 33), (69696, 1089, 33, 1))
buf1 = empty_strided_cuda((4, 64, 33, 33), (69696, 1089, 33, 1),
torch.bool)
buf2 = empty_strided_cuda((4, 64, 33, 33), (69696, 1089, 33, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(278784)](buf0, buf1, buf2,
278784, XBLOCK=512, num_warps=8, num_stages=1)
del buf0
buf3 = extern_kernels.convolution(buf2, primals_3, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 128, 17, 17), (36992, 289, 17, 1))
buf4 = empty_strided_cuda((4, 128, 17, 17), (36992, 289, 17, 1),
torch.bool)
buf5 = empty_strided_cuda((4, 128, 17, 17), (36992, 289, 17, 1),
torch.float32)
triton_poi_fused_leaky_relu_1[grid(147968)](buf3, buf4, buf5,
147968, XBLOCK=512, num_warps=8, num_stages=1)
del buf3
buf6 = extern_kernels.convolution(buf5, primals_4, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 256, 9, 9), (20736, 81, 9, 1))
        buf7 = empty_strided_cuda((4, 256, 9, 9), (20736, 81, 9, 1),
            torch.bool)
        buf8 = empty_strided_cuda((4, 256, 9, 9), (20736, 81, 9, 1),
            torch.float32)
triton_poi_fused_leaky_relu_2[grid(82944)](buf6, buf7, buf8, 82944,
XBLOCK=512, num_warps=8, num_stages=1)
del buf6
buf9 = extern_kernels.convolution(buf8, primals_5, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 512, 5, 5), (12800, 25, 5, 1))
        buf10 = empty_strided_cuda((4, 512, 5, 5), (12800, 25, 5, 1),
            torch.bool)
        buf11 = empty_strided_cuda((4, 512, 5, 5), (12800, 25, 5, 1),
            torch.float32)
triton_poi_fused_leaky_relu_3[grid(51200)](buf9, buf10, buf11,
51200, XBLOCK=256, num_warps=4, num_stages=1)
del buf9
buf12 = extern_kernels.convolution(buf11, primals_6, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 1, 3, 3), (9, 9, 3, 1))
return (buf12, primals_1, primals_2, primals_3, primals_4, primals_5,
primals_6, buf1, buf2, buf4, buf5, buf7, buf8, buf10, buf11)
class BoundaryDiscriminatorNew(nn.Module):
def __init__(self):
super(BoundaryDiscriminatorNew, self).__init__()
filter_num_list = [64, 128, 256, 512, 1]
        self.conv1 = nn.Conv2d(1, filter_num_list[0], kernel_size=4,
            stride=2, padding=2, bias=False)
        self.conv2 = nn.Conv2d(filter_num_list[0], filter_num_list[1],
            kernel_size=4, stride=2, padding=2, bias=False)
        self.conv3 = nn.Conv2d(filter_num_list[1], filter_num_list[2],
            kernel_size=4, stride=2, padding=2, bias=False)
        self.conv4 = nn.Conv2d(filter_num_list[2], filter_num_list[3],
            kernel_size=4, stride=2, padding=2, bias=False)
        self.conv5 = nn.Conv2d(filter_num_list[3], filter_num_list[4],
            kernel_size=4, stride=2, padding=2, bias=False)
self.leakyrelu = nn.LeakyReLU(negative_slope=0.2)
self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
m.weight.data.normal_(0.0, 0.02)
if m.bias is not None:
m.bias.data.zero_()
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_3 = self.conv2.weight
primals_4 = self.conv3.weight
primals_5 = self.conv4.weight
primals_6 = self.conv5.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
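
# A hedged parity sketch (assumes a CUDA device, since call() pins cuda:0):
# the compiled forward should match an eager re-implementation of the same
# conv -> LeakyReLU chain built from this module's own weights.
def _eager_reference(m, x):
    import torch.nn.functional as F
    for conv in (m.conv1, m.conv2, m.conv3, m.conv4):
        x = F.leaky_relu(conv(x), negative_slope=0.2)
    return m.conv5(x)

if __name__ == "__main__" and torch.cuda.is_available():
    _m = BoundaryDiscriminatorNew().cuda()
    _x = torch.rand(4, 1, 64, 64, device='cuda')
    assert torch.allclose(_m(_x), _eager_reference(_m, _x), atol=1e-5)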
|
repo_name: EmmaW8/BEAL | module_name: BoundaryDiscriminator | synthetic: false | uuid: 13682 | licenses: ["MIT"] | stars: 95 | sha: 945cad38a354605b8bca5bc01ae1b65848d605e1 | repo_link: https://github.com/EmmaW8/BEAL/tree/945cad38a354605b8bca5bc01ae1b65848d605e1 |
entry_point: OutputDiscriminator
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/x5/cx5x3ctvlxfngzqxjhckinfe5o47op62tt3oe6cizqbvhu2jadcg.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x => gt, mul, where
# Graph fragment:
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.2), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 278784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + (x0), tmp2, xmask)
tl.store(out_ptr1 + (x0), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/j6/cj6preby6v25qpri36zdc64okaiku4g3n2g3ntdegoyuutlffwkx.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x_1 => gt_1, mul_1, where_1
# Graph fragment:
# %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 0.2), kwargs = {})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution_1, %mul_1), kwargs = {})
triton_poi_fused_leaky_relu_1 = async_compile.triton('triton_poi_fused_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 147968
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + (x0), tmp2, xmask)
tl.store(out_ptr1 + (x0), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/24/c24d6loxuicc53yfdbmizoqhkrh5m2bpnt3a4ffeuex7f6y3vnth.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x_2 => gt_2, mul_2, where_2
# Graph fragment:
# %gt_2 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_2, 0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, 0.2), kwargs = {})
# %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %convolution_2, %mul_2), kwargs = {})
triton_poi_fused_leaky_relu_2 = async_compile.triton('triton_poi_fused_leaky_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 82944
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + (x0), tmp2, xmask)
tl.store(out_ptr1 + (x0), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/f7/cf7lcprivgof4fqsrblqrk334iqoref57wus34pdpmrc65sotzf7.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x_3 => gt_3, mul_3, where_3
# Graph fragment:
# %gt_3 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_3, 0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_3, 0.2), kwargs = {})
# %where_3 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %convolution_3, %mul_3), kwargs = {})
triton_poi_fused_leaky_relu_3 = async_compile.triton('triton_poi_fused_leaky_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 51200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), None)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + (x0), tmp2, None)
tl.store(out_ptr1 + (x0), tmp5, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (64, 2, 4, 4), (32, 16, 4, 1))
assert_size_stride(primals_2, (4, 2, 64, 64), (8192, 4096, 64, 1))
assert_size_stride(primals_3, (128, 64, 4, 4), (1024, 16, 4, 1))
assert_size_stride(primals_4, (256, 128, 4, 4), (2048, 16, 4, 1))
assert_size_stride(primals_5, (512, 256, 4, 4), (4096, 16, 4, 1))
assert_size_stride(primals_6, (1, 512, 4, 4), (8192, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 33, 33), (69696, 1089, 33, 1))
buf1 = empty_strided_cuda((4, 64, 33, 33), (69696, 1089, 33, 1), torch.bool)
buf2 = empty_strided_cuda((4, 64, 33, 33), (69696, 1089, 33, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_leaky_relu_0.run(buf0, buf1, buf2, 278784, grid=grid(278784), stream=stream0)
del buf0
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_3, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 128, 17, 17), (36992, 289, 17, 1))
buf4 = empty_strided_cuda((4, 128, 17, 17), (36992, 289, 17, 1), torch.bool)
buf5 = empty_strided_cuda((4, 128, 17, 17), (36992, 289, 17, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_1.run(buf3, buf4, buf5, 147968, grid=grid(147968), stream=stream0)
del buf3
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_4, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 256, 9, 9), (20736, 81, 9, 1))
buf7 = empty_strided_cuda((4, 256, 9, 9), (20736, 81, 9, 1), torch.bool)
buf8 = empty_strided_cuda((4, 256, 9, 9), (20736, 81, 9, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_2.run(buf6, buf7, buf8, 82944, grid=grid(82944), stream=stream0)
del buf6
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(buf8, primals_5, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 512, 5, 5), (12800, 25, 5, 1))
buf10 = empty_strided_cuda((4, 512, 5, 5), (12800, 25, 5, 1), torch.bool)
buf11 = empty_strided_cuda((4, 512, 5, 5), (12800, 25, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_3.run(buf9, buf10, buf11, 51200, grid=grid(51200), stream=stream0)
del buf9
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf11, primals_6, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 1, 3, 3), (9, 9, 3, 1))
return (buf12, primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, buf1, buf2, buf4, buf5, buf7, buf8, buf10, buf11, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 2, 4, 4), (32, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 2, 64, 64), (8192, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((128, 64, 4, 4), (1024, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((256, 128, 4, 4), (2048, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((512, 256, 4, 4), (4096, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 512, 4, 4), (8192, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class OutputDiscriminator(nn.Module):
def __init__(self):
super(OutputDiscriminator, self).__init__()
filter_num_list = [64, 128, 256, 512, 1]
        self.conv1 = nn.Conv2d(2, filter_num_list[0], kernel_size=4,
            stride=2, padding=2, bias=False)
        self.conv2 = nn.Conv2d(filter_num_list[0], filter_num_list[1],
            kernel_size=4, stride=2, padding=2, bias=False)
        self.conv3 = nn.Conv2d(filter_num_list[1], filter_num_list[2],
            kernel_size=4, stride=2, padding=2, bias=False)
        self.conv4 = nn.Conv2d(filter_num_list[2], filter_num_list[3],
            kernel_size=4, stride=2, padding=2, bias=False)
        self.conv5 = nn.Conv2d(filter_num_list[3], filter_num_list[4],
            kernel_size=4, stride=2, padding=2, bias=False)
self.leakyrelu = nn.LeakyReLU(negative_slope=0.2)
self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
m.weight.data.normal_(0.0, 0.02)
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x):
x = self.leakyrelu(self.conv1(x))
x = self.leakyrelu(self.conv2(x))
x = self.leakyrelu(self.conv3(x))
x = self.leakyrelu(self.conv4(x))
x = self.conv5(x)
return x
def get_inputs():
return [torch.rand([4, 2, 64, 64])]
def get_init_inputs():
return [[], {}]
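# A minimal smoke-test sketch (illustrative): the architecture is identical
# to the boundary discriminator above except for the 2-channel input, so the
# same 64 -> 33 -> 17 -> 9 -> 5 -> 3 spatial chain applies.
if __name__ == "__main__":
    _model = OutputDiscriminator()
    _out = _model(torch.rand(4, 2, 64, 64))
    assert _out.shape == (4, 1, 3, 3)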
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 278784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr1 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 147968
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr1 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_2(in_ptr0, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 82944
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr1 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_3(in_ptr0, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, None)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + x0, tmp2, None)
tl.store(out_ptr1 + x0, tmp5, None)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (64, 2, 4, 4), (32, 16, 4, 1))
assert_size_stride(primals_2, (4, 2, 64, 64), (8192, 4096, 64, 1))
assert_size_stride(primals_3, (128, 64, 4, 4), (1024, 16, 4, 1))
assert_size_stride(primals_4, (256, 128, 4, 4), (2048, 16, 4, 1))
assert_size_stride(primals_5, (512, 256, 4, 4), (4096, 16, 4, 1))
assert_size_stride(primals_6, (1, 512, 4, 4), (8192, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(2,
2), padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 33, 33), (69696, 1089, 33, 1))
buf1 = empty_strided_cuda((4, 64, 33, 33), (69696, 1089, 33, 1),
torch.bool)
buf2 = empty_strided_cuda((4, 64, 33, 33), (69696, 1089, 33, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(278784)](buf0, buf1, buf2,
278784, XBLOCK=512, num_warps=8, num_stages=1)
del buf0
buf3 = extern_kernels.convolution(buf2, primals_3, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 128, 17, 17), (36992, 289, 17, 1))
buf4 = empty_strided_cuda((4, 128, 17, 17), (36992, 289, 17, 1),
torch.bool)
buf5 = empty_strided_cuda((4, 128, 17, 17), (36992, 289, 17, 1),
torch.float32)
triton_poi_fused_leaky_relu_1[grid(147968)](buf3, buf4, buf5,
147968, XBLOCK=512, num_warps=8, num_stages=1)
del buf3
buf6 = extern_kernels.convolution(buf5, primals_4, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 256, 9, 9), (20736, 81, 9, 1))
        buf7 = empty_strided_cuda((4, 256, 9, 9), (20736, 81, 9, 1),
            torch.bool)
        buf8 = empty_strided_cuda((4, 256, 9, 9), (20736, 81, 9, 1),
            torch.float32)
triton_poi_fused_leaky_relu_2[grid(82944)](buf6, buf7, buf8, 82944,
XBLOCK=512, num_warps=8, num_stages=1)
del buf6
buf9 = extern_kernels.convolution(buf8, primals_5, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 512, 5, 5), (12800, 25, 5, 1))
        buf10 = empty_strided_cuda((4, 512, 5, 5), (12800, 25, 5, 1),
            torch.bool)
        buf11 = empty_strided_cuda((4, 512, 5, 5), (12800, 25, 5, 1),
            torch.float32)
triton_poi_fused_leaky_relu_3[grid(51200)](buf9, buf10, buf11,
51200, XBLOCK=256, num_warps=4, num_stages=1)
del buf9
buf12 = extern_kernels.convolution(buf11, primals_6, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 1, 3, 3), (9, 9, 3, 1))
return (buf12, primals_1, primals_2, primals_3, primals_4, primals_5,
primals_6, buf1, buf2, buf4, buf5, buf7, buf8, buf10, buf11)
class OutputDiscriminatorNew(nn.Module):
def __init__(self):
super(OutputDiscriminatorNew, self).__init__()
filter_num_list = [64, 128, 256, 512, 1]
        self.conv1 = nn.Conv2d(2, filter_num_list[0], kernel_size=4,
            stride=2, padding=2, bias=False)
        self.conv2 = nn.Conv2d(filter_num_list[0], filter_num_list[1],
            kernel_size=4, stride=2, padding=2, bias=False)
        self.conv3 = nn.Conv2d(filter_num_list[1], filter_num_list[2],
            kernel_size=4, stride=2, padding=2, bias=False)
        self.conv4 = nn.Conv2d(filter_num_list[2], filter_num_list[3],
            kernel_size=4, stride=2, padding=2, bias=False)
        self.conv5 = nn.Conv2d(filter_num_list[3], filter_num_list[4],
            kernel_size=4, stride=2, padding=2, bias=False)
self.leakyrelu = nn.LeakyReLU(negative_slope=0.2)
self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
m.weight.data.normal_(0.0, 0.02)
if m.bias is not None:
m.bias.data.zero_()
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_3 = self.conv2.weight
primals_4 = self.conv3.weight
primals_5 = self.conv4.weight
primals_6 = self.conv5.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
|
repo_name: EmmaW8/BEAL | module_name: OutputDiscriminator | synthetic: false | uuid: 13683 | licenses: ["MIT"] | stars: 95 | sha: 945cad38a354605b8bca5bc01ae1b65848d605e1 | repo_link: https://github.com/EmmaW8/BEAL/tree/945cad38a354605b8bca5bc01ae1b65848d605e1 |
entry_point: PReLU
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ny/cnylvjg56iz4qbiyrq3d6r64xfhklztecm7on2cqbsglnqcjqav4.py
# Topologically Sorted Source Nodes: [relu, neg, relu_1, mul, sub], Original ATen: [aten.relu, aten.neg, aten.mul, aten.sub]
# Source node to ATen node mapping:
# mul => mul
# neg => neg
# relu => relu
# relu_1 => relu_1
# sub => sub
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%primals_1,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%primals_1,), kwargs = {})
# %relu_1 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%neg,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %relu_1), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%relu, %mul), kwargs = {})
triton_poi_fused_mul_neg_relu_sub_0 = async_compile.triton('triton_poi_fused_mul_neg_relu_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_neg_relu_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_neg_relu_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp3 = tl.load(in_ptr1 + (0))
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp5 = -tmp0
tmp6 = triton_helpers.maximum(tmp1, tmp5)
tmp7 = tmp4 * tmp6
tmp8 = tmp2 - tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
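# Note on the scalar load above: alpha is a 0-d tensor, so the kernel reads a
# single element (tl.load(in_ptr1 + (0))) and broadcasts it across the block
# with tl.broadcast_to before the multiply. In eager terms the kernel computes
# relu(x) - alpha * relu(-x), which equals where(x > 0, x, alpha * x) for any
# scalar alpha. A quick numeric check of that identity (illustrative):
_x = torch.linspace(-2.0, 2.0, steps=9)
assert torch.allclose(torch.relu(_x) - 0.25 * torch.relu(-_x),
                      torch.where(_x > 0, _x, 0.25 * _x))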
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (), ())
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [relu, neg, relu_1, mul, sub], Original ATen: [aten.relu, aten.neg, aten.mul, aten.sub]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_neg_relu_sub_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0)
del primals_2
return (buf0, primals_1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((), (), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
import torch.utils.data
import torch.cuda
import torch.optim
class PReLU(nn.Module):
def __init__(self):
super(PReLU, self).__init__()
self.alpha = Parameter(torch.tensor(0.25))
def forward(self, x):
return nn.ReLU()(x) - self.alpha * nn.ReLU()(-x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
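# A hedged equivalence sketch (illustrative): with a single shared slope
# initialized to 0.25, this module computes the same function as the built-in
# nn.PReLU(num_parameters=1, init=0.25).
if __name__ == "__main__":
    _x = torch.randn(4, 4, 4, 4)
    assert torch.allclose(PReLU()(_x), nn.PReLU(num_parameters=1, init=0.25)(_x))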
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
from torch.nn.parameter import Parameter
import torch.utils.data
import torch.cuda
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_neg_relu_sub_0(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp3 = tl.load(in_ptr1 + 0)
tmp4 = tl.broadcast_to(tmp3, [XBLOCK])
tmp1 = tl.full([1], 0, tl.int32)
tmp2 = triton_helpers.maximum(tmp1, tmp0)
tmp5 = -tmp0
tmp6 = triton_helpers.maximum(tmp1, tmp5)
tmp7 = tmp4 * tmp6
tmp8 = tmp2 - tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (), ())
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_neg_relu_sub_0[grid(256)](primals_1, primals_2,
buf0, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
return buf0, primals_1
class PReLUNew(nn.Module):
def __init__(self):
super(PReLUNew, self).__init__()
self.alpha = Parameter(torch.tensor(0.25))
def forward(self, input_0):
primals_2 = self.alpha
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
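
# A minimal CUDA usage sketch (assumes a GPU, since call() pins cuda:0): the
# compiled module should agree with the eager formula above.
if __name__ == "__main__" and torch.cuda.is_available():
    _m = PReLUNew().cuda()
    _x = torch.randn(4, 4, 4, 4, device='cuda')
    assert torch.allclose(_m(_x), torch.relu(_x) - _m.alpha * torch.relu(-_x))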
|
repo_name: Flamexmt/LMA | module_name: PReLU | synthetic: false | uuid: 13684 | licenses: ["MIT"] | stars: 321 | sha: f6fdec2d17a2d7a7733dd5a5745312bad392cdf3 | repo_link: https://github.com/Flamexmt/LMA/tree/f6fdec2d17a2d7a7733dd5a5745312bad392cdf3 |
entry_point: ResidualBlock_noBN
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ej/cejfrwnzxinkchwn6symdb72fdtj7gix5hy2vuswodhbeh45mrae.py
# Topologically Sorted Source Nodes: [conv2d, out], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# out => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 64
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/li/climin3xbj6rm2mbnxyxnqlna6vggaazmibti2uk3s4hdmjrzu3e.py
# Topologically Sorted Source Nodes: [out_1, add], Original ATen: [aten.convolution, aten.add]
# Source node to ATen node mapping:
# add => add
# out_1 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %convolution_1), kwargs = {})
triton_poi_fused_add_convolution_1 = async_compile.triton('triton_poi_fused_add_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 64
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_out_ptr0 + (x3), None)
tmp2 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 64, 64, 64), (262144, 4096, 64, 1))
assert_size_stride(primals_2, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_3, (64, ), (1, ))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, out], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_3, 1048576, grid=grid(1048576), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [out_1, add], Original ATen: [aten.convolution, aten.add]
triton_poi_fused_add_convolution_1.run(buf3, primals_1, primals_5, 1048576, grid=grid(1048576), stream=stream0)
del primals_5
return (buf3, primals_1, primals_2, primals_4, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 64, 64, 64), (262144, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.utils.data
import torch.nn.functional as F
import torch.nn as nn
import torch.nn.init as init
def initialize_weights(net_l, scale=1):
if not isinstance(net_l, list):
net_l = [net_l]
for net in net_l:
for m in net.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight, a=0, mode='fan_in')
m.weight.data *= scale
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
init.kaiming_normal_(m.weight, a=0, mode='fan_in')
m.weight.data *= scale
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias.data, 0.0)
class ResidualBlock_noBN(nn.Module):
"""Residual block w/o BN
---Conv-ReLU-Conv-+-
|________________|
"""
def __init__(self, nf=64):
super(ResidualBlock_noBN, self).__init__()
self.conv1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.conv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
initialize_weights([self.conv1, self.conv2], 0.1)
def forward(self, x):
identity = x
out = F.relu(self.conv1(x), inplace=True)
out = self.conv2(out)
return identity + out
def get_inputs():
return [torch.rand([4, 64, 64, 64])]
def get_init_inputs():
return [[], {}]
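# Usage sketch (added for illustration, not part of the original file): the
# block is shape-preserving, mapping (N, nf, H, W) -> (N, nf, H, W), so it can
# be stacked freely; _demo_residual_block and its shapes are assumptions.
def _demo_residual_block():
    block = ResidualBlock_noBN(nf=64)
    y = block(torch.rand(2, 64, 16, 16))
    assert y.shape == (2, 64, 16, 16)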
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
import torch.nn as nn
import torch.nn.init as init
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
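    # NOTE (descriptive comment added for clarity): the all-True mask above is
    # computed and discarded; the 1048576-element launch divides evenly into
    # XBLOCK-sized tiles, so the loads and stores below run unmasked.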
x3 = xindex
x1 = xindex // 4096 % 64
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_add_convolution_1(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_out_ptr0 + x3, None)
tmp2 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x3, tmp4, None)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 64, 64, 64), (262144, 4096, 64, 1))
assert_size_stride(primals_2, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_3, (64,), (1,))
assert_size_stride(primals_4, (64, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_5, (64,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(1048576)](buf1, primals_3,
1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_3
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 64, 64, 64), (262144, 4096, 64, 1))
buf3 = buf2
del buf2
triton_poi_fused_add_convolution_1[grid(1048576)](buf3, primals_1,
primals_5, 1048576, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_5
return buf3, primals_1, primals_2, primals_4, buf1
def initialize_weights(net_l, scale=1):
if not isinstance(net_l, list):
net_l = [net_l]
for net in net_l:
for m in net.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight, a=0, mode='fan_in')
m.weight.data *= scale
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
init.kaiming_normal_(m.weight, a=0, mode='fan_in')
m.weight.data *= scale
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
init.constant_(m.weight, 1)
init.constant_(m.bias.data, 0.0)
class ResidualBlock_noBNNew(nn.Module):
"""Residual block w/o BN
---Conv-ReLU-Conv-+-
|________________|
"""
def __init__(self, nf=64):
super(ResidualBlock_noBNNew, self).__init__()
self.conv1 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
self.conv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
initialize_weights([self.conv1, self.conv2], 0.1)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
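# Equivalence sketch (assumes a CUDA device; illustrative, not generated
# output): the Inductor-compiled wrapper should match the eager computation
# identity + conv2(relu(conv1(x))) up to floating-point tolerance.
def _check_compiled_matches_eager():
    m = ResidualBlock_noBNNew(nf=64).cuda()
    x = torch.rand(4, 64, 64, 64, device='cuda')
    ref = x + m.conv2(torch.relu(m.conv1(x)))
    assert torch.allclose(m(x), ref, atol=1e-5)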
|
EvgeneyZ/TMNet
|
ResidualBlock_noBN
| false | 13,685 |
[
"Apache-2.0"
] | 90 |
8a42754747c2fa575e9108c13b5018a884f46099
|
https://github.com/EvgeneyZ/TMNet/tree/8a42754747c2fa575e9108c13b5018a884f46099
|
ResBlock
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/td/ctdybbibnws4d7ukbk3fpn35zkgapxylowdhzwx7vgsllncbdrxa.py
# Topologically Sorted Source Nodes: [conv2d, out], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# out => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_1, %primals_2, %primals_3, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_0 = async_compile.triton('triton_poi_fused_convolution_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/5a/c5ajoriq7xg2qdp3jfbnlkgzou4dyv5gi73sivhaoycfww2pez66.py
# Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.convolution, aten.add]
# Source node to ATen node mapping:
# out_1 => convolution_1
# out_2 => add
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, %primals_1), kwargs = {})
triton_poi_fused_add_convolution_1 = async_compile.triton('triton_poi_fused_add_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_1(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x3), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d, out], Original ATen: [aten.convolution, aten.relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_relu_0.run(buf1, primals_3, 256, grid=grid(256), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.convolution, aten.add]
triton_poi_fused_add_convolution_1.run(buf3, primals_5, primals_1, 256, grid=grid(256), stream=stream0)
del primals_5
return (buf3, primals_1, primals_2, primals_4, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
def get_same_padding(kernel_size, dilation):
kernel_size = kernel_size + (kernel_size - 1) * (dilation - 1)
padding = (kernel_size - 1) // 2
return padding
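# Worked example (added for illustration, not in the original file): with
# kernel_size=3 and dilation=2 the effective kernel is 3 + (3 - 1) * 1 = 5,
# so padding = (5 - 1) // 2 = 2 and a stride-1 conv preserves spatial size.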
class ResBlock(nn.Module):
def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1):
super(ResBlock, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=kernel_size,
stride=stride, padding=get_same_padding(kernel_size, dilation),
dilation=dilation)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=kernel_size,
stride=1, padding=get_same_padding(kernel_size, dilation),
dilation=dilation)
self.relu = nn.ReLU(inplace=True)
self.res_translate = None
        if inplanes != planes or stride != 1:
self.res_translate = nn.Conv2d(inplanes, planes, kernel_size=1,
stride=stride)
def forward(self, x):
residual = x
out = self.relu(self.conv1(x))
out = self.conv2(out)
if self.res_translate is not None:
residual = self.res_translate(residual)
out += residual
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'inplanes': 4, 'planes': 4}]
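# Usage sketch (added for illustration, not part of the original file): when
# inplanes != planes or stride != 1, the 1x1 res_translate conv projects the
# identity branch so the addition is well-formed. Shapes below are assumptions.
def _demo_resblock():
    block = ResBlock(inplanes=4, planes=8, stride=2)
    y = block(torch.rand(2, 4, 16, 16))
    assert block.res_translate is not None
    assert y.shape == (2, 8, 8, 8)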
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_convolution_relu_0(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x3, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_convolution_1(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x3, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_1, primals_2, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_convolution_relu_0[grid(256)](buf1, primals_3, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_3
buf2 = extern_kernels.convolution(buf1, primals_4, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 4, 4), (64, 16, 4, 1))
buf3 = buf2
del buf2
triton_poi_fused_add_convolution_1[grid(256)](buf3, primals_5,
primals_1, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
return buf3, primals_1, primals_2, primals_4, buf1
def get_same_padding(kernel_size, dilation):
kernel_size = kernel_size + (kernel_size - 1) * (dilation - 1)
padding = (kernel_size - 1) // 2
return padding
class ResBlockNew(nn.Module):
def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1):
super(ResBlockNew, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=kernel_size,
stride=stride, padding=get_same_padding(kernel_size, dilation),
dilation=dilation)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=kernel_size,
stride=1, padding=get_same_padding(kernel_size, dilation),
dilation=dilation)
self.relu = nn.ReLU(inplace=True)
self.res_translate = None
        if inplanes != planes or stride != 1:
self.res_translate = nn.Conv2d(inplanes, planes, kernel_size=1,
stride=stride)
def forward(self, input_0):
primals_2 = self.conv1.weight
primals_3 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
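# Equivalence sketch (assumes a CUDA device; illustrative, not generated
# output): this specialization has inplanes == planes and stride == 1, so
# there is no res_translate branch and the compiled wrapper should match
# conv2(relu(conv1(x))) + x.
def _check_compiled_resblock():
    m = ResBlockNew(inplanes=4, planes=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    ref = m.conv2(torch.relu(m.conv1(x))) + x
    assert torch.allclose(m(x), ref, atol=1e-5)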
|
Flemingjp/CDVD-TSP
|
ResBlock
| false | 13,686 |
[
"MIT"
] | 232 |
a2621476deb9386b1bc02570706f490d582930c8
|
https://github.com/Flemingjp/CDVD-TSP/tree/a2621476deb9386b1bc02570706f490d582930c8
|
IIDIsotropicGaussianUVLoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/2l/c2lpbmkah6k7ad7ej2t4cu6gqp3tcq5mzhelfntwzyst66pi6yw6.py
# Topologically Sorted Source Nodes: [softplus, sigma2, log, mul, add_2, sub, pow_1, sub_1, pow_2, delta_t_delta, truediv, add_3, loss, sum_1], Original ATen: [aten.softplus, aten.add, aten.log, aten.mul, aten.sub, aten.pow, aten.div, aten.sum]
# Source node to ATen node mapping:
# add_2 => add_2
# add_3 => add_3
# delta_t_delta => add_1
# log => log
# loss => mul_1
# mul => mul
# pow_1 => pow_1
# pow_2 => pow_2
# sigma2 => add
# softplus => exp, gt, log1p, where
# sub => sub
# sub_1 => sub_1
# sum_1 => sum_1
# truediv => div
# Graph fragment:
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg0_1, 20), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%arg0_1,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %arg0_1, %log1p), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%where, 4), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%add,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%log, 2), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, 1.8378770664093453), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %arg2_1), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg3_1, %arg4_1), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_1, %pow_2), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_1, %add), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %div), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_3, 0.5), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_1,), kwargs = {})
triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0 = async_compile.triton('triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {6: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 7), equal_to_1=(6,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 5, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp13 = tl.load(in_ptr1 + (r0), None)
tmp14 = tl.load(in_ptr2 + (r0), None)
tmp17 = tl.load(in_ptr3 + (r0), None)
tmp18 = tl.load(in_ptr4 + (r0), None)
tmp1 = 20.0
tmp2 = tmp0 > tmp1
tmp3 = tl_math.exp(tmp0)
tmp4 = libdevice.log1p(tmp3)
tmp5 = tl.where(tmp2, tmp0, tmp4)
tmp6 = 4.0
tmp7 = tmp5 + tmp6
tmp8 = tl_math.log(tmp7)
tmp9 = 2.0
tmp10 = tmp8 * tmp9
tmp11 = 1.8378770664093453
tmp12 = tmp10 + tmp11
tmp15 = tmp13 - tmp14
tmp16 = tmp15 * tmp15
tmp19 = tmp17 - tmp18
tmp20 = tmp19 * tmp19
tmp21 = tmp16 + tmp20
tmp22 = tmp21 / tmp7
tmp23 = tmp12 + tmp22
tmp24 = 0.5
tmp25 = tmp23 * tmp24
tmp26 = tl.broadcast_to(tmp25, [RBLOCK])
tmp28 = triton_helpers.promote_to_tensor(tl.sum(tmp26, 0))
tl.store(out_ptr0 + (tl.full([1], 0, tl.int32)), tmp28, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1, arg4_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg4_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [softplus, sigma2, log, mul, add_2, sub, pow_1, sub_1, pow_2, delta_t_delta, truediv, add_3, loss, sum_1], Original ATen: [aten.softplus, aten.add, aten.log, aten.mul, aten.sub, aten.pow, aten.div, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0.run(arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, buf0, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
del arg4_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg3_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg4_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import math
import torch
import torch.utils.data
import torch.nn.functional as F
from torch import nn
class IIDIsotropicGaussianUVLoss(nn.Module):
"""
Loss for the case of iid residuals with isotropic covariance:
    $\Sigma_i = \sigma_i^2 I$
    The loss (negative log likelihood) is then:
    $\frac{1}{2} \sum_{i=1}^n (\log(2\pi) + 2 \log \sigma_i^2 + \|\delta_i\|^2 / \sigma_i^2)$,
    where $\delta_i = (u - u', v - v')$ is the 2D vector of differences between
    the estimated and ground-truth UV coordinates
For details, see:
N. Neverova, D. Novotny, A. Vedaldi "Correlated Uncertainty for Learning
Dense Correspondences from Noisy Labels", p. 918--926, in Proc. NIPS 2019
"""
def __init__(self, sigma_lower_bound: 'float'):
super(IIDIsotropicGaussianUVLoss, self).__init__()
self.sigma_lower_bound = sigma_lower_bound
self.log2pi = math.log(2 * math.pi)
def forward(self, u: 'torch.Tensor', v: 'torch.Tensor', sigma_u:
'torch.Tensor', target_u: 'torch.Tensor', target_v: 'torch.Tensor'):
sigma2 = F.softplus(sigma_u) + self.sigma_lower_bound
delta_t_delta = (u - target_u) ** 2 + (v - target_v) ** 2
loss = 0.5 * (self.log2pi + 2 * torch.log(sigma2) + delta_t_delta /
sigma2)
return loss.sum()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'sigma_lower_bound': 4}]
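# Numeric sketch (added for illustration, not part of the original file): the
# module should agree with a direct transcription of the docstring formula;
# the (2, 3) shapes below are arbitrary assumptions.
def _check_iid_loss_formula():
    loss_fn = IIDIsotropicGaussianUVLoss(sigma_lower_bound=4)
    u, v, sigma_u, tu, tv = (torch.rand(2, 3) for _ in range(5))
    sigma2 = F.softplus(sigma_u) + 4
    expected = 0.5 * (math.log(2 * math.pi) + 2 * torch.log(sigma2)
                      + ((u - tu) ** 2 + (v - tv) ** 2) / sigma2)
    assert torch.allclose(loss_fn(u, v, sigma_u, tu, tv), expected.sum())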
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.utils.data
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp13 = tl.load(in_ptr1 + r0, None)
tmp14 = tl.load(in_ptr2 + r0, None)
tmp17 = tl.load(in_ptr3 + r0, None)
tmp18 = tl.load(in_ptr4 + r0, None)
tmp1 = 20.0
tmp2 = tmp0 > tmp1
tmp3 = tl_math.exp(tmp0)
tmp4 = libdevice.log1p(tmp3)
tmp5 = tl.where(tmp2, tmp0, tmp4)
tmp6 = 4.0
tmp7 = tmp5 + tmp6
tmp8 = tl_math.log(tmp7)
tmp9 = 2.0
tmp10 = tmp8 * tmp9
tmp11 = 1.8378770664093453
tmp12 = tmp10 + tmp11
tmp15 = tmp13 - tmp14
tmp16 = tmp15 * tmp15
tmp19 = tmp17 - tmp18
tmp20 = tmp19 * tmp19
tmp21 = tmp16 + tmp20
tmp22 = tmp21 / tmp7
tmp23 = tmp12 + tmp22
tmp24 = 0.5
tmp25 = tmp23 * tmp24
tmp26 = tl.broadcast_to(tmp25, [RBLOCK])
tmp28 = triton_helpers.promote_to_tensor(tl.sum(tmp26, 0))
tl.store(out_ptr0 + tl.full([1], 0, tl.int32), tmp28, None)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1, arg4_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg4_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0[grid(1)](arg0_1
, arg1_1, arg2_1, arg3_1, arg4_1, buf0, 1, 256, num_warps=2,
num_stages=1)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
del arg4_1
return buf0,
class IIDIsotropicGaussianUVLossNew(nn.Module):
"""
Loss for the case of iid residuals with isotropic covariance:
    $\Sigma_i = \sigma_i^2 I$
    The loss (negative log likelihood) is then:
    $\frac{1}{2} \sum_{i=1}^n (\log(2\pi) + 2 \log \sigma_i^2 + \|\delta_i\|^2 / \sigma_i^2)$,
    where $\delta_i = (u - u', v - v')$ is the 2D vector of differences between
    the estimated and ground-truth UV coordinates
For details, see:
N. Neverova, D. Novotny, A. Vedaldi "Correlated Uncertainty for Learning
Dense Correspondences from Noisy Labels", p. 918--926, in Proc. NIPS 2019
"""
def __init__(self, sigma_lower_bound: 'float'):
super(IIDIsotropicGaussianUVLossNew, self).__init__()
self.sigma_lower_bound = sigma_lower_bound
self.log2pi = math.log(2 * math.pi)
def forward(self, input_0, input_1, input_2, input_3, input_4):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
arg4_1 = input_4
output = call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1])
return output[0]
|
FluteXu/DW-Research
|
IIDIsotropicGaussianUVLoss
| false | 13,687 |
[
"Apache-2.0"
] | 780 |
6b559d2d1d440c07e5936a65cd74a3bc657962dc
|
https://github.com/FluteXu/DW-Research/tree/6b559d2d1d440c07e5936a65cd74a3bc657962dc
|
Swish
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/wh/cwhlzq2lurdrlf2dvcoyooosnseuytdusm5sspnbruhsx5jagloi.py
# Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul_1 => mul_1
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %view_2), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp0
tmp3 = tl.sigmoid(tmp2)
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul_1], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(primals_1, primals_2, buf0, 256, grid=grid(256), stream=stream0)
return (buf0, primals_1, primals_2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
import torch.utils.data
import torch.cuda
from torch.nn import Parameter
import torch.optim
class Swish(nn.Module):
def __init__(self, dim):
super(Swish, self).__init__()
self.betas = Parameter(torch.ones(dim))
self.dim = dim
def forward(self, x):
pre_size = x.size()
return x * nn.Sigmoid()(self.betas.view(-1, self.dim) * x.view(-1,
self.dim)).view(pre_size)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
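# Identity sketch (added for illustration, not part of the original file):
# with betas left at their initial value of one, this parametrized swish
# reduces to the standard x * sigmoid(x).
def _check_swish_identity():
    act = Swish(dim=4)
    x = torch.rand(2, 4)
    assert torch.allclose(act(x), x * torch.sigmoid(x))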
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.nn.parameter import Parameter
import torch.utils.data
import torch.cuda
from torch.nn import Parameter
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp1 * tmp0
tmp3 = tl.sigmoid(tmp2)
tmp4 = tmp0 * tmp3
tl.store(out_ptr0 + x2, tmp4, xmask)
def call(args):
primals_1, primals_2 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(256)](primals_1, primals_2, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
return buf0, primals_1, primals_2
class SwishNew(nn.Module):
def __init__(self, dim):
super(SwishNew, self).__init__()
self.betas = Parameter(torch.ones(dim))
self.dim = dim
def forward(self, input_0):
primals_2 = self.betas
primals_1 = input_0
output = call([primals_1, primals_2])
return output[0]
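# Equivalence sketch (assumes a CUDA device; illustrative, not generated
# output): the fused kernel broadcasts betas along the last dimension, so the
# compiled wrapper should match x * sigmoid(betas * x) elementwise.
def _check_compiled_swish():
    m = SwishNew(dim=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    ref = x * torch.sigmoid(m.betas.view(1, 1, 1, 4) * x)
    assert torch.allclose(m(x), ref, atol=1e-6)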
|
Flamexmt/LMA
|
Swish
| false | 13,688 |
[
"MIT"
] | 321 |
f6fdec2d17a2d7a7733dd5a5745312bad392cdf3
|
https://github.com/Flamexmt/LMA/tree/f6fdec2d17a2d7a7733dd5a5745312bad392cdf3
|
Hsigmoid
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/gl/cgljna3wfarubemgd6d2p3bgazvfhdxtrcu7luu5yza3rrfkty2s.py
# Topologically Sorted Source Nodes: [add, relu6, truediv], Original ATen: [aten.add, aten.hardtanh, aten.div]
# Source node to ATen node mapping:
# add => add
# relu6 => clamp_max, clamp_min
# truediv => div
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%arg0_1, 3.0), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 6), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%clamp_max, 6.0), kwargs = {})
triton_poi_fused_add_div_hardtanh_0 = async_compile.triton('triton_poi_fused_add_div_hardtanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_hardtanh_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_hardtanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 3.0
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 6.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = 0.16666666666666666
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, relu6, truediv], Original ATen: [aten.add, aten.hardtanh, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_add_div_hardtanh_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.utils.data
import torch.nn.functional as F
from torch import nn
class Hsigmoid(nn.Module):
def __init__(self, inplace=True):
super(Hsigmoid, self).__init__()
self.inplace = inplace
def forward(self, x):
return F.relu6(x + 3.0, inplace=self.inplace) / 6.0
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
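# Spot-check sketch (added for illustration, not part of the original file):
# relu6(x + 3) / 6 gives 0 at x = -3, 0.5 at x = 0, and 1 at x = 3.
def _check_hsigmoid_values():
    act = Hsigmoid(inplace=False)
    out = act(torch.tensor([-3.0, 0.0, 3.0]))
    assert torch.allclose(out, torch.tensor([0.0, 0.5, 1.0]))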
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_div_hardtanh_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 3.0
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = triton_helpers.maximum(tmp2, tmp3)
tmp5 = 6.0
tmp6 = triton_helpers.minimum(tmp4, tmp5)
tmp7 = 0.16666666666666666
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x0, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_div_hardtanh_0[grid(256)](arg0_1, buf0, 256,
XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class HsigmoidNew(nn.Module):
def __init__(self, inplace=True):
super(HsigmoidNew, self).__init__()
self.inplace = inplace
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
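# Equivalence sketch (assumes a CUDA device; illustrative, not generated
# output): relu6(x + 3) / 6 is exactly torch.nn.functional.hardsigmoid, so
# the compiled wrapper should agree with it.
def _check_compiled_hsigmoid():
    m = HsigmoidNew()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(m(x), torch.nn.functional.hardsigmoid(x))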
|
FluteXu/DW-Research
|
Hsigmoid
| false | 13,689 |
[
"Apache-2.0"
] | 780 |
6b559d2d1d440c07e5936a65cd74a3bc657962dc
|
https://github.com/FluteXu/DW-Research/tree/6b559d2d1d440c07e5936a65cd74a3bc657962dc
|
GlobalAttention
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/tt/cttw3vm33n2lw4cgw4ebozy3tf3zo23w2lpfcfleug2icgsm3rv2.py
# Topologically Sorted Source Nodes: [eq, attn_score_3, attn_score_2, attn_score_4], Original ATen: [aten.eq, aten.masked_fill, aten.mul, aten._softmax]
# Source node to ATen node mapping:
# attn_score_2 => mul
# attn_score_3 => full_default, where
# attn_score_4 => amax, exp, sub, sum_1
# eq => eq
# Graph fragment:
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%arg2_1, 0), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -9.999999843067494e+17), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze, %arg2_1), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %mul), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
triton_poi_fused__softmax_eq_masked_fill_mul_0 = async_compile.triton('triton_poi_fused__softmax_eq_masked_fill_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_eq_masked_fill_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_eq_masked_fill_mul_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tmp4 = tmp3 * tmp0
tmp5 = -9.999999843067494e+17
tmp6 = tl.where(tmp2, tmp5, tmp4)
tmp8 = tmp7 == tmp1
tmp10 = tmp9 * tmp7
tmp11 = tl.where(tmp8, tmp5, tmp10)
tmp12 = triton_helpers.maximum(tmp6, tmp11)
tmp14 = tmp13 == tmp1
tmp16 = tmp15 * tmp13
tmp17 = tl.where(tmp14, tmp5, tmp16)
tmp18 = triton_helpers.maximum(tmp12, tmp17)
tmp20 = tmp19 == tmp1
tmp22 = tmp21 * tmp19
tmp23 = tl.where(tmp20, tmp5, tmp22)
tmp24 = triton_helpers.maximum(tmp18, tmp23)
tmp25 = tmp6 - tmp24
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp11 - tmp24
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp26 + tmp28
tmp30 = tmp17 - tmp24
tmp31 = tl_math.exp(tmp30)
tmp32 = tmp29 + tmp31
tmp33 = tmp23 - tmp24
tmp34 = tl_math.exp(tmp33)
tmp35 = tmp32 + tmp34
tl.store(out_ptr0 + (x0), tmp24, xmask)
tl.store(out_ptr1 + (x0), tmp35, xmask)
''', device_str='cuda')
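# Eager-mode sketch of the fused masked softmax (descriptive comments added
# for clarity, not generated output). Per row i of the (4, 4) score matrix:
#     score = torch.where(mask == 0, -1e18, raw_score * mask)
#     attn  = torch.softmax(score, dim=1)
#     attn  = torch.where(mask == 0, 0.0, attn)
# The kernel above materializes the row-wise max and exp-sum; the kernel
# below uses them to normalize and then zeroes the masked positions.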
# kernel path: runs/run_shard_0/inductor_cache/ig/cigestvf362cdchx4siwxjso4caffqwjbny7ydo3zdsbbbxrbxxk.py
# Topologically Sorted Source Nodes: [eq_1, attn_score_5, eq, attn_score_3, attn_score_2, attn_score_4], Original ATen: [aten.eq, aten.masked_fill, aten.mul, aten._softmax]
# Source node to ATen node mapping:
# attn_score_2 => mul
# attn_score_3 => full_default, where
# attn_score_4 => amax, div, exp, sub
# attn_score_5 => full_default_1, where_1
# eq => eq
# eq_1 => eq_1
# Graph fragment:
# %eq_1 : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%arg2_1, 0), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %eq : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%arg2_1, 0), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -9.999999843067494e+17), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%squeeze, %arg2_1), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %mul), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq_1, %full_default_1, %div), kwargs = {})
triton_poi_fused__softmax_eq_masked_fill_mul_1 = async_compile.triton('triton_poi_fused__softmax_eq_masked_fill_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_eq_masked_fill_mul_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_eq_masked_fill_mul_1(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_out_ptr0 + (x2), xmask)
tmp7 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tmp4 = tmp3 * tmp0
tmp5 = -9.999999843067494e+17
tmp6 = tl.where(tmp2, tmp5, tmp4)
tmp8 = tmp6 - tmp7
tmp9 = tl_math.exp(tmp8)
tmp11 = tmp9 / tmp10
tmp12 = tl.where(tmp2, tmp1, tmp11)
tl.store(in_out_ptr0 + (x2), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/5m/c5mtwrg4eh3igs2xxg7nnr22e2hdcrsyaf57nqxfsljmq6ws4wht.py
# Topologically Sorted Source Nodes: [mul_1, attn_memory], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# attn_memory => sum_2
# mul_1 => mul_1
# Graph fragment:
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%unsqueeze_1, %arg3_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [1]), kwargs = {})
triton_poi_fused_mul_sum_2 = async_compile.triton('triton_poi_fused_mul_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + (x2), tmp14, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
assert_size_stride(arg3_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_score], Original ATen: [aten.bmm]
extern_kernels.bmm(arg0_1, reinterpret_tensor(arg1_1, (4, 4, 1), (4, 1, 1), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf2 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
# Topologically Sorted Source Nodes: [eq, attn_score_3, attn_score_2, attn_score_4], Original ATen: [aten.eq, aten.masked_fill, aten.mul, aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_eq_masked_fill_mul_0.run(arg2_1, buf0, buf1, buf2, 4, grid=grid(4), stream=stream0)
buf3 = reinterpret_tensor(buf0, (4, 4), (4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [eq_1, attn_score_5, eq, attn_score_3, attn_score_2, attn_score_4], Original ATen: [aten.eq, aten.masked_fill, aten.mul, aten._softmax]
triton_poi_fused__softmax_eq_masked_fill_mul_1.run(buf3, arg2_1, buf1, buf2, 16, grid=grid(16), stream=stream0)
del arg2_1
del buf1
del buf2
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul_1, attn_memory], Original ATen: [aten.mul, aten.sum]
triton_poi_fused_mul_sum_2.run(buf3, arg3_1, buf4, 16, grid=grid(16), stream=stream0)
del arg3_1
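    # buf3 holds the masked attention distribution and buf4 the attended
    # context, i.e. the (attn_score, attn_memory) pair of the eager module.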
return (buf3, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg3_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
class GlobalAttention(nn.Module):
"""
Global attention takes a matrix and a query vector. It
then computes a parameterized convex combination of the matrix
based on the input query.
Constructs a unit mapping a query `q` of size `dim`
    and a source matrix `H` of size `n x dim`
    to an output of size `dim`.
    All models compute the output as
    :math:`c = sum_{j=1}^{n} a_j H_j` where
    :math:`a_j` is the softmax of a score function.
    However, they differ in how they compute the attention score.
    * Luong Attention (dot, general):
        * dot: :math:`score(H_j, q) = H_j^T q`
        * general: :math:`score(H_j, q) = H_j^T W_a q`
    * Bahdanau Attention (mlp):
        * :math:`score(H_j, q) = w_a^T tanh(W_a q + U_a H_j)`
    Args:
        query_size (int): dimensionality of the query vector
        attn_size (int): dimensionality of query and key
        attn_type (str): type of attention to use, options: [dot, general, mlp]
"""
def __init__(self, query_size, attn_size, attn_type='dot'):
super(GlobalAttention, self).__init__()
self.query_size = query_size
self.attn_size = attn_size
self.attn_type = attn_type
if self.attn_type == 'general':
self.linear_in = nn.Linear(query_size, attn_size, bias=False)
elif self.attn_type == 'mlp':
self.linear_query = nn.Linear(query_size, attn_size, bias=True)
self.attn_w = nn.Linear(attn_size, 1, bias=False)
elif self.attn_type == 'dot':
assert self.query_size == self.attn_size
def forward(self, query, memory_keys, memory_values, memory_masks):
"""
Args:
query (`FloatTensor`): (batch, query_size)
memory_keys (`FloatTensor`): (batch, seq_len, attn_size)
memory_values (`FloatTensor`): (batch, seq_len, attn_size)
memory_masks (`LongTensor`): (batch, seq_len)
Returns:
attn_score: attention distributions (batch, seq_len)
            attn_memory: computed context vector (batch, attn_size)
"""
batch_size, seq_len, attn_size = memory_keys.size()
if self.attn_type == 'mlp':
query_hidden = self.linear_query(query.unsqueeze(1)).expand(
batch_size, seq_len, attn_size)
attn_hidden = torch.tanh(query_hidden + memory_keys)
attn_score = self.attn_w(attn_hidden)
elif self.attn_type == 'dot':
attn_score = torch.bmm(memory_keys, query.unsqueeze(2))
elif self.attn_type == 'general':
query_hidden = self.linear_in(query)
attn_score = torch.bmm(memory_keys, query_hidden.unsqueeze(2))
attn_score = attn_score.squeeze(2)
if memory_masks is not None:
attn_score = attn_score * memory_masks
attn_score = attn_score.masked_fill(memory_masks == 0, -1e+18)
attn_score = F.softmax(attn_score, dim=1)
if memory_masks is not None:
attn_score = attn_score.masked_fill(memory_masks == 0, 0)
attn_memory = torch.sum(attn_score.unsqueeze(2) * memory_values, 1)
return attn_score, attn_memory
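# Usage sketch (illustrative; `_global_attention_sketch` is not part of the
# original repo): exercises the eager module with dot attention and checks
# that masked positions get zero weight while each row still sums to one.
def _global_attention_sketch():
    attn = GlobalAttention(query_size=4, attn_size=4, attn_type='dot')
    query = torch.rand(4, 4)
    memory_keys = torch.rand(4, 4, 4)
    memory_values = torch.rand(4, 4, 4)
    # 0/1 float mask: the last two positions of every sequence are padding
    memory_masks = torch.tensor([1.0, 1.0, 0.0, 0.0]).expand(4, 4)
    attn_score, attn_memory = attn(query, memory_keys, memory_values,
        memory_masks)
    assert attn_score.shape == (4, 4) and attn_memory.shape == (4, 4)
    assert attn_score[:, 2:].eq(0).all()
    assert torch.allclose(attn_score.sum(dim=1), torch.ones(4))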
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4, 4]), torch.rand([4, 4]),
torch.rand([4, 4])]
def get_init_inputs():
return [[], {'query_size': 4, 'attn_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_eq_masked_fill_mul_0(in_ptr0, in_ptr1,
out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp13 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp15 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp19 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp21 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tmp4 = tmp3 * tmp0
tmp5 = -9.999999843067494e+17
tmp6 = tl.where(tmp2, tmp5, tmp4)
tmp8 = tmp7 == tmp1
tmp10 = tmp9 * tmp7
tmp11 = tl.where(tmp8, tmp5, tmp10)
tmp12 = triton_helpers.maximum(tmp6, tmp11)
tmp14 = tmp13 == tmp1
tmp16 = tmp15 * tmp13
tmp17 = tl.where(tmp14, tmp5, tmp16)
tmp18 = triton_helpers.maximum(tmp12, tmp17)
tmp20 = tmp19 == tmp1
tmp22 = tmp21 * tmp19
tmp23 = tl.where(tmp20, tmp5, tmp22)
tmp24 = triton_helpers.maximum(tmp18, tmp23)
tmp25 = tmp6 - tmp24
tmp26 = tl_math.exp(tmp25)
tmp27 = tmp11 - tmp24
tmp28 = tl_math.exp(tmp27)
tmp29 = tmp26 + tmp28
tmp30 = tmp17 - tmp24
tmp31 = tl_math.exp(tmp30)
tmp32 = tmp29 + tmp31
tmp33 = tmp23 - tmp24
tmp34 = tl_math.exp(tmp33)
tmp35 = tmp32 + tmp34
tl.store(out_ptr0 + x0, tmp24, xmask)
tl.store(out_ptr1 + x0, tmp35, xmask)
@triton.jit
def triton_poi_fused__softmax_eq_masked_fill_mul_1(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_out_ptr0 + x2, xmask)
tmp7 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tmp4 = tmp3 * tmp0
tmp5 = -9.999999843067494e+17
tmp6 = tl.where(tmp2, tmp5, tmp4)
tmp8 = tmp6 - tmp7
tmp9 = tl_math.exp(tmp8)
tmp11 = tmp9 / tmp10
tmp12 = tl.where(tmp2, tmp1, tmp11)
tl.store(in_out_ptr0 + x2, tmp12, xmask)
@triton.jit
def triton_poi_fused_mul_sum_2(in_ptr0, in_ptr1, out_ptr0, xnumel,
    XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (4 + x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (8 + x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (12 + x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tl.store(out_ptr0 + x2, tmp14, xmask)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(arg1_1, (4, 4), (4, 1))
assert_size_stride(arg2_1, (4, 4), (4, 1))
assert_size_stride(arg3_1, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(arg0_1, reinterpret_tensor(arg1_1, (4, 4, 1), (4,
1, 1), 0), out=buf0)
del arg0_1
del arg1_1
buf1 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
buf2 = empty_strided_cuda((4, 1), (1, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_eq_masked_fill_mul_0[grid(4)](arg2_1,
buf0, buf1, buf2, 4, XBLOCK=4, num_warps=1, num_stages=1)
buf3 = reinterpret_tensor(buf0, (4, 4), (4, 1), 0)
del buf0
triton_poi_fused__softmax_eq_masked_fill_mul_1[grid(16)](buf3,
arg2_1, buf1, buf2, 16, XBLOCK=16, num_warps=1, num_stages=1)
del arg2_1
del buf1
del buf2
buf4 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
        triton_poi_fused_mul_sum_2[grid(16)](buf3, arg3_1, buf4, 16,
            XBLOCK=16, num_warps=1, num_stages=1)
del arg3_1
return buf3, buf4
class GlobalAttentionNew(nn.Module):
"""
Global attention takes a matrix and a query vector. It
then computes a parameterized convex combination of the matrix
based on the input query.
Constructs a unit mapping a query `q` of size `dim`
    and a source matrix `H` of size `n x dim`
    to an output of size `dim`.
    All models compute the output as
    :math:`c = sum_{j=1}^{n} a_j H_j` where
    :math:`a_j` is the softmax of a score function.
    However, they differ in how they compute the attention score.
    * Luong Attention (dot, general):
        * dot: :math:`score(H_j, q) = H_j^T q`
        * general: :math:`score(H_j, q) = H_j^T W_a q`
    * Bahdanau Attention (mlp):
        * :math:`score(H_j, q) = w_a^T tanh(W_a q + U_a H_j)`
    Args:
        query_size (int): dimensionality of the query vector
        attn_size (int): dimensionality of query and key
        attn_type (str): type of attention to use, options: [dot, general, mlp]
"""
def __init__(self, query_size, attn_size, attn_type='dot'):
super(GlobalAttentionNew, self).__init__()
self.query_size = query_size
self.attn_size = attn_size
self.attn_type = attn_type
if self.attn_type == 'general':
self.linear_in = nn.Linear(query_size, attn_size, bias=False)
elif self.attn_type == 'mlp':
self.linear_query = nn.Linear(query_size, attn_size, bias=True)
self.attn_w = nn.Linear(attn_size, 1, bias=False)
elif self.attn_type == 'dot':
assert self.query_size == self.attn_size
    def forward(self, input_0, input_1, input_2, input_3):
        # The compiled call() expects memory_keys first (arg0_1), so the
        # query/keys pair is swapped relative to the eager argument order.
        arg1_1 = input_0
        arg0_1 = input_1
        arg2_1 = input_2
        arg3_1 = input_3
output = call([arg0_1, arg1_1, arg2_1, arg3_1])
return output[0], output[1]
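# Usage sketch (illustrative; not part of the original repo): the compiled
# graph is specialized to CUDA tensors with the exact shapes asserted in
# call(); note memory_values here is a single (4, 4) matrix shared across
# the batch by broadcasting.
def _compiled_attention_sketch():
    attn = GlobalAttentionNew(query_size=4, attn_size=4, attn_type='dot')
    query = torch.rand(4, 4, device='cuda')
    memory_keys = torch.rand(4, 4, 4, device='cuda')
    memory_masks = torch.ones(4, 4, device='cuda')
    memory_values = torch.rand(4, 4, device='cuda')
    attn_score, attn_memory = attn(query, memory_keys, memory_masks,
        memory_values)
    assert attn_score.shape == (4, 4) and attn_memory.shape == (4, 4)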
|
Fenkail/hgr_v2t
|
GlobalAttention
| false | 13,690 |
[
"MIT"
] | 190 |
d8cc1c18cdaae54fd1878d6dc7b8e9c60d83fcbb
|
https://github.com/Fenkail/hgr_v2t/tree/d8cc1c18cdaae54fd1878d6dc7b8e9c60d83fcbb
|
Decoder
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/oc/cocqf4qhu7yvep7mkub4zpbgiay22ow2hnah6w2ctiygxz4gcme7.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 131072
xnumel = 25
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = (yindex // 128)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (128*x2) + (3200*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/jq/cjqcnpaa3l4qbgseb6g76lnbktmrwvyhksgxwgikrhznwn7uoa6u.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 8192
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (64*x2) + (1600*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/p2/cp2lj3lcshplvfynyp6c6ak7w2svmfx43xu5kl32fpseuscosvpu.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048, 64], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 2048
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = (yindex // 32)
tmp0 = tl.load(in_ptr0 + (x2 + (36*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (32*x2) + (1152*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/re/crem3gm53vdacreu2vohuuqyhjo2sufz24lohiz4jwu4imx7anvc.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128, 64], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 128
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = (yindex // 4)
tmp0 = tl.load(in_ptr0 + (x2 + (36*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (4*x2) + (144*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
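# Note: triton_poi_fused_0 through triton_poi_fused_3 only repack the four
# ConvTranspose2d weights from contiguous NCHW into a channels-last layout
# (see the output strides in call(), e.g. (3200, 1, 640, 128) for buf0) so
# the extern convolution kernels can consume them directly.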
# kernel path: runs/run_shard_0/inductor_cache/vl/cvlmnq3doiwlvuemznisau56la6736cmzu2ourqgwd5u73k73wtk.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_2), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
# %le_3 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_4 = async_compile.triton('triton_poi_fused_relu_threshold_backward_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_4', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_4(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 1024
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/a2/ca2cers4qyityc6q63ngzhzx6ez6m2a3nhjunjai2lmu46bpdka4.py
# Topologically Sorted Source Nodes: [conv_transpose2d, x_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv_transpose2d => convolution
# x_2 => relu_1
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze_1, %primals_4, %primals_5, [2, 2], [0, 0], [1, 1], True, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_5 = async_compile.triton('triton_poi_fused_convolution_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 12800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/vi/cvi2r4z4m2rtcdbzc7fc7wah5x5b73urgvs7ahsbieimm3rd42hb.py
# Topologically Sorted Source Nodes: [conv_transpose2d_1, x_3], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv_transpose2d_1 => convolution_1
# x_3 => relu_2
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [2, 2], [0, 0], [1, 1], True, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_6 = async_compile.triton('triton_poi_fused_convolution_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 43264
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ln/clnjlvuwhtkn2wazj5nsrb26rk37mqgfieyluqm57jofikrbvo4c.py
# Topologically Sorted Source Nodes: [conv_transpose2d_2, x_4], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv_transpose2d_2 => convolution_2
# x_4 => relu_3
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_8, %primals_9, [2, 2], [0, 0], [1, 1], True, [0, 0], 1), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_7 = async_compile.triton('triton_poi_fused_convolution_relu_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 115200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/4r/c4rw4bmnj2wlkm5hmakjt6n5kuwhlaclxxysnsjkamv66abseyfr.py
# Topologically Sorted Source Nodes: [conv_transpose2d_3, reconstruction], Original ATen: [aten.convolution, aten.sigmoid]
# Source node to ATen node mapping:
# conv_transpose2d_3 => convolution_3
# reconstruction => sigmoid
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_3, %primals_10, %primals_11, [2, 2], [0, 0], [1, 1], True, [0, 0], 1), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_3,), kwargs = {})
triton_poi_fused_convolution_sigmoid_8 = async_compile.triton('triton_poi_fused_convolution_sigmoid_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4096], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_sigmoid_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_sigmoid_8(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16384*y1)), ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(out_ptr0 + (x2 + (4096*y3)), tmp3, ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11 = args
args.clear()
assert_size_stride(primals_1, (1024, 4), (4, 1))
assert_size_stride(primals_2, (1024, ), (1, ))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (1024, 128, 5, 5), (3200, 25, 5, 1))
assert_size_stride(primals_5, (128, ), (1, ))
assert_size_stride(primals_6, (128, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_7, (64, ), (1, ))
assert_size_stride(primals_8, (64, 32, 6, 6), (1152, 36, 6, 1))
assert_size_stride(primals_9, (32, ), (1, ))
assert_size_stride(primals_10, (32, 4, 6, 6), (144, 36, 6, 1))
assert_size_stride(primals_11, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1024, 128, 5, 5), (3200, 1, 640, 128), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_4, buf0, 131072, 25, grid=grid(131072, 25), stream=stream0)
del primals_4
buf1 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_6, buf1, 8192, 25, grid=grid(8192, 25), stream=stream0)
del primals_6
buf2 = empty_strided_cuda((64, 32, 6, 6), (1152, 1, 192, 32), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_8, buf2, 2048, 36, grid=grid(2048, 36), stream=stream0)
del primals_8
buf3 = empty_strided_cuda((32, 4, 6, 6), (144, 1, 24, 4), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(primals_10, buf3, 128, 36, grid=grid(128, 36), stream=stream0)
del primals_10
buf4 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 1024), (1, 4), 0), out=buf4)
del primals_1
buf5 = buf4; del buf4 # reuse
buf14 = empty_strided_cuda((4, 1024), (1024, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_4.run(buf5, primals_2, buf14, 4096, grid=grid(4096), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv_transpose2d], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(reinterpret_tensor(buf5, (4, 1024, 1, 1), (1024, 1, 0, 0), 0), buf0, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 128, 5, 5), (3200, 1, 640, 128))
buf7 = buf6; del buf6 # reuse
# Topologically Sorted Source Nodes: [conv_transpose2d, x_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_5.run(buf7, primals_5, 12800, grid=grid(12800), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [conv_transpose2d_1], Original ATen: [aten.convolution]
buf8 = extern_kernels.convolution(buf7, buf1, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 64, 13, 13), (10816, 1, 832, 64))
buf9 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [conv_transpose2d_1, x_3], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_6.run(buf9, primals_7, 43264, grid=grid(43264), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [conv_transpose2d_2], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf9, buf2, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 32, 30, 30), (28800, 1, 960, 32))
buf11 = buf10; del buf10 # reuse
# Topologically Sorted Source Nodes: [conv_transpose2d_2, x_4], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_7.run(buf11, primals_9, 115200, grid=grid(115200), stream=stream0)
del primals_9
# Topologically Sorted Source Nodes: [conv_transpose2d_3], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf11, buf3, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 4, 64, 64), (16384, 1, 256, 4))
buf13 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv_transpose2d_3, reconstruction], Original ATen: [aten.convolution, aten.sigmoid]
triton_poi_fused_convolution_sigmoid_8.run(buf12, primals_11, buf13, 16, 4096, grid=grid(16, 4096), stream=stream0)
del buf12
del primals_11
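    # buf13 is the sigmoid reconstruction; the other returned tensors
    # (repacked weights, intermediate activations, and the relu mask buf14)
    # are saved for the backward pass of this '0_forward' graph.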
return (buf13, primals_3, buf0, buf1, buf2, buf3, reinterpret_tensor(buf5, (4, 1024, 1, 1), (1024, 1, 1, 1), 0), buf7, buf9, buf11, buf13, buf14, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1024, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1024, 128, 5, 5), (3200, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((128, 64, 5, 5), (1600, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((64, 32, 6, 6), (1152, 36, 6, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((32, 4, 6, 6), (144, 36, 6, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.utils.data
import torch.nn as nn
import torch.nn.functional as F
class Decoder(nn.Module):
""" VAE decoder """
def __init__(self, img_channels, latent_size):
super(Decoder, self).__init__()
self.latent_size = latent_size
self.img_channels = img_channels
self.fc1 = nn.Linear(latent_size, 1024)
self.deconv1 = nn.ConvTranspose2d(1024, 128, 5, stride=2)
self.deconv2 = nn.ConvTranspose2d(128, 64, 5, stride=2)
self.deconv3 = nn.ConvTranspose2d(64, 32, 6, stride=2)
self.deconv4 = nn.ConvTranspose2d(32, img_channels, 6, stride=2)
def forward(self, x):
x = F.relu(self.fc1(x))
x = x.unsqueeze(-1).unsqueeze(-1)
x = F.relu(self.deconv1(x))
x = F.relu(self.deconv2(x))
x = F.relu(self.deconv3(x))
        reconstruction = torch.sigmoid(self.deconv4(x))  # F.sigmoid is deprecated
return reconstruction
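# Shape-trace sketch (illustrative; `_decoder_shape_sketch` is not part of
# the original repo). With no padding, each ConvTranspose2d maps a spatial
# size H to (H - 1) * stride + kernel_size, so the latent decodes as
# 1x1 -> 5x5 -> 13x13 -> 30x30 -> 64x64:
def _decoder_shape_sketch():
    dec = Decoder(img_channels=4, latent_size=4)
    reconstruction = dec(torch.rand(4, 4))
    assert reconstruction.shape == (4, 4, 64, 64)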
def get_inputs():
return [torch.rand([4, 4])]
def get_init_inputs():
return [[], {'img_channels': 4, 'latent_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 25
    yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
    tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 128 * x2 + 3200 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
    tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 64 * x2 + 1600 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
    tmp0 = tl.load(in_ptr0 + (x2 + 36 * y3), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 32 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 128
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
    tmp0 = tl.load(in_ptr0 + (x2 + 36 * y3), xmask & ymask,
        eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 4 * x2 + 144 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_4(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 1024
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 12800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 43264
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 115200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_sigmoid_8(in_ptr0, in_ptr1, out_ptr0,
ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16384 * y1), ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(out_ptr0 + (x2 + 4096 * y3), tmp3, ymask)
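# Note: besides adding the bias and applying the sigmoid, this final kernel
# also transposes the channels-last convolution output back into contiguous
# NCHW form (out_ptr0 has strides (16384, 4096, 64, 1)).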
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11) = args
args.clear()
assert_size_stride(primals_1, (1024, 4), (4, 1))
assert_size_stride(primals_2, (1024,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (1024, 128, 5, 5), (3200, 25, 5, 1))
assert_size_stride(primals_5, (128,), (1,))
assert_size_stride(primals_6, (128, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (64, 32, 6, 6), (1152, 36, 6, 1))
assert_size_stride(primals_9, (32,), (1,))
assert_size_stride(primals_10, (32, 4, 6, 6), (144, 36, 6, 1))
assert_size_stride(primals_11, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((1024, 128, 5, 5), (3200, 1, 640, 128),
torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(131072, 25)](primals_4, buf0, 131072, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_4
buf1 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64),
torch.float32)
triton_poi_fused_1[grid(8192, 25)](primals_6, buf1, 8192, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_6
        buf2 = empty_strided_cuda((64, 32, 6, 6), (1152, 1, 192, 32),
            torch.float32)
triton_poi_fused_2[grid(2048, 36)](primals_8, buf2, 2048, 36,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_8
buf3 = empty_strided_cuda((32, 4, 6, 6), (144, 1, 24, 4), torch.float32
)
triton_poi_fused_3[grid(128, 36)](primals_10, buf3, 128, 36, XBLOCK
=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_10
buf4 = empty_strided_cuda((4, 1024), (1024, 1), torch.float32)
extern_kernels.mm(primals_3, reinterpret_tensor(primals_1, (4, 1024
), (1, 4), 0), out=buf4)
del primals_1
buf5 = buf4
del buf4
buf14 = empty_strided_cuda((4, 1024), (1024, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_4[grid(4096)](buf5,
primals_2, buf14, 4096, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf6 = extern_kernels.convolution(reinterpret_tensor(buf5, (4, 1024,
1, 1), (1024, 1, 0, 0), 0), buf0, stride=(2, 2), padding=(0, 0),
dilation=(1, 1), transposed=True, output_padding=(0, 0), groups
=1, bias=None)
assert_size_stride(buf6, (4, 128, 5, 5), (3200, 1, 640, 128))
buf7 = buf6
del buf6
triton_poi_fused_convolution_relu_5[grid(12800)](buf7, primals_5,
12800, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf8 = extern_kernels.convolution(buf7, buf1, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf8, (4, 64, 13, 13), (10816, 1, 832, 64))
buf9 = buf8
del buf8
triton_poi_fused_convolution_relu_6[grid(43264)](buf9, primals_7,
43264, XBLOCK=512, num_warps=4, num_stages=1)
del primals_7
buf10 = extern_kernels.convolution(buf9, buf2, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 32, 30, 30), (28800, 1, 960, 32))
buf11 = buf10
del buf10
triton_poi_fused_convolution_relu_7[grid(115200)](buf11, primals_9,
115200, XBLOCK=512, num_warps=8, num_stages=1)
del primals_9
buf12 = extern_kernels.convolution(buf11, buf3, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 4, 64, 64), (16384, 1, 256, 4))
buf13 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1),
torch.float32)
triton_poi_fused_convolution_sigmoid_8[grid(16, 4096)](buf12,
primals_11, buf13, 16, 4096, XBLOCK=32, YBLOCK=16, num_warps=4,
num_stages=1)
del buf12
del primals_11
return buf13, primals_3, buf0, buf1, buf2, buf3, reinterpret_tensor(buf5,
(4, 1024, 1, 1), (1024, 1, 1, 1), 0), buf7, buf9, buf11, buf13, buf14
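# Shape chain of the compiled decoder above, read directly off the
# assert_size_stride calls: (4, 4) latent -> fc1 + relu -> (4, 1024), viewed
# as (4, 1024, 1, 1) -> deconv1 -> (4, 128, 5, 5) -> deconv2 -> (4, 64, 13, 13)
# -> deconv3 -> (4, 32, 30, 30) -> deconv4 + sigmoid -> (4, 4, 64, 64).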
class DecoderNew(nn.Module):
""" VAE decoder """
def __init__(self, img_channels, latent_size):
super(DecoderNew, self).__init__()
self.latent_size = latent_size
self.img_channels = img_channels
self.fc1 = nn.Linear(latent_size, 1024)
self.deconv1 = nn.ConvTranspose2d(1024, 128, 5, stride=2)
self.deconv2 = nn.ConvTranspose2d(128, 64, 5, stride=2)
self.deconv3 = nn.ConvTranspose2d(64, 32, 6, stride=2)
self.deconv4 = nn.ConvTranspose2d(32, img_channels, 6, stride=2)
def forward(self, input_0):
primals_1 = self.fc1.weight
primals_2 = self.fc1.bias
primals_4 = self.deconv1.weight
primals_5 = self.deconv1.bias
primals_6 = self.deconv2.weight
primals_7 = self.deconv2.bias
primals_8 = self.deconv3.weight
primals_9 = self.deconv3.bias
primals_10 = self.deconv4.weight
primals_11 = self.deconv4.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11])
return output[0]
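# Hedged usage sketch (illustrative, not from the repo; assumes torch is
# imported above and a CUDA device is available): the compiled call() asserts
# a (4, 4) latent input and returns a (4, 4, 64, 64) image batch.
def _example_decoder_usage():
    decoder = DecoderNew(img_channels=4, latent_size=4).cuda()
    z = torch.rand(4, 4, device='cuda')
    img = decoder(z)
    assert img.shape == (4, 4, 64, 64)  # sigmoid keeps values in [0, 1]
    return img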
| FabianSchuetze/world-models | Decoder | false | 13,691 | ["MIT"] | 440 | d6abd9ce97409734a766eb67ccf0d1967ba9bf0c | https://github.com/FabianSchuetze/world-models/tree/d6abd9ce97409734a766eb67ccf0d1967ba9bf0c |
IndepAnisotropicGaussianUVLoss
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/2f/c2fihp3eabdoclhz6gdz723nsdjyue5ykxbe3cdbfc2itfhvb5zw.py
# Topologically Sorted Source Nodes: [softplus, sigma2, pow_1, pow_2, r_sqnorm2, add_4, denom2, log, add_5, delta_u, pow_3, delta_v, pow_4, delta_sqnorm, truediv, add_6, delta_u_r_u, delta_v_r_v, delta_r, delta_r_sqnorm, truediv_1, sub_2, loss, sum_1], Original ATen: [aten.softplus, aten.add, aten.pow, aten.mul, aten.log, aten.sub, aten.div, aten.sum]
# Source node to ATen node mapping:
# add_4 => add_4
# add_5 => add_5
# add_6 => add_6
# delta_r => add_3
# delta_r_sqnorm => pow_5
# delta_sqnorm => add_2
# delta_u => sub
# delta_u_r_u => mul
# delta_v => sub_1
# delta_v_r_v => mul_1
# denom2 => mul_2
# log => log
# loss => mul_3
# pow_1 => pow_1
# pow_2 => pow_2
# pow_3 => pow_3
# pow_4 => pow_4
# r_sqnorm2 => add_1
# sigma2 => add
# softplus => exp, gt, log1p, where
# sub_2 => sub_2
# sum_1 => sum_1
# truediv => div
# truediv_1 => div_1
# Graph fragment:
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%arg0_1, 20), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%arg0_1,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %arg0_1, %log1p), kwargs = {})
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%where, 4), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg1_1, 2), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%arg2_1, 2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_1, %pow_2), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %add_1), kwargs = {})
# %mul_2 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, %add_4), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%mul_2,), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%log, 1.8378770664093453), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg3_1, %arg4_1), kwargs = {})
# %pow_3 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub, 2), kwargs = {})
# %sub_1 : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg5_1, %arg6_1), kwargs = {})
# %pow_4 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%sub_1, 2), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%pow_3, %pow_4), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%add_2, %add), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_5, %div), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg1_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %arg2_1), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %mul_1), kwargs = {})
# %pow_5 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%add_3, 2), kwargs = {})
# %div_1 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%pow_5, %mul_2), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_6, %div_1), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_2, 0.5), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_3,), kwargs = {})
triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0 = async_compile.triton('triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {8: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 9), equal_to_1=(8,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0', 'mutated_arg_names': [], 'no_x_dim': True, 'num_load': 7, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp8 = tl.load(in_ptr1 + (r0), None)
tmp10 = tl.load(in_ptr2 + (r0), None)
tmp18 = tl.load(in_ptr3 + (r0), None)
tmp19 = tl.load(in_ptr4 + (r0), None)
tmp22 = tl.load(in_ptr5 + (r0), None)
tmp23 = tl.load(in_ptr6 + (r0), None)
tmp1 = 20.0
tmp2 = tmp0 > tmp1
tmp3 = tl_math.exp(tmp0)
tmp4 = libdevice.log1p(tmp3)
tmp5 = tl.where(tmp2, tmp0, tmp4)
tmp6 = 4.0
tmp7 = tmp5 + tmp6
tmp9 = tmp8 * tmp8
tmp11 = tmp10 * tmp10
tmp12 = tmp9 + tmp11
tmp13 = tmp7 + tmp12
tmp14 = tmp7 * tmp13
tmp15 = tl_math.log(tmp14)
tmp16 = 1.8378770664093453
tmp17 = tmp15 + tmp16
tmp20 = tmp18 - tmp19
tmp21 = tmp20 * tmp20
tmp24 = tmp22 - tmp23
tmp25 = tmp24 * tmp24
tmp26 = tmp21 + tmp25
tmp27 = tmp26 / tmp7
tmp28 = tmp17 + tmp27
tmp29 = tmp20 * tmp8
tmp30 = tmp24 * tmp10
tmp31 = tmp29 + tmp30
tmp32 = tmp31 * tmp31
tmp33 = tmp32 / tmp14
tmp34 = tmp28 - tmp33
tmp35 = 0.5
tmp36 = tmp34 * tmp35
tmp37 = tl.broadcast_to(tmp36, [RBLOCK])
tmp39 = triton_helpers.promote_to_tensor(tl.sum(tmp37, 0))
tl.store(out_ptr1 + (tl.full([1], 0, tl.int32)), tmp39, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1, arg6_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg4_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg5_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg6_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
# Topologically Sorted Source Nodes: [softplus, sigma2, pow_1, pow_2, r_sqnorm2, add_4, denom2, log, add_5, delta_u, pow_3, delta_v, pow_4, delta_sqnorm, truediv, add_6, delta_u_r_u, delta_v_r_v, delta_r, delta_r_sqnorm, truediv_1, sub_2, loss, sum_1], Original ATen: [aten.softplus, aten.add, aten.pow, aten.mul, aten.log, aten.sub, aten.div, aten.sum]
stream0 = get_raw_stream(0)
triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0.run(arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1, arg6_1, buf1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
del arg4_1
del arg5_1
del arg6_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg2_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg3_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg4_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg5_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg6_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1, arg6_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import math
import torch
import torch.utils.data
import torch.nn.functional as F
from torch import nn
class IndepAnisotropicGaussianUVLoss(nn.Module):
"""
Loss for the case of independent residuals with anisotropic covariances:
$Sigma_i = sigma_i^2 I + r_i r_i^T$
The loss (negative log likelihood) is then:
$1/2 sum_{i=1}^n (log(2 pi)
+ log sigma_i^2 (sigma_i^2 + ||r_i||^2)
+ ||delta_i||^2 / sigma_i^2
- <delta_i, r_i>^2 / (sigma_i^2 * (sigma_i^2 + ||r_i||^2)))$,
where $delta_i=(u - u', v - v')$ is a 2D vector containing UV coordinates
difference between estimated and ground truth UV values
For details, see:
N. Neverova, D. Novotny, A. Vedaldi "Correlated Uncertainty for Learning
Dense Correspondences from Noisy Labels", p. 918--926, in Proc. NIPS 2019
"""
def __init__(self, sigma_lower_bound: 'float'):
super(IndepAnisotropicGaussianUVLoss, self).__init__()
self.sigma_lower_bound = sigma_lower_bound
self.log2pi = math.log(2 * math.pi)
def forward(self, u: 'torch.Tensor', v: 'torch.Tensor', sigma_u:
'torch.Tensor', kappa_u_est: 'torch.Tensor', kappa_v_est:
'torch.Tensor', target_u: 'torch.Tensor', target_v: 'torch.Tensor'):
sigma2 = F.softplus(sigma_u) + self.sigma_lower_bound
r_sqnorm2 = kappa_u_est ** 2 + kappa_v_est ** 2
delta_u = u - target_u
delta_v = v - target_v
delta_sqnorm = delta_u ** 2 + delta_v ** 2
delta_u_r_u = delta_u * kappa_u_est
delta_v_r_v = delta_v * kappa_v_est
delta_r = delta_u_r_u + delta_v_r_v
delta_r_sqnorm = delta_r ** 2
denom2 = sigma2 * (sigma2 + r_sqnorm2)
loss = 0.5 * (self.log2pi + torch.log(denom2) + delta_sqnorm /
sigma2 - delta_r_sqnorm / denom2)
return loss.sum()
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand(
[4, 4, 4, 4]), torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4]),
torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'sigma_lower_bound': 4}]
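# Illustrative sketch (added for clarity, not part of the source repo): the
# eager loss runs on plain CPU tensors and reduces the 7 inputs above to a
# scalar negative log likelihood.
def _example_eager_loss():
    loss_fn = IndepAnisotropicGaussianUVLoss(**get_init_inputs()[1])
    loss = loss_fn(*get_inputs())
    assert loss.dim() == 0  # .sum() collapses everything to a scalar
    return loss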
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.utils.data
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr1, xnumel,
rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp8 = tl.load(in_ptr1 + r0, None)
tmp10 = tl.load(in_ptr2 + r0, None)
tmp18 = tl.load(in_ptr3 + r0, None)
tmp19 = tl.load(in_ptr4 + r0, None)
tmp22 = tl.load(in_ptr5 + r0, None)
tmp23 = tl.load(in_ptr6 + r0, None)
    tmp1 = 20.0
    tmp2 = tmp0 > tmp1
    tmp3 = tl_math.exp(tmp0)
    tmp4 = libdevice.log1p(tmp3)
    tmp5 = tl.where(tmp2, tmp0, tmp4)  # numerically stable softplus(sigma_u)
    tmp6 = 4.0  # sigma_lower_bound, baked in at trace time
    tmp7 = tmp5 + tmp6
    tmp9 = tmp8 * tmp8
    tmp11 = tmp10 * tmp10
    tmp12 = tmp9 + tmp11
    tmp13 = tmp7 + tmp12
    tmp14 = tmp7 * tmp13
    tmp15 = tl_math.log(tmp14)
    tmp16 = 1.8378770664093453  # log(2 * pi)
    tmp17 = tmp15 + tmp16
tmp20 = tmp18 - tmp19
tmp21 = tmp20 * tmp20
tmp24 = tmp22 - tmp23
tmp25 = tmp24 * tmp24
tmp26 = tmp21 + tmp25
tmp27 = tmp26 / tmp7
tmp28 = tmp17 + tmp27
tmp29 = tmp20 * tmp8
tmp30 = tmp24 * tmp10
tmp31 = tmp29 + tmp30
tmp32 = tmp31 * tmp31
tmp33 = tmp32 / tmp14
tmp34 = tmp28 - tmp33
tmp35 = 0.5
tmp36 = tmp34 * tmp35
tmp37 = tl.broadcast_to(tmp36, [RBLOCK])
tmp39 = triton_helpers.promote_to_tensor(tl.sum(tmp37, 0))
tl.store(out_ptr1 + tl.full([1], 0, tl.int32), tmp39, None)
def call(args):
arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1, arg6_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg2_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg3_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg4_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg5_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg6_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((), (), torch.float32)
get_raw_stream(0)
triton_per_fused_add_div_log_mul_pow_softplus_sub_sum_0[grid(1)](arg0_1
, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1, arg6_1, buf1, 1, 256,
num_warps=2, num_stages=1)
del arg0_1
del arg1_1
del arg2_1
del arg3_1
del arg4_1
del arg5_1
del arg6_1
return buf1,
class IndepAnisotropicGaussianUVLossNew(nn.Module):
"""
Loss for the case of independent residuals with anisotropic covariances:
$Sigma_i = sigma_i^2 I + r_i r_i^T$
The loss (negative log likelihood) is then:
$1/2 sum_{i=1}^n (log(2 pi)
+ log sigma_i^2 (sigma_i^2 + ||r_i||^2)
+ ||delta_i||^2 / sigma_i^2
- <delta_i, r_i>^2 / (sigma_i^2 * (sigma_i^2 + ||r_i||^2)))$,
where $delta_i=(u - u', v - v')$ is a 2D vector containing UV coordinates
difference between estimated and ground truth UV values
For details, see:
N. Neverova, D. Novotny, A. Vedaldi "Correlated Uncertainty for Learning
Dense Correspondences from Noisy Labels", p. 918--926, in Proc. NIPS 2019
"""
def __init__(self, sigma_lower_bound: 'float'):
super(IndepAnisotropicGaussianUVLossNew, self).__init__()
self.sigma_lower_bound = sigma_lower_bound
self.log2pi = math.log(2 * math.pi)
def forward(self, input_0, input_1, input_2, input_3, input_4, input_5,
input_6):
arg0_1 = input_0
arg1_1 = input_1
arg2_1 = input_2
arg3_1 = input_3
arg4_1 = input_4
arg5_1 = input_5
arg6_1 = input_6
output = call([arg0_1, arg1_1, arg2_1, arg3_1, arg4_1, arg5_1, arg6_1])
return output[0]
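# Hedged parity sketch (assumes a CUDA device): the compiled module should
# reproduce 0.5 * (log(2*pi) + log(denom2) + |delta|^2/sigma2
# - <delta, r>^2/denom2).sum() from the eager class earlier in this record.
# Note the kernel bakes in sigma_lower_bound=4 (the literal 4.0 above), so
# any other bound would require a recompile.
def _example_compiled_loss():
    loss_fn = IndepAnisotropicGaussianUVLossNew(sigma_lower_bound=4)
    args = [torch.rand(4, 4, 4, 4, device='cuda') for _ in range(7)]
    return loss_fn(*args)  # scalar tensor on cuda:0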
| FluteXu/DW-Research | IndepAnisotropicGaussianUVLoss | false | 13,692 | ["Apache-2.0"] | 780 | 6b559d2d1d440c07e5936a65cd74a3bc657962dc | https://github.com/FluteXu/DW-Research/tree/6b559d2d1d440c07e5936a65cd74a3bc657962dc |
Dueling_Critic
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py
# Topologically Sorted Source Nodes: [x1], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x1 => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/u4/cu4ydaxmd4p44x67hyup2jel4m36q72kgq25tgfiaxiibmy7ycbt.py
# Topologically Sorted Source Nodes: [add, mean, x3], Original ATen: [aten.add, aten.mean, aten.sub]
# Source node to ATen node mapping:
# add => add
# mean => mean
# x3 => sub
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_5, %view_7), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%view_7, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %mean), kwargs = {})
triton_poi_fused_add_mean_sub_1 = async_compile.triton('triton_poi_fused_add_mean_sub_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_sub_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mean_sub_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = (xindex // 4)
x5 = xindex
x3 = (xindex // 64)
x6 = xindex % 16
tmp0 = tl.load(in_ptr0 + (x4), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + (x5), xmask)
tmp6 = tl.load(in_ptr2 + (x6 + (64*x3)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr2 + (16 + x6 + (64*x3)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (32 + x6 + (64*x3)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr2 + (48 + x6 + (64*x3)), xmask, eviction_policy='evict_last')
tmp3 = tmp0 + tmp2
tmp5 = tmp3 + tmp4
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tmp12 = tmp10 + tmp11
tmp13 = 4.0
tmp14 = tmp12 / tmp13
tmp15 = tmp5 - tmp14
tl.store(out_ptr0 + (x5), tmp15, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, 4), (4, 1))
assert_size_stride(primals_5, (1, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x1], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf5, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), out=buf2)
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [y2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf3)
del primals_7
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, mean, x3], Original ATen: [aten.add, aten.mean, aten.sub]
triton_poi_fused_add_mean_sub_1.run(buf2, primals_5, buf3, buf4, 256, grid=grid(256), stream=stream0)
del buf2
del buf3
del primals_5
return (buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), primals_6, primals_4, buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn.functional as F
import torch.nn as nn
class Dueling_Critic(nn.Module):
def __init__(self, input_size, output_size, hidden_size):
super().__init__()
self.input_size = input_size
self.output_size = output_size
self.linear1 = nn.Linear(input_size, hidden_size)
self.linear2 = nn.Linear(hidden_size, 1)
self.linear3 = nn.Linear(hidden_size, output_size)
    def forward(self, x):
        x1 = F.relu(self.linear1(x))
        x2 = F.relu(self.linear1(x))  # identical to x1; the compiled graph computes it once
y1 = self.linear2(x1)
y2 = self.linear3(x2)
x3 = y1 + y2 - y2.mean(dim=1, keepdim=True)
return x3
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'output_size': 4, 'hidden_size': 4}]
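# Worked example (illustrative, not from the repo): the aggregation above,
# Q = V + A - mean(A) with V = y1 and A = y2, is invariant to adding a
# constant to every advantage, which is what makes the V/A split identifiable.
def _example_dueling_identity():
    v = torch.tensor([[0.5]])
    a = torch.tensor([[1.0, 2.0, 3.0, 4.0]])
    q = v + a - a.mean(dim=1, keepdim=True)
    q_shifted = v + (a + 10.0) - (a + 10.0).mean(dim=1, keepdim=True)
    assert torch.allclose(q, q_shifted)
    return q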
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_mean_sub_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x4 = xindex // 4
x5 = xindex
x3 = xindex // 64
x6 = xindex % 16
tmp0 = tl.load(in_ptr0 + x4, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + x5, xmask)
tmp6 = tl.load(in_ptr2 + (x6 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr2 + (16 + x6 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp9 = tl.load(in_ptr2 + (32 + x6 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp11 = tl.load(in_ptr2 + (48 + x6 + 64 * x3), xmask, eviction_policy=
'evict_last')
tmp3 = tmp0 + tmp2
tmp5 = tmp3 + tmp4
tmp8 = tmp6 + tmp7
tmp10 = tmp8 + tmp9
tmp12 = tmp10 + tmp11
tmp13 = 4.0
tmp14 = tmp12 / tmp13
tmp15 = tmp5 - tmp14
tl.store(out_ptr0 + x5, tmp15, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, 4), (4, 1))
assert_size_stride(primals_5, (1,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf5, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 1), (1, 4), 0), out=buf2)
buf3 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf1, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf3)
del primals_7
buf4 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mean_sub_1[grid(256)](buf2, primals_5, buf3,
buf4, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf2
del buf3
del primals_5
return buf4, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0
), primals_6, primals_4, buf5
class Dueling_CriticNew(nn.Module):
def __init__(self, input_size, output_size, hidden_size):
super().__init__()
self.input_size = input_size
self.output_size = output_size
self.linear1 = nn.Linear(input_size, hidden_size)
self.linear2 = nn.Linear(hidden_size, 1)
self.linear3 = nn.Linear(hidden_size, output_size)
def forward(self, input_0):
primals_1 = self.linear1.weight
primals_2 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_6 = self.linear3.weight
primals_7 = self.linear3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
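# Hedged usage sketch (assumes CUDA): the wrapper reuses the same nn.Linear
# parameters as Dueling_Critic, and the compiled graph asserts a
# (4, 4, 4, 4) input and produces a (4, 4, 4, 4) Q-value tensor.
def _example_dueling_usage():
    net = Dueling_CriticNew(input_size=4, output_size=4, hidden_size=4).cuda()
    q = net(torch.rand(4, 4, 4, 4, device='cuda'))
    assert q.shape == (4, 4, 4, 4)
    return q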
| FlickerNiko/ai_lib | Dueling_Critic | false | 13,693 | ["MIT"] | 99 | 7087d4569c9a827d35dd8735b55a080834d31a82 | https://github.com/FlickerNiko/ai_lib/tree/7087d4569c9a827d35dd8735b55a080834d31a82 |
BoundaryEntDiscriminator
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/x5/cx5x3ctvlxfngzqxjhckinfe5o47op62tt3oe6cizqbvhu2jadcg.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x => gt, mul, where
# Graph fragment:
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.2), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
triton_poi_fused_leaky_relu_0 = async_compile.triton('triton_poi_fused_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 278784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + (x0), tmp2, xmask)
tl.store(out_ptr1 + (x0), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/j6/cj6preby6v25qpri36zdc64okaiku4g3n2g3ntdegoyuutlffwkx.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x_1 => gt_1, mul_1, where_1
# Graph fragment:
# %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_1, 0.2), kwargs = {})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %convolution_1, %mul_1), kwargs = {})
triton_poi_fused_leaky_relu_1 = async_compile.triton('triton_poi_fused_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 147968
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + (x0), tmp2, xmask)
tl.store(out_ptr1 + (x0), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/24/c24d6loxuicc53yfdbmizoqhkrh5m2bpnt3a4ffeuex7f6y3vnth.py
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x_2 => gt_2, mul_2, where_2
# Graph fragment:
# %gt_2 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_2, 0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, 0.2), kwargs = {})
# %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %convolution_2, %mul_2), kwargs = {})
triton_poi_fused_leaky_relu_2 = async_compile.triton('triton_poi_fused_leaky_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_2(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 82944
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + (x0), tmp2, xmask)
tl.store(out_ptr1 + (x0), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/f7/cf7lcprivgof4fqsrblqrk334iqoref57wus34pdpmrc65sotzf7.py
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x_3 => gt_3, mul_3, where_3
# Graph fragment:
# %gt_3 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_3, 0), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_3, 0.2), kwargs = {})
# %where_3 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %convolution_3, %mul_3), kwargs = {})
triton_poi_fused_leaky_relu_3 = async_compile.triton('triton_poi_fused_leaky_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_3(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 51200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), None)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + (x0), tmp2, None)
tl.store(out_ptr1 + (x0), tmp5, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (64, 3, 4, 4), (48, 16, 4, 1))
assert_size_stride(primals_2, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_3, (128, 64, 4, 4), (1024, 16, 4, 1))
assert_size_stride(primals_4, (256, 128, 4, 4), (2048, 16, 4, 1))
assert_size_stride(primals_5, (512, 256, 4, 4), (4096, 16, 4, 1))
assert_size_stride(primals_6, (1, 512, 4, 4), (8192, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 33, 33), (69696, 1089, 33, 1))
buf1 = empty_strided_cuda((4, 64, 33, 33), (69696, 1089, 33, 1), torch.bool)
buf2 = empty_strided_cuda((4, 64, 33, 33), (69696, 1089, 33, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_leaky_relu_0.run(buf0, buf1, buf2, 278784, grid=grid(278784), stream=stream0)
del buf0
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_3, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 128, 17, 17), (36992, 289, 17, 1))
buf4 = empty_strided_cuda((4, 128, 17, 17), (36992, 289, 17, 1), torch.bool)
buf5 = empty_strided_cuda((4, 128, 17, 17), (36992, 289, 17, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_1.run(buf3, buf4, buf5, 147968, grid=grid(147968), stream=stream0)
del buf3
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_4, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 256, 9, 9), (20736, 81, 9, 1))
buf7 = empty_strided_cuda((4, 256, 9, 9), (20736, 81, 9, 1), torch.bool)
buf8 = empty_strided_cuda((4, 256, 9, 9), (20736, 81, 9, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_2.run(buf6, buf7, buf8, 82944, grid=grid(82944), stream=stream0)
del buf6
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(buf8, primals_5, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 512, 5, 5), (12800, 25, 5, 1))
buf10 = empty_strided_cuda((4, 512, 5, 5), (12800, 25, 5, 1), torch.bool)
buf11 = empty_strided_cuda((4, 512, 5, 5), (12800, 25, 5, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_3], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_3.run(buf9, buf10, buf11, 51200, grid=grid(51200), stream=stream0)
del buf9
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.convolution]
buf12 = extern_kernels.convolution(buf11, primals_6, stride=(2, 2), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 1, 3, 3), (9, 9, 3, 1))
return (buf12, primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, buf1, buf2, buf4, buf5, buf7, buf8, buf10, buf11, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((64, 3, 4, 4), (48, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((128, 64, 4, 4), (1024, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((256, 128, 4, 4), (2048, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((512, 256, 4, 4), (4096, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 512, 4, 4), (8192, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class BoundaryEntDiscriminator(nn.Module):
def __init__(self):
super(BoundaryEntDiscriminator, self).__init__()
filter_num_list = [64, 128, 256, 512, 1]
self.conv1 = nn.Conv2d(3, filter_num_list[0], kernel_size=4, stride
=2, padding=2, bias=False)
self.conv2 = nn.Conv2d(filter_num_list[0], filter_num_list[1],
kernel_size=4, stride=2, padding=2, bias=False)
self.conv3 = nn.Conv2d(filter_num_list[1], filter_num_list[2],
kernel_size=4, stride=2, padding=2, bias=False)
self.conv4 = nn.Conv2d(filter_num_list[2], filter_num_list[3],
kernel_size=4, stride=2, padding=2, bias=False)
self.conv5 = nn.Conv2d(filter_num_list[3], filter_num_list[4],
kernel_size=4, stride=2, padding=2, bias=False)
self.leakyrelu = nn.LeakyReLU(negative_slope=0.2)
self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
m.weight.data.normal_(0.0, 0.02)
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x):
x = self.leakyrelu(self.conv1(x))
x = self.leakyrelu(self.conv2(x))
x = self.leakyrelu(self.conv3(x))
x = self.leakyrelu(self.conv4(x))
x = self.conv5(x)
return x
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
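# Worked size check (illustrative): every conv here uses kernel 4, stride 2,
# padding 2, so H_out = (H + 2*2 - 4) // 2 + 1, giving the spatial chain
# 64 -> 33 -> 17 -> 9 -> 5 -> 3 seen in the compiled graph's assertions.
def _conv_out(h, k=4, s=2, p=2):
    return (h + 2 * p - k) // s + 1
assert [_conv_out(h) for h in (64, 33, 17, 9, 5)] == [33, 17, 9, 5, 3]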
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_leaky_relu_0(in_ptr0, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 278784
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr1 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 147968
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr1 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_2(in_ptr0, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 82944
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + x0, tmp2, xmask)
tl.store(out_ptr1 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_3(in_ptr0, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, None)
tmp1 = 0.0
tmp2 = tmp0 > tmp1
tmp3 = 0.2
tmp4 = tmp0 * tmp3
tmp5 = tl.where(tmp2, tmp0, tmp4)
tl.store(out_ptr0 + x0, tmp2, None)
tl.store(out_ptr1 + x0, tmp5, None)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6 = args
args.clear()
assert_size_stride(primals_1, (64, 3, 4, 4), (48, 16, 4, 1))
assert_size_stride(primals_2, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_3, (128, 64, 4, 4), (1024, 16, 4, 1))
assert_size_stride(primals_4, (256, 128, 4, 4), (2048, 16, 4, 1))
assert_size_stride(primals_5, (512, 256, 4, 4), (4096, 16, 4, 1))
assert_size_stride(primals_6, (1, 512, 4, 4), (8192, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(2,
2), padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 64, 33, 33), (69696, 1089, 33, 1))
buf1 = empty_strided_cuda((4, 64, 33, 33), (69696, 1089, 33, 1),
torch.bool)
buf2 = empty_strided_cuda((4, 64, 33, 33), (69696, 1089, 33, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_leaky_relu_0[grid(278784)](buf0, buf1, buf2,
278784, XBLOCK=512, num_warps=8, num_stages=1)
del buf0
buf3 = extern_kernels.convolution(buf2, primals_3, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 128, 17, 17), (36992, 289, 17, 1))
buf4 = empty_strided_cuda((4, 128, 17, 17), (36992, 289, 17, 1),
torch.bool)
buf5 = empty_strided_cuda((4, 128, 17, 17), (36992, 289, 17, 1),
torch.float32)
triton_poi_fused_leaky_relu_1[grid(147968)](buf3, buf4, buf5,
147968, XBLOCK=512, num_warps=8, num_stages=1)
del buf3
buf6 = extern_kernels.convolution(buf5, primals_4, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 256, 9, 9), (20736, 81, 9, 1))
        buf7 = empty_strided_cuda((4, 256, 9, 9), (20736, 81, 9, 1),
            torch.bool)
        buf8 = empty_strided_cuda((4, 256, 9, 9), (20736, 81, 9, 1),
            torch.float32)
triton_poi_fused_leaky_relu_2[grid(82944)](buf6, buf7, buf8, 82944,
XBLOCK=512, num_warps=8, num_stages=1)
del buf6
buf9 = extern_kernels.convolution(buf8, primals_5, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 512, 5, 5), (12800, 25, 5, 1))
        buf10 = empty_strided_cuda((4, 512, 5, 5), (12800, 25, 5, 1),
            torch.bool)
        buf11 = empty_strided_cuda((4, 512, 5, 5), (12800, 25, 5, 1),
            torch.float32)
triton_poi_fused_leaky_relu_3[grid(51200)](buf9, buf10, buf11,
51200, XBLOCK=256, num_warps=4, num_stages=1)
del buf9
buf12 = extern_kernels.convolution(buf11, primals_6, stride=(2, 2),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf12, (4, 1, 3, 3), (9, 9, 3, 1))
return (buf12, primals_1, primals_2, primals_3, primals_4, primals_5,
primals_6, buf1, buf2, buf4, buf5, buf7, buf8, buf10, buf11)
class BoundaryEntDiscriminatorNew(nn.Module):
def __init__(self):
super(BoundaryEntDiscriminatorNew, self).__init__()
filter_num_list = [64, 128, 256, 512, 1]
        self.conv1 = nn.Conv2d(3, filter_num_list[0], kernel_size=4,
            stride=2, padding=2, bias=False)
self.conv2 = nn.Conv2d(filter_num_list[0], filter_num_list[1],
kernel_size=4, stride=2, padding=2, bias=False)
self.conv3 = nn.Conv2d(filter_num_list[1], filter_num_list[2],
kernel_size=4, stride=2, padding=2, bias=False)
self.conv4 = nn.Conv2d(filter_num_list[2], filter_num_list[3],
kernel_size=4, stride=2, padding=2, bias=False)
self.conv5 = nn.Conv2d(filter_num_list[3], filter_num_list[4],
kernel_size=4, stride=2, padding=2, bias=False)
self.leakyrelu = nn.LeakyReLU(negative_slope=0.2)
self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
m.weight.data.normal_(0.0, 0.02)
if m.bias is not None:
m.bias.data.zero_()
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_3 = self.conv2.weight
primals_4 = self.conv3.weight
primals_5 = self.conv4.weight
primals_6 = self.conv5.weight
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6])
return output[0]
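# Hedged equivalence check (added, not in the original repo): assuming the
# eager BoundaryEntDiscriminator from the section above is importable, the
# compiled wrapper should match it on CUDA once the weights are synced:
#     eager = BoundaryEntDiscriminator().cuda()
#     fused = BoundaryEntDiscriminatorNew().cuda()
#     fused.load_state_dict(eager.state_dict())
#     x = torch.rand(4, 3, 64, 64, device='cuda')
#     torch.testing.assert_close(eager(x), fused(x))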
|
EmmaW8/BEAL
|
BoundaryEntDiscriminator
| false | 13,694 |
[
"MIT"
] | 95 |
945cad38a354605b8bca5bc01ae1b65848d605e1
|
https://github.com/EmmaW8/BEAL/tree/945cad38a354605b8bca5bc01ae1b65848d605e1
|
CombineSlices
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/xj/cxjyiylkxhhw7puycpyh4rhisqin43l5kgucj4lyyniez2ypunvk.py
# Topologically Sorted Source Nodes: [index_select], Original ATen: [aten.index_select]
# Source node to ATen node mapping:
# index_select => index
# Graph fragment:
# %index : [num_users=1] = call_function[target=torch.ops.aten.index.Tensor](args = (%arg0_1, [None, None, %full_default]), kwargs = {})
triton_poi_fused_index_select_0 = async_compile.triton('triton_poi_fused_index_select_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_index_select_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_index_select_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x1)), xmask)
tl.store(out_ptr0 + (x2), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [index_select], Original ATen: [aten.index_select]
stream0 = get_raw_stream(0)
triton_poi_fused_index_select_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch import nn
import torch.utils.data
import torch.utils.data.distributed
import torch.optim
import torch.fft
class CombineSlices(nn.Module):
def __init__(self, slice_dim=2):
super().__init__()
self.slice_dim = slice_dim
def forward(self, x):
        return torch.index_select(x, dim=self.slice_dim,
            index=torch.tensor(0, device=x.device))
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
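# Added note: with the default slice_dim=2, CombineSlices keeps only index 0
# along dim 2, so the (4, 4, 4, 4) input above becomes (4, 4, 1, 4) --
# equivalent to x[:, :, 0:1, :]. Sketch:
# m = CombineSlices()
# x, = get_inputs()
# assert torch.equal(m(x), x[:, :, 0:1, :])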
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
import torch.utils.data
import torch.utils.data.distributed
import torch.optim
import torch.fft
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_index_select_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x1), xmask)
tl.store(out_ptr0 + x2, tmp0, xmask)
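# Indexing note (added): for a contiguous (4, 4, 4, 4) input with strides
# (64, 16, 4, 1), the load offset x0 + 16 * x1 walks the last dim via x0 and
# the flattened (dim0, dim1) pair via x1, while dim 2 stays pinned at index
# 0 -- exactly the index_select(x, dim=2, index=0) being compiled here.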
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 4), (16, 4, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_index_select_0[grid(64)](arg0_1, buf0, 64, XBLOCK=
64, num_warps=1, num_stages=1)
del arg0_1
return buf0,
class CombineSlicesNew(nn.Module):
def __init__(self, slice_dim=2):
super().__init__()
self.slice_dim = slice_dim
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
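# Hedged usage sketch (added), assuming a CUDA device is available:
#     m = CombineSlicesNew()
#     x = torch.rand(4, 4, 4, 4, device='cuda')
#     torch.testing.assert_close(m(x), x[:, :, 0:1, :])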
|
Gaskell-1206/fastMRI
|
CombineSlices
| false | 13,695 |
[
"MIT"
] | 815 |
1b6d1f9020bc9209afa65ef9b9f2f3fa3348901c
|
https://github.com/Gaskell-1206/fastMRI/tree/1b6d1f9020bc9209afa65ef9b9f2f3fa3348901c
|
AttnGCNLayer
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/mz/cmzitldjc2sqcfz6nupzdve7bxtclxbg2ssodtqwhj6m7a3jc4sw.py
# Topologically Sorted Source Nodes: [eq], Original ATen: [aten.eq]
# Source node to ATen node mapping:
# eq => eq
# Graph fragment:
# %eq : [num_users=3] = call_function[target=torch.ops.aten.eq.Scalar](args = (%primals_6, 0), kwargs = {})
triton_poi_fused_eq_0 = async_compile.triton('triton_poi_fused_eq_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_eq_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_eq_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/3f/c3flst7ps77bzg75fogklcjyjj4axnequyt5s6rvvj3gf3kkzp4u.py
# Topologically Sorted Source Nodes: [attn_scores, attn_scores_1, attn_scores_2], Original ATen: [aten.div, aten.masked_fill, aten._softmax]
# Source node to ATen node mapping:
# attn_scores => div
# attn_scores_1 => full_default, where
# attn_scores_2 => amax, exp, sub, sum_1
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_7, 2.0), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -9.999999843067494e+17), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %div), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where, [2], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [2], True), kwargs = {})
triton_poi_fused__softmax_div_masked_fill_1 = async_compile.triton('triton_poi_fused__softmax_div_masked_fill_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_div_masked_fill_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_div_masked_fill_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last').to(tl.int1)
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp7 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp12 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last').to(tl.int1)
tmp17 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = -9.999999843067494e+17
tmp5 = tl.where(tmp0, tmp4, tmp3)
tmp8 = tmp7 * tmp2
tmp9 = tl.where(tmp6, tmp4, tmp8)
tmp10 = triton_helpers.maximum(tmp5, tmp9)
tmp13 = tmp12 * tmp2
tmp14 = tl.where(tmp11, tmp4, tmp13)
tmp15 = triton_helpers.maximum(tmp10, tmp14)
tmp18 = tmp17 * tmp2
tmp19 = tl.where(tmp16, tmp4, tmp18)
tmp20 = triton_helpers.maximum(tmp15, tmp19)
tmp21 = tmp5 - tmp20
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp9 - tmp20
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = tmp14 - tmp20
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tmp19 - tmp20
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp28 + tmp30
tl.store(out_ptr0 + (x0), tmp20, xmask)
tl.store(out_ptr1 + (x0), tmp31, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/4p/c4pvlrcpsglufgsa2orma4ncwtkwvawffhwtjnbgfypv2g4gp4r3.py
# Topologically Sorted Source Nodes: [attn_scores, attn_scores_1, attn_scores_2, attn_scores_3], Original ATen: [aten.div, aten.masked_fill, aten._softmax]
# Source node to ATen node mapping:
# attn_scores => div
# attn_scores_1 => full_default, where
# attn_scores_2 => amax, div_1, exp, sub
# attn_scores_3 => full_default_1, where_1
# Graph fragment:
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%view_7, 2.0), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -9.999999843067494e+17), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default, %div), kwargs = {})
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%where, [2], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%where, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq, %full_default_1, %div_1), kwargs = {})
triton_poi_fused__softmax_div_masked_fill_2 = async_compile.triton('triton_poi_fused__softmax_div_masked_fill_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i1', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_div_masked_fill_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_div_masked_fill_2(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask).to(tl.int1)
tmp1 = tl.load(in_out_ptr0 + (x2), xmask)
tmp6 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = -9.999999843067494e+17
tmp5 = tl.where(tmp0, tmp4, tmp3)
tmp7 = tmp5 - tmp6
tmp8 = tl_math.exp(tmp7)
tmp10 = tmp8 / tmp9
tmp11 = 0.0
tmp12 = tl.where(tmp0, tmp11, tmp10)
tl.store(in_out_ptr0 + (x2), tmp10, xmask)
tl.store(out_ptr0 + (x2), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/tc/ctcugqu2nbqlxcf2thnspnnypxifbalbzmclmutd5vaxdes2oyyk.py
# Topologically Sorted Source Nodes: [node_embeds, node_embeds_1], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# node_embeds => add
# node_embeds_1 => var_mean
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %view_9), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%add, [2]), kwargs = {correction: 0, keepdim: True})
triton_poi_fused_add_native_layer_norm_3 = async_compile.triton('triton_poi_fused_add_native_layer_norm_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_3(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + (x0), tmp16, xmask)
tl.store(out_ptr1 + (x0), tmp28, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ua/cuaquah4oaz43nhi25wixnpzlhvf2zfzdiezhmmwkuy5wfhtw6z4.py
# Topologically Sorted Source Nodes: [node_embeds, node_embeds_1], Original ATen: [aten.add, aten.native_layer_norm]
# Source node to ATen node mapping:
# node_embeds => add
# node_embeds_1 => add_1, add_2, mul, mul_1, rsqrt, sub_1
# Graph fragment:
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_3, %view_9), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_8), kwargs = {})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %primals_9), kwargs = {})
triton_poi_fused_add_native_layer_norm_4 = async_compile.triton('triton_poi_fused_add_native_layer_norm_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_native_layer_norm_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + (x0), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [einsum], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1), 0), reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0), out=buf2)
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [eq], Original ATen: [aten.eq]
stream0 = get_raw_stream(0)
triton_poi_fused_eq_0.run(primals_6, buf3, 64, grid=grid(64), stream=stream0)
del primals_6
buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
# Topologically Sorted Source Nodes: [attn_scores, attn_scores_1, attn_scores_2], Original ATen: [aten.div, aten.masked_fill, aten._softmax]
triton_poi_fused__softmax_div_masked_fill_1.run(buf3, buf2, buf4, buf5, 16, grid=grid(16), stream=stream0)
buf6 = buf2; del buf2 # reuse
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_scores, attn_scores_1, attn_scores_2, attn_scores_3], Original ATen: [aten.div, aten.masked_fill, aten._softmax]
triton_poi_fused__softmax_div_masked_fill_2.run(buf6, buf3, buf4, buf5, buf7, 64, grid=grid(64), stream=stream0)
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_scores_3, bmm], Original ATen: [aten.masked_fill, aten.bmm]
extern_kernels.bmm(buf7, primals_3, out=buf8)
buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0); del buf7 # reuse
# Topologically Sorted Source Nodes: [ctx_embeds], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf8, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf9)
buf10 = buf5; del buf5 # reuse
buf11 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [node_embeds, node_embeds_1], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_3.run(primals_3, buf9, buf10, buf11, 16, grid=grid(16), stream=stream0)
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [node_embeds, node_embeds_1], Original ATen: [aten.add, aten.native_layer_norm]
triton_poi_fused_add_native_layer_norm_4.run(primals_3, buf9, buf10, buf11, primals_8, primals_9, buf12, 64, grid=grid(64), stream=stream0)
del buf10
del buf11
del primals_9
return (buf12, primals_3, primals_8, buf3, buf6, reinterpret_tensor(buf8, (16, 4), (4, 1), 0), buf9, primals_7, reinterpret_tensor(buf0, (4, 4, 4), (16, 1, 4), 0), reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import math
import torch
import torch.nn as nn
import torch.utils.data
class GCNLayer(nn.Module):
def __init__(self, embed_size, dropout=0.0):
super().__init__()
self.embed_size = embed_size
        self.ctx_layer = nn.Linear(self.embed_size, self.embed_size,
            bias=False)
self.layernorm = nn.LayerNorm(embed_size)
self.dropout = nn.Dropout(dropout)
def forward(self, node_fts, rel_edges):
"""Args:
node_fts: (batch_size, num_nodes, embed_size)
rel_edges: (batch_size, num_nodes, num_nodes)
"""
ctx_embeds = self.ctx_layer(torch.bmm(rel_edges, node_fts))
node_embeds = node_fts + self.dropout(ctx_embeds)
node_embeds = self.layernorm(node_embeds)
return node_embeds
class AttnGCNLayer(GCNLayer):
def __init__(self, embed_size, d_ff, dropout=0.0):
super().__init__(embed_size, dropout=dropout)
self.edge_attn_query = nn.Linear(embed_size, d_ff)
self.edge_attn_key = nn.Linear(embed_size, d_ff)
self.attn_denominator = math.sqrt(d_ff)
def forward(self, node_fts, rel_edges):
"""
Args:
node_fts: (batch_size, num_nodes, embed_size)
rel_edges: (batch_size, num_nodes, num_nodes)
"""
attn_scores = torch.einsum('bod,bid->boi', self.edge_attn_query(
node_fts), self.edge_attn_key(node_fts)) / self.attn_denominator
attn_scores = attn_scores.masked_fill(rel_edges == 0, -1e+18)
attn_scores = torch.softmax(attn_scores, dim=2)
attn_scores = attn_scores.masked_fill(rel_edges == 0, 0)
ctx_embeds = self.ctx_layer(torch.bmm(attn_scores, node_fts))
node_embeds = node_fts + self.dropout(ctx_embeds)
node_embeds = self.layernorm(node_embeds)
return node_embeds
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'embed_size': 4, 'd_ff': 4}]
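# Added usage sketch: rel_edges doubles as an attention mask -- zero entries
# are excluded before the softmax (filled with -1e18) and zeroed again after
# it, so fully masked rows contribute no context.
# layer = AttnGCNLayer(embed_size=4, d_ff=4)
# node_fts, rel_edges = get_inputs()
# out = layer(node_fts, rel_edges)  # (4, 4, 4), same shape as node_fts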
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_eq_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused__softmax_div_masked_fill_1(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
    tmp0 = tl.load(in_ptr0 + 4 * x0, xmask,
        eviction_policy='evict_last').to(tl.int1)
    tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
    tmp6 = tl.load(in_ptr0 + (1 + 4 * x0), xmask,
        eviction_policy='evict_last').to(tl.int1)
    tmp7 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (2 + 4 * x0), xmask,
        eviction_policy='evict_last').to(tl.int1)
    tmp12 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp16 = tl.load(in_ptr0 + (3 + 4 * x0), xmask,
        eviction_policy='evict_last').to(tl.int1)
    tmp17 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = -9.999999843067494e+17
tmp5 = tl.where(tmp0, tmp4, tmp3)
tmp8 = tmp7 * tmp2
tmp9 = tl.where(tmp6, tmp4, tmp8)
tmp10 = triton_helpers.maximum(tmp5, tmp9)
tmp13 = tmp12 * tmp2
tmp14 = tl.where(tmp11, tmp4, tmp13)
tmp15 = triton_helpers.maximum(tmp10, tmp14)
tmp18 = tmp17 * tmp2
tmp19 = tl.where(tmp16, tmp4, tmp18)
tmp20 = triton_helpers.maximum(tmp15, tmp19)
tmp21 = tmp5 - tmp20
tmp22 = tl_math.exp(tmp21)
tmp23 = tmp9 - tmp20
tmp24 = tl_math.exp(tmp23)
tmp25 = tmp22 + tmp24
tmp26 = tmp14 - tmp20
tmp27 = tl_math.exp(tmp26)
tmp28 = tmp25 + tmp27
tmp29 = tmp19 - tmp20
tmp30 = tl_math.exp(tmp29)
tmp31 = tmp28 + tmp30
tl.store(out_ptr0 + x0, tmp20, xmask)
tl.store(out_ptr1 + x0, tmp31, xmask)
@triton.jit
def triton_poi_fused__softmax_div_masked_fill_2(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask).to(tl.int1)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp6 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = -9.999999843067494e+17
tmp5 = tl.where(tmp0, tmp4, tmp3)
tmp7 = tmp5 - tmp6
tmp8 = tl_math.exp(tmp7)
tmp10 = tmp8 / tmp9
tmp11 = 0.0
tmp12 = tl.where(tmp0, tmp11, tmp10)
tl.store(in_out_ptr0 + x2, tmp10, xmask)
tl.store(out_ptr0 + x2, tmp12, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_3(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask,
        eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask,
        eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = tmp27 / tmp15
tl.store(out_ptr0 + x0, tmp16, xmask)
tl.store(out_ptr1 + x0, tmp28, xmask)
@triton.jit
def triton_poi_fused_add_native_layer_norm_4(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr4 + x0, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 - tmp3
tmp6 = 1e-05
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp4 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
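# Reference note (added): kernel 3 above accumulates the per-row mean and
# biased variance (correction=0, matching nn.LayerNorm) over the 4 features
# of node_fts + ctx_embeds; kernel 4 then applies
# (x - mean) * rsqrt(var + 1e-05) * weight + bias elementwise.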
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_5, reinterpret_tensor(primals_3, (16,
4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf1)
del primals_4
del primals_5
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 4), (16, 4, 1),
0), reinterpret_tensor(buf1, (4, 4, 4), (16, 1, 4), 0), out=buf2)
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_eq_0[grid(64)](primals_6, buf3, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_6
buf4 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
buf5 = empty_strided_cuda((4, 4, 1), (4, 1, 16), torch.float32)
triton_poi_fused__softmax_div_masked_fill_1[grid(16)](buf3, buf2,
buf4, buf5, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf6 = buf2
del buf2
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_div_masked_fill_2[grid(64)](buf6, buf3,
buf4, buf5, buf7, 64, XBLOCK=64, num_warps=1, num_stages=1)
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(buf7, primals_3, out=buf8)
buf9 = reinterpret_tensor(buf7, (16, 4), (4, 1), 0)
del buf7
extern_kernels.mm(reinterpret_tensor(buf8, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf9)
buf10 = buf5
del buf5
buf11 = buf4
del buf4
triton_poi_fused_add_native_layer_norm_3[grid(16)](primals_3, buf9,
buf10, buf11, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_native_layer_norm_4[grid(64)](primals_3, buf9,
buf10, buf11, primals_8, primals_9, buf12, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf10
del buf11
del primals_9
return buf12, primals_3, primals_8, buf3, buf6, reinterpret_tensor(buf8,
(16, 4), (4, 1), 0), buf9, primals_7, reinterpret_tensor(buf0, (4,
4, 4), (16, 1, 4), 0), reinterpret_tensor(buf1, (4, 4, 4), (16, 4,
1), 0)
class GCNLayer(nn.Module):
def __init__(self, embed_size, dropout=0.0):
super().__init__()
self.embed_size = embed_size
        self.ctx_layer = nn.Linear(self.embed_size, self.embed_size,
            bias=False)
self.layernorm = nn.LayerNorm(embed_size)
self.dropout = nn.Dropout(dropout)
def forward(self, node_fts, rel_edges):
"""Args:
node_fts: (batch_size, num_nodes, embed_size)
rel_edges: (batch_size, num_nodes, num_nodes)
"""
ctx_embeds = self.ctx_layer(torch.bmm(rel_edges, node_fts))
node_embeds = node_fts + self.dropout(ctx_embeds)
node_embeds = self.layernorm(node_embeds)
return node_embeds
class AttnGCNLayerNew(GCNLayer):
def __init__(self, embed_size, d_ff, dropout=0.0):
super().__init__(embed_size, dropout=dropout)
self.edge_attn_query = nn.Linear(embed_size, d_ff)
self.edge_attn_key = nn.Linear(embed_size, d_ff)
self.attn_denominator = math.sqrt(d_ff)
    def forward(self, input_0, input_1):
        # Mapping fixed to match call(): primals_1/2 feed the first addmm
        # (edge_attn_query), primals_4/5 the second (edge_attn_key),
        # primals_7 the bias-free ctx_layer mm, and primals_8/9 the
        # layer-norm affine parameters.
        primals_1 = self.edge_attn_query.weight
        primals_2 = self.edge_attn_query.bias
        primals_4 = self.edge_attn_key.weight
        primals_5 = self.edge_attn_key.bias
        primals_7 = self.ctx_layer.weight
        primals_8 = self.layernorm.weight
        primals_9 = self.layernorm.bias
        primals_3 = input_0
        primals_6 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9])
return output[0]
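# Hedged check (added): with the corrected parameter mapping above, the
# compiled wrapper should agree with the eager AttnGCNLayer in eval mode
# (dropout disabled), e.g.:
#     eager = AttnGCNLayer(4, 4).cuda().eval()
#     fused = AttnGCNLayerNew(4, 4).cuda().eval()
#     fused.load_state_dict(eager.state_dict())
#     a, b = (t.cuda() for t in get_inputs())
#     torch.testing.assert_close(eager(a, b), fused(a, b), rtol=1e-4, atol=1e-4)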
|
Fenkail/hgr_v2t
|
AttnGCNLayer
| false | 13,696 |
[
"MIT"
] | 190 |
d8cc1c18cdaae54fd1878d6dc7b8e9c60d83fcbb
|
https://github.com/Fenkail/hgr_v2t/tree/d8cc1c18cdaae54fd1878d6dc7b8e9c60d83fcbb
|
Cartesian
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/v4/cv4ff55kvqufmlwbha3zwifl2iiainuigc3ecu7srjssgsdlhdbq.py
# Topologically Sorted Source Nodes: [stack], Original ATen: [aten.stack]
# Source node to ATen node mapping:
# stack => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%unsqueeze, %unsqueeze_1], -1), kwargs = {})
triton_poi_fused_stack_0 = async_compile.triton('triton_poi_fused_stack_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_stack_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_stack_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = (xindex // 2)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4*x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (1 + (4*x1)), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp7 = tl_math.cos(tmp6)
tmp8 = tmp5 * tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp4, tmp8, tmp9)
tmp11 = tmp0 >= tmp3
tmp12 = tl.full([1], 2, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tl.load(in_ptr0 + (4*x1), tmp11 & xmask, eviction_policy='evict_last', other=0.0)
tmp15 = tl.load(in_ptr0 + (1 + (4*x1)), tmp11 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tl_math.sin(tmp15)
tmp17 = tmp14 * tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp11, tmp17, tmp18)
tmp20 = tl.where(tmp4, tmp10, tmp19)
tl.store(out_ptr0 + (x2), tmp20, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [stack], Original ATen: [aten.stack]
stream0 = get_raw_stream(0)
triton_poi_fused_stack_0.run(arg0_1, buf0, 128, grid=grid(128), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch import nn
import torch.utils.data
import torch.utils.data.distributed
import torch.optim
import torch.fft
class Cartesian(nn.Module):
def forward(self, x):
r, phi = x[..., 0], x[..., 1]
return torch.stack((r * torch.cos(phi), r * torch.sin(phi)), dim=-1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
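# Worked example (added): the last dim is read as (r, phi); channels 2-3 of
# the rand input are ignored, and the trailing size drops from 4 to 2. With
# r=2 and phi=pi/2 (requires import math):
# m = Cartesian()
# p = torch.tensor([[2.0, math.pi / 2, 0.0, 0.0]])
# m(p)  # ~[[0.0, 2.0]] up to float rounding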
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
import torch.utils.data
import torch.utils.data.distributed
import torch.optim
import torch.fft
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_stack_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + 4 * x1, tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp7 = tl_math.cos(tmp6)
tmp8 = tmp5 * tmp7
tmp9 = tl.full(tmp8.shape, 0.0, tmp8.dtype)
tmp10 = tl.where(tmp4, tmp8, tmp9)
tmp11 = tmp0 >= tmp3
tl.full([1], 2, tl.int64)
tmp14 = tl.load(in_ptr0 + 4 * x1, tmp11 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp15 = tl.load(in_ptr0 + (1 + 4 * x1), tmp11 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp16 = tl_math.sin(tmp15)
tmp17 = tmp14 * tmp16
tmp18 = tl.full(tmp17.shape, 0.0, tmp17.dtype)
tmp19 = tl.where(tmp11, tmp17, tmp18)
tmp20 = tl.where(tmp4, tmp10, tmp19)
tl.store(out_ptr0 + x2, tmp20, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_stack_0[grid(128)](arg0_1, buf0, 128, XBLOCK=128,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class CartesianNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
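# Hedged usage sketch (added), assuming CUDA:
#     m = CartesianNew()
#     x = torch.rand(4, 4, 4, 4, device='cuda')
#     ref = torch.stack((x[..., 0] * torch.cos(x[..., 1]),
#                        x[..., 0] * torch.sin(x[..., 1])), dim=-1)
#     torch.testing.assert_close(m(x), ref)  # output shape (4, 4, 4, 2)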
|
Gaskell-1206/fastMRI
|
Cartesian
| false | 13,697 |
[
"MIT"
] | 815 |
1b6d1f9020bc9209afa65ef9b9f2f3fa3348901c
|
https://github.com/Gaskell-1206/fastMRI/tree/1b6d1f9020bc9209afa65ef9b9f2f3fa3348901c
|
LandmarkHead
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/u3/cu3litezfpnwhpnfnfuj6dtimz6ml42wmcwnwxlnovd4p5lvyin4.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 2048
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = (yindex // 512)
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), None, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (512*x2) + (2097152*y1)), tmp0, None)
''', device_str='cuda')
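# Hedged illustration (assumption, not Inductor output): triton_poi_fused_0 only
# repacks the contiguous NCHW input into a channels-last buffer so the 1x1
# convolution reads coalesced channels. A minimal eager equivalent:
def _sketch_channels_last_copy():
    # Strides mirror buf0 in call(); runs on CPU too, since only layout matters.
    x = torch.rand(4, 512, 64, 64)
    x_cl = x.contiguous(memory_format=torch.channels_last)
    assert x_cl.stride() == (2097152, 1, 32768, 512)
    return x_cl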
# kernel path: runs/run_shard_0/inductor_cache/sm/csmh6j2eewkdoozuncolx7k4amr2atfs4j3yxilmlmtxn6bpynuf.py
# Topologically Sorted Source Nodes: [out_1, view], Original ATen: [aten.clone, aten.view]
# Source node to ATen node mapping:
# out_1 => clone
# view => view
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
# %view : [num_users=1] = call_function[target=torch.ops.aten.reshape.default](args = (%clone, [4, -1, 10]), kwargs = {})
triton_poi_fused_clone_view_1 = async_compile.triton('triton_poi_fused_clone_view_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_view_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_view_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 491520
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x4 = xindex
x0 = xindex % 30
tmp0 = tl.load(in_out_ptr0 + (x4), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x4), tmp2, None)
''', device_str='cuda')
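# Hedged illustration (assumption, not Inductor output): the convolution in
# call() runs with bias=None, so this kernel folds the bias add into the
# NHWC-reinterpreted buffer, where channel = flat_index % 30. Eager sketch:
def _sketch_bias_fold(conv_out_nchw, bias):
    # conv_out_nchw: (4, 30, 64, 64) without bias; bias: (30,)
    out = conv_out_nchw.permute(0, 2, 3, 1).contiguous()  # NHWC clone
    out = out + bias  # broadcasts over the trailing channel dim
    return out.view(out.shape[0], -1, 10)  # (4, 12288, 10)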
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (30, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_2, (30, ), (1, ))
assert_size_stride(primals_3, (4, 512, 64, 64), (2097152, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_3, buf0, 2048, 4096, grid=grid(2048, 4096), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 30, 64, 64), (122880, 1, 1920, 30))
buf2 = reinterpret_tensor(buf1, (4, 64, 64, 30), (122880, 1920, 30, 1), 0); del buf1 # reuse
buf3 = reinterpret_tensor(buf2, (4, 12288, 10), (122880, 10, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [out_1, view], Original ATen: [aten.clone, aten.view]
triton_poi_fused_clone_view_1.run(buf3, primals_2, 491520, grid=grid(491520), stream=stream0)
del primals_2
return (buf3, primals_1, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((30, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((30, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 512, 64, 64), (2097152, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from itertools import product as product
import torch.nn as nn
class LandmarkHead(nn.Module):
def __init__(self, inchannels=512, num_anchors=3):
super(LandmarkHead, self).__init__()
        self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 10, kernel_size=(1, 1), stride=1, padding=0)
def forward(self, x):
out = self.conv1x1(x)
out = out.permute(0, 2, 3, 1).contiguous()
return out.view(out.shape[0], -1, 10)
def get_inputs():
return [torch.rand([4, 512, 64, 64])]
def get_init_inputs():
return [[], {}]
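# Hedged usage sketch (assumption: plain CPU eager mode suffices here). Each
# spatial cell emits num_anchors sets of 10 values, presumably 5 facial
# landmarks x 2 coordinates, giving 64 * 64 * 3 = 12288 rows per image.
def _sketch_landmark_head():
    head = LandmarkHead(inchannels=512, num_anchors=3)
    out = head(torch.rand(4, 512, 64, 64))
    assert out.shape == (4, 12288, 10)
    return out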
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from itertools import product as product
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = yindex // 512
    tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 512 * x2 + 2097152 * y1), tmp0, None)
@triton.jit
def triton_poi_fused_clone_view_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x4 = xindex
x0 = xindex % 30
tmp0 = tl.load(in_out_ptr0 + x4, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x4, tmp2, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (30, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_2, (30,), (1,))
assert_size_stride(primals_3, (4, 512, 64, 64), (2097152, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
        buf0 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512), torch.float32)
get_raw_stream(0)
        triton_poi_fused_0[grid(2048, 4096)](primals_3, buf0, 2048, 4096, XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_3
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 30, 64, 64), (122880, 1, 1920, 30))
        buf2 = reinterpret_tensor(buf1, (4, 64, 64, 30), (122880, 1920, 30, 1), 0)
del buf1
buf3 = reinterpret_tensor(buf2, (4, 12288, 10), (122880, 10, 1), 0)
del buf2
        triton_poi_fused_clone_view_1[grid(491520)](buf3, primals_2, 491520, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
return buf3, primals_1, buf0
class LandmarkHeadNew(nn.Module):
def __init__(self, inchannels=512, num_anchors=3):
super(LandmarkHeadNew, self).__init__()
        self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 10, kernel_size=(1, 1), stride=1, padding=0)
def forward(self, input_0):
primals_1 = self.conv1x1.weight
primals_2 = self.conv1x1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
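# Hedged sanity check (assumption: a CUDA device is available, since call()
# launches Triton kernels). The compiled path should match the eager
# permute + view formulation of the same 1x1 convolution.
def _check_against_eager():
    m = LandmarkHeadNew().cuda()
    x = torch.rand(4, 512, 64, 64, device='cuda')
    ref = m.conv1x1(x).permute(0, 2, 3, 1).contiguous().view(4, -1, 10)
    out = m(x)
    assert torch.allclose(out, ref, atol=1e-4)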
|
Edward1900/Face-Detector-1MB-with-landmark | LandmarkHead | false | 13698 | ["MIT"] | 907 | 16c16c4efa74b0264e0fd7fe0ddc0160f540a4bf | https://github.com/Edward1900/Face-Detector-1MB-with-landmark/tree/16c16c4efa74b0264e0fd7fe0ddc0160f540a4bf
|
TransformerNet
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/xi/cxi3ssslzv45liamqvbt6decmfms5gkzbjn7dtainfaa436qkyw3.py
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.reflection_pad2d]
# Source node to ATen node mapping:
# out => _unsafe_index, _unsafe_index_1
# Graph fragment:
# %_unsafe_index : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%primals_1, [None, None, %sub_1, None]), kwargs = {})
# %_unsafe_index_1 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index, [None, None, None, %sub_1]), kwargs = {})
triton_poi_fused_reflection_pad2d_0 = async_compile.triton('triton_poi_fused_reflection_pad2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 62208
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 72
x1 = (xindex // 72) % 72
x2 = (xindex // 5184)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (4095 + ((-1)*(tl_math.abs((-63) + (tl_math.abs((-4) + x0))))) + ((-64)*(tl_math.abs((-63) + (tl_math.abs((-4) + x1))))) + (4096*x2)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
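# Hedged illustration (assumption, not Inductor output): the index expression
# above reproduces nn.ReflectionPad2d(4) on a (4, 3, 64, 64) input;
# src = 63 - abs(abs(dst - 4) - 63) per spatial axis.
def _sketch_reflection_pad():
    import torch.nn.functional as F
    x = torch.rand(4, 3, 64, 64)
    ref = F.pad(x, (4, 4, 4, 4), mode='reflect')  # what the kernel materializes
    dst = torch.arange(72)
    src = 63 - ((dst - 4).abs() - 63).abs()  # same folding as the tl_math.abs chain
    assert torch.equal(ref, x[:, :, src][:, :, :, src])
    return ref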
# kernel path: runs/run_shard_0/inductor_cache/zp/czpuakvx3zciuzfmemejrltenkqbzqirfyy2fnfbmrorwkdndz6e.py
# Topologically Sorted Source Nodes: [out_1, instance_norm], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
# Source node to ATen node mapping:
# instance_norm => add, rsqrt, var_mean
# out_1 => convolution
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_1, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_red_fused__native_batch_norm_legit_convolution_1 = async_compile.triton('triton_red_fused__native_batch_norm_legit_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.reduction(
size_hints=[128, 4096],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_red_fused__native_batch_norm_legit_convolution_1', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_red_fused__native_batch_norm_legit_convolution_1(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr, RBLOCK : tl.constexpr):
xnumel = 128
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x3 = xindex
x0 = xindex % 32
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp4_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp4_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp4_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_out_ptr0 + (r2 + (4096*x3)), rmask & xmask, eviction_policy='evict_first', other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp4_mean_next, tmp4_m2_next, tmp4_weight_next = triton_helpers.welford_reduce(
tmp3, tmp4_mean, tmp4_m2, tmp4_weight, roffset == 0
)
tmp4_mean = tl.where(rmask & xmask, tmp4_mean_next, tmp4_mean)
tmp4_m2 = tl.where(rmask & xmask, tmp4_m2_next, tmp4_m2)
tmp4_weight = tl.where(rmask & xmask, tmp4_weight_next, tmp4_weight)
tl.store(in_out_ptr0 + (r2 + (4096*x3)), tmp2, rmask & xmask)
tmp4_tmp, tmp5_tmp, tmp6_tmp = triton_helpers.welford(
tmp4_mean, tmp4_m2, tmp4_weight, 1
)
tmp4 = tmp4_tmp[:, None]
tmp5 = tmp5_tmp[:, None]
tmp6 = tmp6_tmp[:, None]
tl.store(out_ptr0 + (x3), tmp4, xmask)
tmp7 = 4096.0
tmp8 = tmp5 / tmp7
tmp9 = 1e-05
tmp10 = tmp8 + tmp9
tmp11 = libdevice.rsqrt(tmp10)
tl.debug_barrier()
tl.store(in_out_ptr1 + (x3), tmp11, xmask)
''', device_str='cuda')
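# Hedged illustration (assumption, not Inductor output): the Welford loop
# computes per-(n, c) biased statistics over the 64 * 64 spatial positions;
# the 128 programs are the flattened (4, 32) instance-norm groups.
def _sketch_instance_stats(y):
    # y: conv output of shape (4, 32, 64, 64) after the bias add
    mean = y.mean(dim=(2, 3))                # written to out_ptr0
    var = y.var(dim=(2, 3), unbiased=False)  # m2 / 4096
    inv_std = torch.rsqrt(var + 1e-05)       # written to in_out_ptr1
    return mean, inv_std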
# kernel path: runs/run_shard_0/inductor_cache/in/ciny2bql3sygecchlvr6rxw73jnhl7dgi3s5w2g2fefaoug53zzz.py
# Topologically Sorted Source Nodes: [instance_norm], Original ATen: [aten.repeat]
# Source node to ATen node mapping:
# instance_norm => repeat
# Graph fragment:
# %repeat : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_4, [4]), kwargs = {})
triton_poi_fused_repeat_2 = async_compile.triton('triton_poi_fused_repeat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_repeat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_repeat_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0 % 32), xmask)
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ii/ciidusl6utkne6h3zmwx3jccsnttcsdc42mtp3vanldcnxv4y7ov.py
# Topologically Sorted Source Nodes: [y, out_2], Original ATen: [aten.relu, aten.reflection_pad2d]
# Source node to ATen node mapping:
# out_2 => _unsafe_index_2, _unsafe_index_3
# y => relu
# Graph fragment:
# %relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %_unsafe_index_2 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu, [None, None, %sub_6, None]), kwargs = {})
# %_unsafe_index_3 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_2, [None, None, None, %sub_6]), kwargs = {})
triton_poi_fused_reflection_pad2d_relu_3 = async_compile.triton('triton_poi_fused_reflection_pad2d_relu_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_relu_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reflection_pad2d_relu_3(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 557568
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 66
x1 = (xindex // 66) % 66
x2 = (xindex // 4356)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (4095 + ((-1)*(tl_math.abs((-63) + (tl_math.abs((-1) + x0))))) + ((-64)*(tl_math.abs((-63) + (tl_math.abs((-1) + x1))))) + (4096*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x2), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x2), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
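# Hedged illustration (assumption, not Inductor output): tmp2..tmp10 apply the
# affine instance norm followed by ReLU, and the surrounding index math is a
# ReflectionPad2d(1) on the normalized (4, 32, 64, 64) map.
def _sketch_norm_relu_pad(x, mean, inv_std, weight, bias):
    import torch.nn.functional as F
    # mean, inv_std: per-(n, c) stats of shape (4, 32); weight, bias: (32,)
    y = (x - mean[..., None, None]) * inv_std[..., None, None]
    y = y * weight[..., None, None] + bias[..., None, None]
    return F.pad(torch.relu(y), (1, 1, 1, 1), mode='reflect')  # -> (4, 32, 66, 66)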
# kernel path: runs/run_shard_0/inductor_cache/si/csiohvngy3nd4p3av6rdkonvlcuns665sjcyq5ggukrhfwpso4ay.py
# Topologically Sorted Source Nodes: [out_3, instance_norm_1], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
# Source node to ATen node mapping:
# instance_norm_1 => add_2, rsqrt_1, var_mean_1
# out_3 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_3, %primals_6, %primals_7, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_2, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {})
triton_per_fused__native_batch_norm_legit_convolution_4 = async_compile.triton('triton_per_fused__native_batch_norm_legit_convolution_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[256, 1024],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_convolution_4', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_4(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel):
xnumel = 256
XBLOCK: tl.constexpr = 1
rnumel = 1024
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (r2 + (1024*x3)), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = tl.broadcast_to(tmp3, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
tmp8 = tl.full([1], 1024, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp3 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 1024.0
tmp17 = tmp15 / tmp16
tmp18 = 1e-05
tmp19 = tmp17 + tmp18
tmp20 = libdevice.rsqrt(tmp19)
tl.store(in_out_ptr0 + (r2 + (1024*x3)), tmp2, None)
tl.debug_barrier()
tl.store(in_out_ptr1 + (x3), tmp20, None)
tl.store(out_ptr0 + (x3), tmp10, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/bo/cbop6byfkkzzjktajzua3ovnpvhy32nxb7dbv364jfeaxunlv7bo.py
# Topologically Sorted Source Nodes: [instance_norm_1], Original ATen: [aten.repeat]
# Source node to ATen node mapping:
# instance_norm_1 => repeat_2
# Graph fragment:
# %repeat_2 : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_8, [4]), kwargs = {})
triton_poi_fused_repeat_5 = async_compile.triton('triton_poi_fused_repeat_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_repeat_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_repeat_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0 % 64), xmask)
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/k6/ck6ljtglelyaqir7indwg3cp4wwudzqtlaof4xfdlyasdzhka7z5.py
# Topologically Sorted Source Nodes: [y_1, out_4], Original ATen: [aten.relu, aten.reflection_pad2d]
# Source node to ATen node mapping:
# out_4 => _unsafe_index_4, _unsafe_index_5
# y_1 => relu_1
# Graph fragment:
# %relu_1 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_3,), kwargs = {})
# %_unsafe_index_4 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_1, [None, None, %sub_11, None]), kwargs = {})
# %_unsafe_index_5 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_4, [None, None, None, %sub_11]), kwargs = {})
triton_poi_fused_reflection_pad2d_relu_6 = async_compile.triton('triton_poi_fused_reflection_pad2d_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_relu_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reflection_pad2d_relu_6(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 295936
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 34
x1 = (xindex // 34) % 34
x2 = (xindex // 1156)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (1023 + ((-1)*(tl_math.abs((-31) + (tl_math.abs((-1) + x0))))) + ((-32)*(tl_math.abs((-31) + (tl_math.abs((-1) + x1))))) + (1024*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x2), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x2), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tl.store(out_ptr0 + (x3), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/b3/cb3i36nfih3ah5aifo46hyitngbbqmrioka4h7sa3nz2vzd5toin.py
# Topologically Sorted Source Nodes: [out_5, instance_norm_2, y_2], Original ATen: [aten.convolution, aten.repeat, aten._native_batch_norm_legit, aten.relu]
# Source node to ATen node mapping:
# instance_norm_2 => add_4, repeat_4, repeat_5, rsqrt_2, var_mean_2
# out_5 => convolution_2
# y_2 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_5, %primals_10, %primals_11, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %repeat_4 : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_12, [4]), kwargs = {})
# %repeat_5 : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_13, [4]), kwargs = {})
# %var_mean_2 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_4, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_4, 1e-05), kwargs = {})
# %rsqrt_2 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_4,), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_5,), kwargs = {})
triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_7 = async_compile.triton('triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[512, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: 'i32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_7', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': True, 'num_load': 4, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_7(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr2, out_ptr3, xnumel, rnumel):
xnumel = 512
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
x0 = xindex
r3 = rindex
x1 = xindex % 128
tmp0 = tl.load(in_ptr0 + (x0 % 128), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x0 % 128), None, eviction_policy='evict_last')
tmp2 = tl.load(in_out_ptr0 + (r3 + (256*x0)), None)
tmp3 = tl.load(in_ptr2 + (x1), None, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp5 = tl.broadcast_to(tmp4, [RBLOCK])
tmp7 = tl.broadcast_to(tmp5, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = tl.full([1], 256, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp5 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [RBLOCK])
tmp17 = triton_helpers.promote_to_tensor(tl.sum(tmp15, 0))
tmp18 = 256.0
tmp19 = tmp17 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp4 - tmp12
tmp24 = tmp23 * tmp22
tmp25 = tmp24 * tmp0
tmp26 = tmp25 + tmp1
tmp27 = tl.full([1], 0, tl.int32)
tmp28 = triton_helpers.maximum(tmp27, tmp26)
tl.store(out_ptr0 + (x0), tmp0, None)
tl.store(out_ptr1 + (x0), tmp1, None)
tl.store(in_out_ptr0 + (r3 + (256*x0)), tmp4, None)
tl.debug_barrier()
tl.store(in_out_ptr1 + (x0), tmp22, None)
tl.store(out_ptr3 + (r3 + (256*x0)), tmp28, None)
tl.store(out_ptr2 + (x0), tmp12, None)
''', device_str='cuda')
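# Hedged illustration (assumption, not Inductor output): with RBLOCK equal to
# rnumel, the whole 16 * 16 map for one (n, c) pair sits in a single program,
# so bias add, statistics, affine scaling and ReLU happen in one pass.
def _sketch_fused_in_relu(conv_out, conv_bias, gamma, beta):
    # conv_out: (4, 128, 16, 16) before bias; conv_bias, gamma, beta: (128,)
    y = conv_out + conv_bias[:, None, None]
    mean = y.mean(dim=(2, 3), keepdim=True)
    var = y.var(dim=(2, 3), unbiased=False, keepdim=True)
    return torch.relu((y - mean) * torch.rsqrt(var + 1e-05)
                      * gamma[:, None, None] + beta[:, None, None])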
# kernel path: runs/run_shard_0/inductor_cache/st/cstfzn4z33vdn3t4r76kkdoe3fox63ob7zbuq5lr4e2aj2wo3cfw.py
# Topologically Sorted Source Nodes: [out_6], Original ATen: [aten.reflection_pad2d]
# Source node to ATen node mapping:
# out_6 => _unsafe_index_6, _unsafe_index_7
# Graph fragment:
# %_unsafe_index_6 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_2, [None, None, %sub_16, None]), kwargs = {})
# %_unsafe_index_7 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_6, [None, None, None, %sub_16]), kwargs = {})
triton_poi_fused_reflection_pad2d_8 = async_compile.triton('triton_poi_fused_reflection_pad2d_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reflection_pad2d_8(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 165888
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 18
x1 = (xindex // 18) % 18
x2 = (xindex // 324)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (255 + ((-1)*(tl_math.abs((-15) + (tl_math.abs((-1) + x0))))) + ((-16)*(tl_math.abs((-15) + (tl_math.abs((-1) + x1))))) + (256*x2)), None, eviction_policy='evict_last')
tl.store(out_ptr0 + (x3), tmp0, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/e2/ce2xxpjelyctnuhefg5fuzcvwpa544akythto7ai5tgzpkjchqwu.py
# Topologically Sorted Source Nodes: [out_7, instance_norm_3], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
# Source node to ATen node mapping:
# instance_norm_3 => add_6, rsqrt_3, var_mean_3
# out_7 => convolution_3
# Graph fragment:
# %convolution_3 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_7, %primals_14, %primals_15, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %var_mean_3 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_6, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_6, 1e-05), kwargs = {})
# %rsqrt_3 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_6,), kwargs = {})
triton_per_fused__native_batch_norm_legit_convolution_9 = async_compile.triton('triton_per_fused__native_batch_norm_legit_convolution_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[512, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_convolution_9', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_9(in_out_ptr0, in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel):
xnumel = 512
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (r2 + (256*x3)), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = tl.broadcast_to(tmp3, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
tmp8 = tl.full([1], 256, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp3 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 256.0
tmp17 = tmp15 / tmp16
tmp18 = 1e-05
tmp19 = tmp17 + tmp18
tmp20 = libdevice.rsqrt(tmp19)
tl.store(in_out_ptr0 + (r2 + (256*x3)), tmp2, None)
tl.debug_barrier()
tl.store(in_out_ptr1 + (x3), tmp20, None)
tl.store(out_ptr0 + (x3), tmp10, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/df/cdfz5yaux6hd3x6u7ywjjuon3rgwzpj6jchxqf6fmzsftmjj7luu.py
# Topologically Sorted Source Nodes: [instance_norm_3], Original ATen: [aten.repeat]
# Source node to ATen node mapping:
# instance_norm_3 => repeat_6
# Graph fragment:
# %repeat_6 : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_16, [4]), kwargs = {})
triton_poi_fused_repeat_10 = async_compile.triton('triton_poi_fused_repeat_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_repeat_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_repeat_10(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0 % 128), xmask)
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/72/c72anaicoavbg3ypt27amkloa7kkqjupcqqr7kifcj4pxrdujccb.py
# Topologically Sorted Source Nodes: [out_8, out_9], Original ATen: [aten.relu, aten.reflection_pad2d]
# Source node to ATen node mapping:
# out_8 => relu_3
# out_9 => _unsafe_index_8, _unsafe_index_9
# Graph fragment:
# %relu_3 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_7,), kwargs = {})
# %_unsafe_index_8 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_3, [None, None, %sub_16, None]), kwargs = {})
# %_unsafe_index_9 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_8, [None, None, None, %sub_16]), kwargs = {})
triton_poi_fused_reflection_pad2d_relu_11 = async_compile.triton('triton_poi_fused_reflection_pad2d_relu_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_relu_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reflection_pad2d_relu_11(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 165888
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 18
x1 = (xindex // 18) % 18
x2 = (xindex // 324)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (255 + ((-1)*(tl_math.abs((-15) + (tl_math.abs((-1) + x0))))) + ((-16)*(tl_math.abs((-15) + (tl_math.abs((-1) + x1))))) + (256*x2)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x2), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x2), None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x2), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tl.store(out_ptr0 + (x3), tmp10, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ht/chtgxfnwsuka4dupubnxavhxnvwl72mb4ekz5zpamrm6tamf5fvv.py
# Topologically Sorted Source Nodes: [out_10, out_11, out_12], Original ATen: [aten.convolution, aten.repeat, aten._native_batch_norm_legit, aten.add]
# Source node to ATen node mapping:
# out_10 => convolution_4
# out_11 => add_8, repeat_8, rsqrt_4, var_mean_4
# out_12 => add_10
# Graph fragment:
# %convolution_4 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_9, %primals_18, %primals_19, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %repeat_8 : [num_users=2] = call_function[target=torch.ops.aten.repeat.default](args = (%primals_20, [4]), kwargs = {})
# %var_mean_4 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_8, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_8, 1e-05), kwargs = {})
# %rsqrt_4 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_8,), kwargs = {})
# %add_10 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_9, %relu_2), kwargs = {})
triton_per_fused__native_batch_norm_legit_add_convolution_repeat_12 = async_compile.triton('triton_per_fused__native_batch_norm_legit_add_convolution_repeat_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[512, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_add_convolution_repeat_12', 'mutated_arg_names': ['in_out_ptr0', 'in_out_ptr1'], 'no_x_dim': True, 'num_load': 5, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_convolution_repeat_12(in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1, out_ptr3, xnumel, rnumel):
xnumel = 512
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
x0 = xindex
r3 = rindex
x1 = xindex % 128
tmp0 = tl.load(in_ptr0 + (x0 % 128), None, eviction_policy='evict_last')
tmp1 = tl.load(in_out_ptr0 + (r3 + (256*x0)), None)
tmp2 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr2 + (x1), None, eviction_policy='evict_last')
tmp27 = tl.load(in_out_ptr1 + (r3 + (256*x0)), None)
tmp3 = tmp1 + tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = tl.broadcast_to(tmp4, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = tl.full([1], 256, tl.int32)
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp8 / tmp10
tmp12 = tmp4 - tmp11
tmp13 = tmp12 * tmp12
tmp14 = tl.broadcast_to(tmp13, [RBLOCK])
tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0))
tmp17 = tmp3 - tmp11
tmp18 = 256.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp24 = tmp23 * tmp0
tmp26 = tmp24 + tmp25
tmp28 = tmp26 + tmp27
tl.store(out_ptr0 + (x0), tmp0, None)
tl.store(in_out_ptr0 + (r3 + (256*x0)), tmp3, None)
tl.store(in_out_ptr1 + (r3 + (256*x0)), tmp28, None)
tl.store(out_ptr3 + (x0), tmp22, None)
tl.store(out_ptr1 + (x0), tmp11, None)
''', device_str='cuda')
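# Hedged illustration (assumption, not Inductor output): this kernel closes a
# residual block, adding the instance-normalized second convolution back onto
# the block input (relu_2).
def _sketch_residual_tail(conv_out, conv_bias, gamma, beta, block_in):
    # all feature maps (4, 128, 16, 16); conv_bias, gamma, beta: (128,)
    y = conv_out + conv_bias[:, None, None]
    mean = y.mean(dim=(2, 3), keepdim=True)
    var = y.var(dim=(2, 3), unbiased=False, keepdim=True)
    return ((y - mean) * torch.rsqrt(var + 1e-05)
            * gamma[:, None, None] + beta[:, None, None] + block_in)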
# kernel path: runs/run_shard_0/inductor_cache/b3/cb3qjb4uid2oua44nvmn56hgg22nygnazgnt5dgu6oqhrcyphjio.py
# Topologically Sorted Source Nodes: [out_38, out_39], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
# Source node to ATen node mapping:
# out_38 => convolution_12
# out_39 => add_28, rsqrt_12, var_mean_12
# Graph fragment:
# %convolution_12 : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_25, %primals_50, %primals_51, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %var_mean_12 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_24, [0, 2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_28 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_24, 1e-05), kwargs = {})
# %rsqrt_12 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_28,), kwargs = {})
triton_per_fused__native_batch_norm_legit_convolution_13 = async_compile.triton('triton_per_fused__native_batch_norm_legit_convolution_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[512, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_convolution_13', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_13(in_out_ptr0, in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel):
xnumel = 512
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (r2 + (256*x3)), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1  # convolution output plus per-channel bias (out_38)
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = tl.broadcast_to(tmp3, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
tmp8 = tl.full([1], 256, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9  # per-(batch, channel) mean over the 16x16 plane
tmp11 = tmp3 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))  # sum of squared deviations
tmp16 = 256.0
tmp17 = tmp15 / tmp16  # biased variance (correction=0)
tmp18 = 1e-05
tmp19 = tmp17 + tmp18
tmp20 = libdevice.rsqrt(tmp19)  # inverse std: 1 / sqrt(var + 1e-05)
tl.store(in_out_ptr0 + (r2 + (256*x3)), tmp2, None)  # bias-added activations, in place
tl.store(out_ptr2 + (x3), tmp20, None)  # inverse std per (batch, channel)
tl.store(out_ptr0 + (x3), tmp10, None)  # mean per (batch, channel)
tl.store(out_ptr1 + (x3), tmp15, None)  # sum of squared deviations per (batch, channel)
''', device_str='cuda')
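# For reference only: a minimal eager-mode sketch (an assumption, not part of
# the generated module) of what triton_per_fused__native_batch_norm_legit_convolution_13
# produces for each (batch, channel) slice of 16x16 = 256 elements -- the
# bias-added activations plus the instance-norm statistics stored above.
def _reference_instance_norm_stats(conv_out, bias, eps=1e-05):
    # conv_out: (N, C, H, W) convolution output without bias; bias: (C,)
    y = conv_out + bias.view(1, -1, 1, 1)
    mean = y.mean(dim=(2, 3), keepdim=True)
    sq_dev_sum = ((y - mean) ** 2).sum(dim=(2, 3), keepdim=True)
    var = sq_dev_sum / (y.shape[2] * y.shape[3])  # correction=0, as in the graph
    inv_std = torch.rsqrt(var + eps)
    return y, mean, sq_dev_sum, inv_std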
# kernel path: runs/run_shard_0/inductor_cache/qq/cqqyalirw6ktpkb7ck6op5kn5slga5gde6ffhveztg3zuk5kgxda.py
# Topologically Sorted Source Nodes: [x_in], Original ATen: [aten.arange]
# Source node to ATen node mapping:
# x_in => iota_26
# Graph fragment:
# %iota_26 : [num_users=2] = call_function[target=torch.ops.prims.iota.default](args = (32,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
triton_poi_fused_arange_14 = async_compile.triton('triton_poi_fused_arange_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_arange_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_arange_14(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/r3/cr3jbyf5ylpcnip7fl3i4e3dqhcl5pfkrdyzumgnsa2b4past5le.py
# Topologically Sorted Source Nodes: [x_in], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
# Source node to ATen node mapping:
# x_in => add_31, add_32, convert_element_type, convert_element_type_1, mul_26, mul_27
# Graph fragment:
# %mul_26 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_26, 1), kwargs = {})
# %add_31 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_26, 0), kwargs = {})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_31, torch.float32), kwargs = {})
# %add_32 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type, 0.0), kwargs = {})
# %mul_27 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_32, 0.5), kwargs = {})
# %convert_element_type_1 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_27, torch.int64), kwargs = {})
triton_poi_fused__to_copy_add_arange_mul_15 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_15', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_15(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2  # map output pixel i to source position i * 0.5
tmp4 = tmp3.to(tl.int32)  # truncate to the nearest-neighbor source index i // 2
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
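# For reference only: a sketch (an assumption, not part of the generated
# module) of the index map the two kernels above build for x_in --
# nearest-neighbor 2x upsampling from 16 to 32 reads source index
# floor(i * 0.5), which truncation reproduces for non-negative i.
def _reference_nearest_upsample_indices(out_size=32, scale=0.5):
    return (torch.arange(out_size, dtype=torch.float32) * scale).to(torch.int64)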
# kernel path: runs/run_shard_0/inductor_cache/4u/c4ulzdc64ey5bpk3wpc3vnmalkhaekirwmcugkiy5azetn32lzqc.py
# Topologically Sorted Source Nodes: [out_40, x_in, out_41], Original ATen: [aten.add, aten._unsafe_index, aten.reflection_pad2d]
# Source node to ATen node mapping:
# out_40 => add_30
# out_41 => _unsafe_index_27, _unsafe_index_28
# x_in => _unsafe_index_26
# Graph fragment:
# %add_30 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_25, %add_25), kwargs = {})
# %_unsafe_index_26 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%add_30, [None, None, %unsqueeze_52, %convert_element_type_1]), kwargs = {})
# %_unsafe_index_27 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_26, [None, None, %sub_11, None]), kwargs = {})
# %_unsafe_index_28 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_27, [None, None, None, %sub_11]), kwargs = {})
triton_poi_fused__unsafe_index_add_reflection_pad2d_16 = async_compile.triton('triton_poi_fused__unsafe_index_add_reflection_pad2d_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_reflection_pad2d_16', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_add_reflection_pad2d_16(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 591872
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 34) % 34
x0 = xindex % 34
x4 = (xindex // 1156)
x2 = (xindex // 1156) % 128
x7 = xindex
tmp0 = tl.load(in_ptr0 + (31 + ((-1)*(tl_math.abs((-31) + (tl_math.abs((-1) + x1)))))), None, eviction_policy='evict_last')  # row of the index map, reflected across the 1-pixel border (34 -> 32)
tmp5 = tl.load(in_ptr0 + (31 + ((-1)*(tl_math.abs((-31) + (tl_math.abs((-1) + x0)))))), None, eviction_policy='evict_last')  # column of the index map, reflected likewise
tmp10 = tl.load(in_ptr2 + (x4), None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr3 + (x4), None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr4 + (x4), None, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr5 + (x2), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 16, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (tmp8 + (16*tmp4) + (256*x4)), None, eviction_policy='evict_last')
tmp11 = tmp9 - tmp10  # center with the per-(batch, channel) mean
tmp13 = 256.0
tmp14 = tmp12 / tmp13  # variance from the stored sum of squared deviations
tmp15 = 1e-05
tmp16 = tmp14 + tmp15
tmp17 = libdevice.rsqrt(tmp16)
tmp18 = tmp11 * tmp17  # normalize
tmp20 = tmp18 * tmp19  # instance-norm scale
tmp22 = tmp20 + tmp21  # instance-norm shift
tmp23 = tl.load(in_ptr6 + (tmp8 + (16*tmp4) + (256*x4)), None, eviction_policy='evict_last')
tmp24 = tmp22 + tmp23  # residual add (out_40), gathered at the same upsampled index
tl.store(out_ptr0 + (x7), tmp24, None)
''', device_str='cuda')
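# For reference only: a sketch (an assumption, not part of the generated
# module) of the fusion above -- affine instance norm using the statistics
# from kernel 13, residual add (out_40), nearest 2x upsample 16 -> 32 (x_in),
# and a 1-pixel reflection pad to 34 (out_41). mean and sq_dev_sum are assumed
# reshaped to (N, C, 1, 1); weight and bias to (C,).
def _reference_upsample_residual_block(conv_out, mean, sq_dev_sum, weight, bias, residual, eps=1e-05):
    import torch.nn.functional as F
    var = sq_dev_sum / (conv_out.shape[2] * conv_out.shape[3])
    z = (conv_out - mean) * torch.rsqrt(var + eps)
    z = z * weight.view(1, -1, 1, 1) + bias.view(1, -1, 1, 1)
    z = z + residual                                      # out_40
    z = F.interpolate(z, scale_factor=2, mode='nearest')  # x_in
    return F.pad(z, (1, 1, 1, 1), mode='reflect')         # out_41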
# kernel path: runs/run_shard_0/inductor_cache/dq/cdq7haid5a5j3lkr5pvfwpau3a4evwh5wu6wzw4wsmw3e4ska5zp.py
# Topologically Sorted Source Nodes: [x_in_1], Original ATen: [aten.arange]
# Source node to ATen node mapping:
# x_in_1 => iota_30
# Graph fragment:
# %iota_30 : [num_users=2] = call_function[target=torch.ops.prims.iota.default](args = (64,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
triton_poi_fused_arange_17 = async_compile.triton('triton_poi_fused_arange_17', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_arange_17', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_arange_17(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tl.store(out_ptr0 + (x0), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/lc/clcrsu5s34immb6guobkppggbuvqp4z4ceacadyjt2r2vb5cnfrr.py
# Topologically Sorted Source Nodes: [x_in_1], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
# Source node to ATen node mapping:
# x_in_1 => add_37, add_38, convert_element_type_4, convert_element_type_5, mul_32, mul_33
# Graph fragment:
# %mul_32 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%iota_30, 1), kwargs = {})
# %add_37 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_32, 0), kwargs = {})
# %convert_element_type_4 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%add_37, torch.float32), kwargs = {})
# %add_38 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_4, 0.0), kwargs = {})
# %mul_33 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_38, 0.5), kwargs = {})
# %convert_element_type_5 : [num_users=3] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%mul_33, torch.int64), kwargs = {})
triton_poi_fused__to_copy_add_arange_mul_18 = async_compile.triton('triton_poi_fused__to_copy_add_arange_mul_18', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_mul_18', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_18(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp4, xmask)
''', device_str='cuda')
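# For reference only: a sketch (an assumption, not part of the generated
# module) of how the int64 map from the two kernels above is consumed --
# gathering rows and columns with it reproduces
# F.interpolate(x, scale_factor=2, mode='nearest') for the 32 -> 64 upsample.
def _reference_gather_upsample(x, idx):
    # x: (N, C, 32, 32); idx: (64,) int64 with idx[i] == i // 2
    return x[:, :, idx[:, None], idx[None, :]]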
# kernel path: runs/run_shard_0/inductor_cache/wm/cwmojiabjtl2ol57sxtgs6t2ik45zfe3nj5ahimvzp7to4pegq4y.py
# Topologically Sorted Source Nodes: [y_3, x_in_1, out_43], Original ATen: [aten.relu, aten._unsafe_index, aten.reflection_pad2d]
# Source node to ATen node mapping:
# out_43 => _unsafe_index_30, _unsafe_index_31
# x_in_1 => _unsafe_index_29
# y_3 => relu_8
# Graph fragment:
# %relu_8 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_27,), kwargs = {})
# %_unsafe_index_29 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_8, [None, None, %unsqueeze_57, %convert_element_type_5]), kwargs = {})
# %_unsafe_index_30 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_29, [None, None, %sub_6, None]), kwargs = {})
# %_unsafe_index_31 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_30, [None, None, None, %sub_6]), kwargs = {})
triton_poi_fused__unsafe_index_reflection_pad2d_relu_19 = async_compile.triton('triton_poi_fused__unsafe_index_reflection_pad2d_relu_19', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2097152],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_reflection_pad2d_relu_19', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_reflection_pad2d_relu_19(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1115136
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 66) % 66
x0 = xindex % 66
x2 = (xindex // 4356)
x5 = xindex
tmp0 = tl.load(in_ptr0 + (63 + ((-1)*(tl_math.abs((-63) + (tl_math.abs((-1) + x1)))))), xmask, eviction_policy='evict_last')  # row of the index map, reflected across the 1-pixel border (66 -> 64)
tmp5 = tl.load(in_ptr0 + (63 + ((-1)*(tl_math.abs((-63) + (tl_math.abs((-1) + x0)))))), xmask, eviction_policy='evict_last')  # column of the index map, reflected likewise
tmp10 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr3 + (x2), xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr4 + (x2), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr5 + (x2), xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 32, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (tmp8 + (32*tmp4) + (1024*x2)), xmask, eviction_policy='evict_last')
tmp11 = tmp9 - tmp10  # center with the per-(batch, channel) mean
tmp13 = tmp11 * tmp12  # in_ptr3 already holds the precomputed inverse std
tmp15 = tmp13 * tmp14  # instance-norm scale
tmp17 = tmp15 + tmp16  # instance-norm shift
tmp18 = tl.full([1], 0, tl.int32)
tmp19 = triton_helpers.maximum(tmp18, tmp17)
tl.store(out_ptr0 + (x5), tmp19, xmask)
''', device_str='cuda')
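# For reference only: a sketch (an assumption, not part of the generated
# module) of this fusion -- affine instance norm with the precomputed inverse
# std, ReLU (y_3), nearest 2x upsample 32 -> 64 (x_in_1), then a 1-pixel
# reflection pad to 66 (out_43). mean and inv_std are assumed (N, C, 1, 1).
def _reference_relu_upsample_pad(conv_out, mean, inv_std, weight, bias):
    import torch.nn.functional as F
    z = (conv_out - mean) * inv_std
    z = z * weight.view(1, -1, 1, 1) + bias.view(1, -1, 1, 1)
    z = torch.relu(z)                                     # y_3
    z = F.interpolate(z, scale_factor=2, mode='nearest')  # x_in_1
    return F.pad(z, (1, 1, 1, 1), mode='reflect')         # out_43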
# kernel path: runs/run_shard_0/inductor_cache/th/cthl4msq2bdgpn742l3webz5mqwgninyvmg573gu2uxszfsmpn4m.py
# Topologically Sorted Source Nodes: [y_4, out_45], Original ATen: [aten.relu, aten.reflection_pad2d]
# Source node to ATen node mapping:
# out_45 => _unsafe_index_32, _unsafe_index_33
# y_4 => relu_9
# Graph fragment:
# %relu_9 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%view_29,), kwargs = {})
# %_unsafe_index_32 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%relu_9, [None, None, %sub_1, None]), kwargs = {})
# %_unsafe_index_33 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%_unsafe_index_32, [None, None, None, %sub_1]), kwargs = {})
triton_poi_fused_reflection_pad2d_relu_20 = async_compile.triton('triton_poi_fused_reflection_pad2d_relu_20', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_reflection_pad2d_relu_20', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_reflection_pad2d_relu_20(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 663552
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 72
x1 = (xindex // 72) % 72
x2 = (xindex // 5184)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (4095 + ((-1)*(tl_math.abs((-63) + (tl_math.abs((-4) + x0))))) + ((-64)*(tl_math.abs((-63) + (tl_math.abs((-4) + x1))))) + (4096*x2)), None, eviction_policy='evict_last')  # 4-pixel reflection for both spatial dims folded into one flat offset (72 -> 64)
tmp1 = tl.load(in_ptr1 + (x2), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x2), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (x2), None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x2), None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1  # center with the mean
tmp4 = tmp2 * tmp3  # multiply by the precomputed inverse std
tmp6 = tmp4 * tmp5  # instance-norm scale
tmp8 = tmp6 + tmp7  # instance-norm shift
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tl.store(out_ptr0 + (x3), tmp10, None)
''', device_str='cuda')
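# For reference only: a sketch (an assumption, not part of the generated
# module) of the last padding fusion -- affine instance norm, ReLU (y_4),
# then a 4-pixel reflection pad 64 -> 72 (out_45), the halo required by the
# following 9x9 convolution.
def _reference_relu_pad4(x, mean, inv_std, weight, bias):
    import torch.nn.functional as F
    z = (x - mean) * inv_std
    z = z * weight.view(1, -1, 1, 1) + bias.view(1, -1, 1, 1)
    return F.pad(torch.relu(z), (4, 4, 4, 4), mode='reflect')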
# kernel path: runs/run_shard_0/inductor_cache/e7/ce74uqtoket5nfthmxg424ua6qpeecce5sbwlb43qck4fh7zcxd5.py
# Topologically Sorted Source Nodes: [out_46], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# out_46 => convolution_15
# Graph fragment:
# %convolution_15 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%_unsafe_index_33, %primals_62, %primals_63, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_21 = async_compile.triton('triton_poi_fused_convolution_21', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_21', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_21(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 49152
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 3
tmp0 = tl.load(in_out_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1  # add the per-channel bias to the final conv output
tl.store(in_out_ptr0 + (x3), tmp2, None)
''', device_str='cuda')
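# For reference only: a sketch (an assumption, not part of the generated
# module) of the final kernel -- the 9x9 output convolution runs bias-free
# through extern_kernels.convolution, and this pointwise kernel folds the
# per-channel bias back in.
def _reference_bias_add(conv_out, bias):
    return conv_out + bias.view(1, -1, 1, 1)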
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63 = args
args.clear()
assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_2, (32, 3, 9, 9), (243, 81, 9, 1))
assert_size_stride(primals_3, (32, ), (1, ))
assert_size_stride(primals_4, (32, ), (1, ))
assert_size_stride(primals_5, (32, ), (1, ))
assert_size_stride(primals_6, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_7, (64, ), (1, ))
assert_size_stride(primals_8, (64, ), (1, ))
assert_size_stride(primals_9, (64, ), (1, ))
assert_size_stride(primals_10, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_11, (128, ), (1, ))
assert_size_stride(primals_12, (128, ), (1, ))
assert_size_stride(primals_13, (128, ), (1, ))
assert_size_stride(primals_14, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_15, (128, ), (1, ))
assert_size_stride(primals_16, (128, ), (1, ))
assert_size_stride(primals_17, (128, ), (1, ))
assert_size_stride(primals_18, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_19, (128, ), (1, ))
assert_size_stride(primals_20, (128, ), (1, ))
assert_size_stride(primals_21, (128, ), (1, ))
assert_size_stride(primals_22, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_23, (128, ), (1, ))
assert_size_stride(primals_24, (128, ), (1, ))
assert_size_stride(primals_25, (128, ), (1, ))
assert_size_stride(primals_26, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_27, (128, ), (1, ))
assert_size_stride(primals_28, (128, ), (1, ))
assert_size_stride(primals_29, (128, ), (1, ))
assert_size_stride(primals_30, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_31, (128, ), (1, ))
assert_size_stride(primals_32, (128, ), (1, ))
assert_size_stride(primals_33, (128, ), (1, ))
assert_size_stride(primals_34, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_35, (128, ), (1, ))
assert_size_stride(primals_36, (128, ), (1, ))
assert_size_stride(primals_37, (128, ), (1, ))
assert_size_stride(primals_38, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_39, (128, ), (1, ))
assert_size_stride(primals_40, (128, ), (1, ))
assert_size_stride(primals_41, (128, ), (1, ))
assert_size_stride(primals_42, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_43, (128, ), (1, ))
assert_size_stride(primals_44, (128, ), (1, ))
assert_size_stride(primals_45, (128, ), (1, ))
assert_size_stride(primals_46, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_47, (128, ), (1, ))
assert_size_stride(primals_48, (128, ), (1, ))
assert_size_stride(primals_49, (128, ), (1, ))
assert_size_stride(primals_50, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_51, (128, ), (1, ))
assert_size_stride(primals_52, (128, ), (1, ))
assert_size_stride(primals_53, (128, ), (1, ))
assert_size_stride(primals_54, (64, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_55, (64, ), (1, ))
assert_size_stride(primals_56, (64, ), (1, ))
assert_size_stride(primals_57, (64, ), (1, ))
assert_size_stride(primals_58, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_59, (32, ), (1, ))
assert_size_stride(primals_60, (32, ), (1, ))
assert_size_stride(primals_61, (32, ), (1, ))
assert_size_stride(primals_62, (3, 32, 9, 9), (2592, 81, 9, 1))
assert_size_stride(primals_63, (3, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 3, 72, 72), (15552, 5184, 72, 1), torch.float32)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.reflection_pad2d]
stream0 = get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0.run(primals_1, buf0, 62208, grid=grid(62208), stream=stream0)
del primals_1
# Topologically Sorted Source Nodes: [out_1], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf2 = buf1; del buf1 # reuse
buf5 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 1, 1), torch.float32)
buf6 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128), torch.float32)
buf8 = reinterpret_tensor(buf6, (1, 128, 1, 1), (128, 1, 1, 1), 0); del buf6 # reuse
# Topologically Sorted Source Nodes: [out_1, instance_norm], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
triton_red_fused__native_batch_norm_legit_convolution_1.run(buf2, buf8, primals_3, buf5, 128, 4096, grid=grid(128), stream=stream0)
del primals_3
buf3 = empty_strided_cuda((128, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [instance_norm], Original ATen: [aten.repeat]
triton_poi_fused_repeat_2.run(primals_4, buf3, 128, grid=grid(128), stream=stream0)
del primals_4
buf4 = empty_strided_cuda((128, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [instance_norm], Original ATen: [aten.repeat]
triton_poi_fused_repeat_2.run(primals_5, buf4, 128, grid=grid(128), stream=stream0)
del primals_5
buf9 = empty_strided_cuda((4, 32, 66, 66), (139392, 4356, 66, 1), torch.float32)
# Topologically Sorted Source Nodes: [y, out_2], Original ATen: [aten.relu, aten.reflection_pad2d]
triton_poi_fused_reflection_pad2d_relu_3.run(buf2, buf5, buf8, buf3, buf4, buf9, 557568, grid=grid(557568), stream=stream0)
# Topologically Sorted Source Nodes: [out_3], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf9, primals_6, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf11 = buf10; del buf10 # reuse
buf14 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 1, 1), torch.float32)
buf15 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32)
buf17 = reinterpret_tensor(buf15, (1, 256, 1, 1), (256, 1, 1, 1), 0); del buf15 # reuse
# Topologically Sorted Source Nodes: [out_3, instance_norm_1], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_convolution_4.run(buf11, buf17, primals_7, buf14, 256, 1024, grid=grid(256), stream=stream0)
del primals_7
buf12 = empty_strided_cuda((256, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [instance_norm_1], Original ATen: [aten.repeat]
triton_poi_fused_repeat_5.run(primals_8, buf12, 256, grid=grid(256), stream=stream0)
del primals_8
buf13 = empty_strided_cuda((256, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [instance_norm_1], Original ATen: [aten.repeat]
triton_poi_fused_repeat_5.run(primals_9, buf13, 256, grid=grid(256), stream=stream0)
del primals_9
buf18 = empty_strided_cuda((4, 64, 34, 34), (73984, 1156, 34, 1), torch.float32)
# Topologically Sorted Source Nodes: [y_1, out_4], Original ATen: [aten.relu, aten.reflection_pad2d]
triton_poi_fused_reflection_pad2d_relu_6.run(buf11, buf14, buf17, buf12, buf13, buf18, 295936, grid=grid(295936), stream=stream0)
# Topologically Sorted Source Nodes: [out_5], Original ATen: [aten.convolution]
buf19 = extern_kernels.convolution(buf18, primals_10, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf19, (4, 128, 16, 16), (32768, 256, 16, 1))
buf21 = empty_strided_cuda((512, ), (1, ), torch.float32)
buf22 = empty_strided_cuda((512, ), (1, ), torch.float32)
buf20 = buf19; del buf19 # reuse
buf23 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch.float32)
buf24 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32)
buf26 = reinterpret_tensor(buf24, (1, 512, 1, 1), (512, 1, 1, 1), 0); del buf24 # reuse
buf27 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_5, instance_norm_2, y_2], Original ATen: [aten.convolution, aten.repeat, aten._native_batch_norm_legit, aten.relu]
triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_7.run(buf20, buf26, primals_12, primals_13, primals_11, buf21, buf22, buf23, buf27, 512, 256, grid=grid(512), stream=stream0)
del primals_11
del primals_12
del primals_13
buf28 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_6], Original ATen: [aten.reflection_pad2d]
triton_poi_fused_reflection_pad2d_8.run(buf27, buf28, 165888, grid=grid(165888), stream=stream0)
# Topologically Sorted Source Nodes: [out_7], Original ATen: [aten.convolution]
buf29 = extern_kernels.convolution(buf28, primals_14, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf29, (4, 128, 16, 16), (32768, 256, 16, 1))
buf30 = buf29; del buf29 # reuse
buf33 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch.float32)
buf34 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32)
buf36 = reinterpret_tensor(buf34, (1, 512, 1, 1), (512, 1, 1, 1), 0); del buf34 # reuse
# Topologically Sorted Source Nodes: [out_7, instance_norm_3], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_convolution_9.run(buf30, buf36, primals_15, buf33, 512, 256, grid=grid(512), stream=stream0)
del primals_15
buf31 = empty_strided_cuda((512, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [instance_norm_3], Original ATen: [aten.repeat]
triton_poi_fused_repeat_10.run(primals_16, buf31, 512, grid=grid(512), stream=stream0)
del primals_16
buf32 = empty_strided_cuda((512, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [instance_norm_3], Original ATen: [aten.repeat]
triton_poi_fused_repeat_10.run(primals_17, buf32, 512, grid=grid(512), stream=stream0)
del primals_17
buf37 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_8, out_9], Original ATen: [aten.relu, aten.reflection_pad2d]
triton_poi_fused_reflection_pad2d_relu_11.run(buf30, buf33, buf36, buf31, buf32, buf37, 165888, grid=grid(165888), stream=stream0)
# Topologically Sorted Source Nodes: [out_10], Original ATen: [aten.convolution]
buf38 = extern_kernels.convolution(buf37, primals_18, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf38, (4, 128, 16, 16), (32768, 256, 16, 1))
buf40 = empty_strided_cuda((512, ), (1, ), torch.float32)
buf39 = buf38; del buf38 # reuse
buf41 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32)
buf45 = buf27; del buf27 # reuse
buf44 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32)
# Topologically Sorted Source Nodes: [out_10, out_11, out_12], Original ATen: [aten.convolution, aten.repeat, aten._native_batch_norm_legit, aten.add]
triton_per_fused__native_batch_norm_legit_add_convolution_repeat_12.run(buf39, buf45, primals_20, primals_19, primals_21, buf40, buf41, buf44, 512, 256, grid=grid(512), stream=stream0)
del primals_19
del primals_20
del primals_21
buf46 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_13], Original ATen: [aten.reflection_pad2d]
triton_poi_fused_reflection_pad2d_8.run(buf45, buf46, 165888, grid=grid(165888), stream=stream0)
# Topologically Sorted Source Nodes: [out_14], Original ATen: [aten.convolution]
buf47 = extern_kernels.convolution(buf46, primals_22, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf47, (4, 128, 16, 16), (32768, 256, 16, 1))
buf48 = buf47; del buf47 # reuse
buf51 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch.float32)
buf52 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32)
buf54 = reinterpret_tensor(buf52, (1, 512, 1, 1), (512, 1, 1, 1), 0); del buf52 # reuse
# Topologically Sorted Source Nodes: [out_14, instance_norm_5], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_convolution_9.run(buf48, buf54, primals_23, buf51, 512, 256, grid=grid(512), stream=stream0)
del primals_23
buf49 = empty_strided_cuda((512, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [instance_norm_5], Original ATen: [aten.repeat]
triton_poi_fused_repeat_10.run(primals_24, buf49, 512, grid=grid(512), stream=stream0)
del primals_24
buf50 = empty_strided_cuda((512, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [instance_norm_5], Original ATen: [aten.repeat]
triton_poi_fused_repeat_10.run(primals_25, buf50, 512, grid=grid(512), stream=stream0)
del primals_25
buf55 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_15, out_16], Original ATen: [aten.relu, aten.reflection_pad2d]
triton_poi_fused_reflection_pad2d_relu_11.run(buf48, buf51, buf54, buf49, buf50, buf55, 165888, grid=grid(165888), stream=stream0)
# Topologically Sorted Source Nodes: [out_17], Original ATen: [aten.convolution]
buf56 = extern_kernels.convolution(buf55, primals_26, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf56, (4, 128, 16, 16), (32768, 256, 16, 1))
buf58 = empty_strided_cuda((512, ), (1, ), torch.float32)
buf57 = buf56; del buf56 # reuse
buf59 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32)
buf63 = buf45; del buf45 # reuse
buf62 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32)
# Topologically Sorted Source Nodes: [out_17, out_18, out_19], Original ATen: [aten.convolution, aten.repeat, aten._native_batch_norm_legit, aten.add]
triton_per_fused__native_batch_norm_legit_add_convolution_repeat_12.run(buf57, buf63, primals_28, primals_27, primals_29, buf58, buf59, buf62, 512, 256, grid=grid(512), stream=stream0)
del primals_27
del primals_28
del primals_29
buf64 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_20], Original ATen: [aten.reflection_pad2d]
triton_poi_fused_reflection_pad2d_8.run(buf63, buf64, 165888, grid=grid(165888), stream=stream0)
# Topologically Sorted Source Nodes: [out_21], Original ATen: [aten.convolution]
buf65 = extern_kernels.convolution(buf64, primals_30, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf65, (4, 128, 16, 16), (32768, 256, 16, 1))
buf66 = buf65; del buf65 # reuse
buf69 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch.float32)
buf70 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32)
buf72 = reinterpret_tensor(buf70, (1, 512, 1, 1), (512, 1, 1, 1), 0); del buf70 # reuse
# Topologically Sorted Source Nodes: [out_21, instance_norm_7], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_convolution_9.run(buf66, buf72, primals_31, buf69, 512, 256, grid=grid(512), stream=stream0)
del primals_31
buf67 = empty_strided_cuda((512, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [instance_norm_7], Original ATen: [aten.repeat]
triton_poi_fused_repeat_10.run(primals_32, buf67, 512, grid=grid(512), stream=stream0)
del primals_32
buf68 = empty_strided_cuda((512, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [instance_norm_7], Original ATen: [aten.repeat]
triton_poi_fused_repeat_10.run(primals_33, buf68, 512, grid=grid(512), stream=stream0)
del primals_33
buf73 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_22, out_23], Original ATen: [aten.relu, aten.reflection_pad2d]
triton_poi_fused_reflection_pad2d_relu_11.run(buf66, buf69, buf72, buf67, buf68, buf73, 165888, grid=grid(165888), stream=stream0)
# Topologically Sorted Source Nodes: [out_24], Original ATen: [aten.convolution]
buf74 = extern_kernels.convolution(buf73, primals_34, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf74, (4, 128, 16, 16), (32768, 256, 16, 1))
buf76 = empty_strided_cuda((512, ), (1, ), torch.float32)
buf75 = buf74; del buf74 # reuse
buf77 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32)
buf81 = buf63; del buf63 # reuse
buf80 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32)
# Topologically Sorted Source Nodes: [out_24, out_25, out_26], Original ATen: [aten.convolution, aten.repeat, aten._native_batch_norm_legit, aten.add]
triton_per_fused__native_batch_norm_legit_add_convolution_repeat_12.run(buf75, buf81, primals_36, primals_35, primals_37, buf76, buf77, buf80, 512, 256, grid=grid(512), stream=stream0)
del primals_35
del primals_36
del primals_37
buf82 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_27], Original ATen: [aten.reflection_pad2d]
triton_poi_fused_reflection_pad2d_8.run(buf81, buf82, 165888, grid=grid(165888), stream=stream0)
# Topologically Sorted Source Nodes: [out_28], Original ATen: [aten.convolution]
buf83 = extern_kernels.convolution(buf82, primals_38, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf83, (4, 128, 16, 16), (32768, 256, 16, 1))
buf84 = buf83; del buf83 # reuse
buf87 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch.float32)
buf88 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32)
buf90 = reinterpret_tensor(buf88, (1, 512, 1, 1), (512, 1, 1, 1), 0); del buf88 # reuse
# Topologically Sorted Source Nodes: [out_28, instance_norm_9], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_convolution_9.run(buf84, buf90, primals_39, buf87, 512, 256, grid=grid(512), stream=stream0)
del primals_39
buf85 = empty_strided_cuda((512, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [instance_norm_9], Original ATen: [aten.repeat]
triton_poi_fused_repeat_10.run(primals_40, buf85, 512, grid=grid(512), stream=stream0)
del primals_40
buf86 = empty_strided_cuda((512, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [instance_norm_9], Original ATen: [aten.repeat]
triton_poi_fused_repeat_10.run(primals_41, buf86, 512, grid=grid(512), stream=stream0)
del primals_41
buf91 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_29, out_30], Original ATen: [aten.relu, aten.reflection_pad2d]
triton_poi_fused_reflection_pad2d_relu_11.run(buf84, buf87, buf90, buf85, buf86, buf91, 165888, grid=grid(165888), stream=stream0)
# Topologically Sorted Source Nodes: [out_31], Original ATen: [aten.convolution]
buf92 = extern_kernels.convolution(buf91, primals_42, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf92, (4, 128, 16, 16), (32768, 256, 16, 1))
buf94 = empty_strided_cuda((512, ), (1, ), torch.float32)
buf93 = buf92; del buf92 # reuse
buf95 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32)
buf99 = buf81; del buf81 # reuse
buf98 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32)
# Topologically Sorted Source Nodes: [out_31, out_32, out_33], Original ATen: [aten.convolution, aten.repeat, aten._native_batch_norm_legit, aten.add]
triton_per_fused__native_batch_norm_legit_add_convolution_repeat_12.run(buf93, buf99, primals_44, primals_43, primals_45, buf94, buf95, buf98, 512, 256, grid=grid(512), stream=stream0)
del primals_43
del primals_44
del primals_45
buf100 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_34], Original ATen: [aten.reflection_pad2d]
triton_poi_fused_reflection_pad2d_8.run(buf99, buf100, 165888, grid=grid(165888), stream=stream0)
# Topologically Sorted Source Nodes: [out_35], Original ATen: [aten.convolution]
buf101 = extern_kernels.convolution(buf100, primals_46, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf101, (4, 128, 16, 16), (32768, 256, 16, 1))
buf102 = buf101; del buf101 # reuse
buf105 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch.float32)
buf106 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32)
buf108 = reinterpret_tensor(buf106, (1, 512, 1, 1), (512, 1, 1, 1), 0); del buf106 # reuse
# Topologically Sorted Source Nodes: [out_35, instance_norm_11], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_convolution_9.run(buf102, buf108, primals_47, buf105, 512, 256, grid=grid(512), stream=stream0)
del primals_47
buf103 = empty_strided_cuda((512, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [instance_norm_11], Original ATen: [aten.repeat]
triton_poi_fused_repeat_10.run(primals_48, buf103, 512, grid=grid(512), stream=stream0)
del primals_48
buf104 = empty_strided_cuda((512, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [instance_norm_11], Original ATen: [aten.repeat]
triton_poi_fused_repeat_10.run(primals_49, buf104, 512, grid=grid(512), stream=stream0)
del primals_49
buf109 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_36, out_37], Original ATen: [aten.relu, aten.reflection_pad2d]
triton_poi_fused_reflection_pad2d_relu_11.run(buf102, buf105, buf108, buf103, buf104, buf109, 165888, grid=grid(165888), stream=stream0)
# Topologically Sorted Source Nodes: [out_38], Original ATen: [aten.convolution]
buf110 = extern_kernels.convolution(buf109, primals_50, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf110, (4, 128, 16, 16), (32768, 256, 16, 1))
buf111 = buf110; del buf110 # reuse
buf113 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32)
buf114 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32)
buf116 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512), torch.float32)
# Topologically Sorted Source Nodes: [out_38, out_39], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_convolution_13.run(buf111, primals_51, buf113, buf114, buf116, 512, 256, grid=grid(512), stream=stream0)
del primals_51
buf112 = empty_strided_cuda((512, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [out_39], Original ATen: [aten.repeat]
triton_poi_fused_repeat_10.run(primals_52, buf112, 512, grid=grid(512), stream=stream0)
del primals_52
buf117 = empty_strided_cuda((32, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_in], Original ATen: [aten.arange]
triton_poi_fused_arange_14.run(buf117, 32, grid=grid(32), stream=stream0)
buf118 = empty_strided_cuda((32, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_in], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
triton_poi_fused__to_copy_add_arange_mul_15.run(buf118, 32, grid=grid(32), stream=stream0)
buf119 = empty_strided_cuda((4, 128, 34, 34), (147968, 1156, 34, 1), torch.float32)
# Topologically Sorted Source Nodes: [out_40, x_in, out_41], Original ATen: [aten.add, aten._unsafe_index, aten.reflection_pad2d]
triton_poi_fused__unsafe_index_add_reflection_pad2d_16.run(buf118, buf111, buf113, buf114, buf112, primals_53, buf99, buf119, 591872, grid=grid(591872), stream=stream0)
del buf114
del buf99
del primals_53
# Topologically Sorted Source Nodes: [out_42], Original ATen: [aten.convolution]
buf120 = extern_kernels.convolution(buf119, primals_54, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf120, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf121 = buf120; del buf120 # reuse
buf124 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 1, 1), torch.float32)
buf125 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256), torch.float32)
buf127 = reinterpret_tensor(buf125, (1, 256, 1, 1), (256, 1, 1, 1), 0); del buf125 # reuse
# Topologically Sorted Source Nodes: [out_42, instance_norm_13], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
triton_per_fused__native_batch_norm_legit_convolution_4.run(buf121, buf127, primals_55, buf124, 256, 1024, grid=grid(256), stream=stream0)
del primals_55
buf122 = empty_strided_cuda((256, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [instance_norm_13], Original ATen: [aten.repeat]
triton_poi_fused_repeat_5.run(primals_56, buf122, 256, grid=grid(256), stream=stream0)
del primals_56
buf123 = empty_strided_cuda((256, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [instance_norm_13], Original ATen: [aten.repeat]
triton_poi_fused_repeat_5.run(primals_57, buf123, 256, grid=grid(256), stream=stream0)
del primals_57
buf128 = empty_strided_cuda((64, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_in_1], Original ATen: [aten.arange]
triton_poi_fused_arange_17.run(buf128, 64, grid=grid(64), stream=stream0)
buf129 = empty_strided_cuda((64, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_in_1], Original ATen: [aten.arange, aten.add, aten.mul, aten._to_copy]
triton_poi_fused__to_copy_add_arange_mul_18.run(buf129, 64, grid=grid(64), stream=stream0)
buf130 = empty_strided_cuda((4, 64, 66, 66), (278784, 4356, 66, 1), torch.float32)
# Topologically Sorted Source Nodes: [y_3, x_in_1, out_43], Original ATen: [aten.relu, aten._unsafe_index, aten.reflection_pad2d]
triton_poi_fused__unsafe_index_reflection_pad2d_relu_19.run(buf129, buf121, buf124, buf127, buf122, buf123, buf130, 1115136, grid=grid(1115136), stream=stream0)
# Topologically Sorted Source Nodes: [out_44], Original ATen: [aten.convolution]
buf131 = extern_kernels.convolution(buf130, primals_58, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf131, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf132 = buf131; del buf131 # reuse
buf135 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 1, 1), torch.float32)
buf136 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128), torch.float32)
buf138 = reinterpret_tensor(buf136, (1, 128, 1, 1), (128, 1, 1, 1), 0); del buf136 # reuse
# Topologically Sorted Source Nodes: [out_44, instance_norm_14], Original ATen: [aten.convolution, aten._native_batch_norm_legit]
triton_red_fused__native_batch_norm_legit_convolution_1.run(buf132, buf138, primals_59, buf135, 128, 4096, grid=grid(128), stream=stream0)
del primals_59
buf133 = empty_strided_cuda((128, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [instance_norm_14], Original ATen: [aten.repeat]
triton_poi_fused_repeat_2.run(primals_60, buf133, 128, grid=grid(128), stream=stream0)
del primals_60
buf134 = empty_strided_cuda((128, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [instance_norm_14], Original ATen: [aten.repeat]
triton_poi_fused_repeat_2.run(primals_61, buf134, 128, grid=grid(128), stream=stream0)
del primals_61
buf139 = empty_strided_cuda((4, 32, 72, 72), (165888, 5184, 72, 1), torch.float32)
# Topologically Sorted Source Nodes: [y_4, out_45], Original ATen: [aten.relu, aten.reflection_pad2d]
triton_poi_fused_reflection_pad2d_relu_20.run(buf132, buf135, buf138, buf133, buf134, buf139, 663552, grid=grid(663552), stream=stream0)
# Topologically Sorted Source Nodes: [out_46], Original ATen: [aten.convolution]
buf140 = extern_kernels.convolution(buf139, primals_62, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf140, (4, 3, 64, 64), (12288, 4096, 64, 1))
buf141 = buf140; del buf140 # reuse
# Topologically Sorted Source Nodes: [out_46], Original ATen: [aten.convolution]
triton_poi_fused_convolution_21.run(buf141, primals_63, 49152, grid=grid(49152), stream=stream0)
del primals_63
return (buf141, primals_2, primals_6, primals_10, primals_14, primals_18, primals_22, primals_26, primals_30, primals_34, primals_38, primals_42, primals_46, primals_50, primals_54, primals_58, primals_62, buf0, buf2, buf3, buf4, buf5, buf8, buf9, buf11, buf12, buf13, buf14, buf17, buf18, buf20, buf21, buf22, buf23, buf26, buf28, buf30, buf31, buf32, buf33, buf36, buf37, buf39, buf40, reinterpret_tensor(buf44, (512, ), (1, ), 0), buf46, buf48, buf49, buf50, buf51, buf54, buf55, buf57, buf58, reinterpret_tensor(buf62, (512, ), (1, ), 0), buf64, buf66, buf67, buf68, buf69, buf72, buf73, buf75, buf76, reinterpret_tensor(buf80, (512, ), (1, ), 0), buf82, buf84, buf85, buf86, buf87, buf90, buf91, buf93, buf94, reinterpret_tensor(buf98, (512, ), (1, ), 0), buf100, buf102, buf103, buf104, buf105, buf108, buf109, buf111, buf112, reinterpret_tensor(buf116, (512, ), (1, ), 0), buf117, buf118, buf119, buf121, buf122, buf123, buf124, buf127, buf128, buf129, buf130, buf132, buf133, buf134, buf135, buf138, buf139, reinterpret_tensor(buf113, (1, 512, 1, 1), (512, 1, 1, 1), 0), reinterpret_tensor(buf95, (1, 512, 1, 1), (512, 1, 1, 1), 0), reinterpret_tensor(buf77, (1, 512, 1, 1), (512, 1, 1, 1), 0), reinterpret_tensor(buf59, (1, 512, 1, 1), (512, 1, 1, 1), 0), reinterpret_tensor(buf41, (1, 512, 1, 1), (512, 1, 1, 1), 0), )
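# For reference only: a sketch (an assumption, not a generated helper) of how
# the compiled entry point is used. call() clears the argument list it is
# given and returns the (4, 3, 64, 64) output first, followed by tensors
# retained for the backward pass.
def _reference_invoke(args):
    outputs = call(list(args))  # copy: call() consumes its argument list
    return outputs[0]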
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 3, 64, 64), (12288, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, 3, 9, 9), (243, 81, 9, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((64, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_25 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_26 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_27 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_28 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_29 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_30 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_31 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_32 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_33 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_34 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_35 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_36 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_37 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_38 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_39 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_40 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_41 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_42 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_43 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_44 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_45 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_46 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_47 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_48 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_49 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_50 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_51 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_52 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_53 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_54 = rand_strided((64, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_55 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_56 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_57 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_58 = rand_strided((32, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_59 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_60 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_61 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_62 = rand_strided((3, 32, 9, 9), (2592, 81, 9, 1), device='cuda:0', dtype=torch.float32)
primals_63 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47, primals_48, primals_49, primals_50, primals_51, primals_52, primals_53, primals_54, primals_55, primals_56, primals_57, primals_58, primals_59, primals_60, primals_61, primals_62, primals_63])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
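# Running this file directly benchmarks the compiled graph: `print_performance`
# reports a latency estimate over `times` calls, repeated `repeat` times, on
# freshly allocated random inputs matching the shapes asserted in `call`.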
|
import torch
class ConvLayer(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride):
super(ConvLayer, self).__init__()
reflection_padding = kernel_size // 2
self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding)
self.conv2d = torch.nn.Conv2d(in_channels, out_channels,
kernel_size, stride)
def forward(self, x):
out = self.reflection_pad(x)
out = self.conv2d(out)
return out
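# ConvLayer mirror-pads by kernel_size // 2, so a stride-1 convolution
# preserves spatial size. A quick sketch (shapes only):
#   layer = ConvLayer(3, 32, kernel_size=9, stride=1)
#   layer(torch.rand(1, 3, 64, 64)).shape  # torch.Size([1, 32, 64, 64])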
class ResidualBlock(torch.nn.Module):
"""ResidualBlock
introduced in: https://arxiv.org/abs/1512.03385
recommended architecture: http://torch.ch/blog/2016/02/04/resnets.html
"""
def __init__(self, channels):
super(ResidualBlock, self).__init__()
self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1)
self.in1 = torch.nn.InstanceNorm2d(channels, affine=True)
self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1)
self.in2 = torch.nn.InstanceNorm2d(channels, affine=True)
self.relu = torch.nn.ReLU()
def forward(self, x):
residual = x
out = self.relu(self.in1(self.conv1(x)))
out = self.in2(self.conv2(out))
out = out + residual
return out
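# Note: the residual add requires conv1/conv2 to preserve both channel count
# and spatial size (ConvLayer with stride=1 guarantees the latter), and no
# ReLU is applied after the addition, matching the recommended architecture
# linked in the docstring.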
class UpsampleConvLayer(torch.nn.Module):
"""UpsampleConvLayer
Upsamples the input and then does a convolution. This method gives better results
compared to ConvTranspose2d.
ref: http://distill.pub/2016/deconv-checkerboard/
"""
def __init__(self, in_channels, out_channels, kernel_size, stride,
upsample=None):
super(UpsampleConvLayer, self).__init__()
self.upsample = upsample
reflection_padding = kernel_size // 2
self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding)
self.conv2d = torch.nn.Conv2d(in_channels, out_channels,
kernel_size, stride)
def forward(self, x):
x_in = x
if self.upsample:
x_in = torch.nn.functional.interpolate(x_in, mode='nearest',
scale_factor=self.upsample)
out = self.reflection_pad(x_in)
out = self.conv2d(out)
return out
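# With upsample=2, nearest-neighbor interpolation doubles H and W before the
# padded convolution. A quick sketch (shapes only):
#   layer = UpsampleConvLayer(128, 64, kernel_size=3, stride=1, upsample=2)
#   layer(torch.rand(1, 128, 16, 16)).shape  # torch.Size([1, 64, 32, 32])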
class TransformerNet(torch.nn.Module):
"""
From https://github.com/pytorch/examples/blob/master/fast_neural_style/neural_style/transformer_net.py
"""
def __init__(self):
super(TransformerNet, self).__init__()
self.conv1 = ConvLayer(3, 32, kernel_size=9, stride=1)
self.in1 = torch.nn.InstanceNorm2d(32, affine=True)
self.conv2 = ConvLayer(32, 64, kernel_size=3, stride=2)
self.in2 = torch.nn.InstanceNorm2d(64, affine=True)
self.conv3 = ConvLayer(64, 128, kernel_size=3, stride=2)
self.in3 = torch.nn.InstanceNorm2d(128, affine=True)
self.res1 = ResidualBlock(128)
self.res2 = ResidualBlock(128)
self.res3 = ResidualBlock(128)
self.res4 = ResidualBlock(128)
self.res5 = ResidualBlock(128)
self.deconv1 = UpsampleConvLayer(128, 64, kernel_size=3, stride=1,
upsample=2)
self.in4 = torch.nn.InstanceNorm2d(64, affine=True)
self.deconv2 = UpsampleConvLayer(64, 32, kernel_size=3, stride=1,
upsample=2)
self.in5 = torch.nn.InstanceNorm2d(32, affine=True)
self.deconv3 = ConvLayer(32, 3, kernel_size=9, stride=1)
self.relu = torch.nn.ReLU()
def forward(self, X):
y = self.relu(self.in1(self.conv1(X)))
y = self.relu(self.in2(self.conv2(y)))
y = self.relu(self.in3(self.conv3(y)))
y = self.res1(y)
y = self.res2(y)
y = self.res3(y)
y = self.res4(y)
y = self.res5(y)
y = self.relu(self.in4(self.deconv1(y)))
y = self.relu(self.in5(self.deconv2(y)))
y = self.deconv3(y)
return y
def get_inputs():
return [torch.rand([4, 3, 64, 64])]
def get_init_inputs():
return [[], {}]
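# Minimal eager-mode smoke test (a sketch; runs on CPU):
#   net = TransformerNet()
#   out = net(get_inputs()[0])
#   assert out.shape == (4, 3, 64, 64)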
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_reflection_pad2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 62208
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 72
x1 = xindex // 72 % 72
x2 = xindex // 5184
x3 = xindex
tmp0 = tl.load(in_ptr0 + (4095 + -1 * tl_math.abs(-63 + tl_math.abs(-4 +
x0)) + -64 * tl_math.abs(-63 + tl_math.abs(-4 + x1)) + 4096 * x2),
xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, xmask)
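# The single flat-index expression above implements ReflectionPad2d(4) on a
# 64x64 map. A plain-Python reference of the same index math (a sketch, not
# used by the kernel):
def _reflect_index_sketch(x0, x1, x2, size=64, pad=4):
    # mirror the padded coordinates back into [0, size - 1], then flatten
    col = (size - 1) - abs(abs(x0 - pad) - (size - 1))
    row = (size - 1) - abs(abs(x1 - pad) - (size - 1))
    return col + size * row + size * size * x2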
@triton.jit
def triton_red_fused__native_batch_norm_legit_convolution_1(in_out_ptr0,
in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr,
RBLOCK: tl.constexpr):
xnumel = 128
rnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rbase = tl.arange(0, RBLOCK)[None, :]
x3 = xindex
x0 = xindex % 32
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp4_mean = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp4_m2 = tl.zeros([XBLOCK, RBLOCK], tl.float32)
tmp4_weight = tl.zeros([XBLOCK, RBLOCK], tl.float32)
for roffset in range(0, rnumel, RBLOCK):
rindex = roffset + rbase
rmask = rindex < rnumel
r2 = rindex
tmp0 = tl.load(in_out_ptr0 + (r2 + 4096 * x3), rmask & xmask,
eviction_policy='evict_first', other=0.0)
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [XBLOCK, RBLOCK])
tmp4_mean_next, tmp4_m2_next, tmp4_weight_next = (triton_helpers.
welford_reduce(tmp3, tmp4_mean, tmp4_m2, tmp4_weight, roffset == 0)
)
tmp4_mean = tl.where(rmask & xmask, tmp4_mean_next, tmp4_mean)
tmp4_m2 = tl.where(rmask & xmask, tmp4_m2_next, tmp4_m2)
tmp4_weight = tl.where(rmask & xmask, tmp4_weight_next, tmp4_weight)
tl.store(in_out_ptr0 + (r2 + 4096 * x3), tmp2, rmask & xmask)
tmp4_tmp, tmp5_tmp, tmp6_tmp = triton_helpers.welford(tmp4_mean,
tmp4_m2, tmp4_weight, 1)
tmp4 = tmp4_tmp[:, None]
tmp5 = tmp5_tmp[:, None]
    tmp6_tmp[:, None]  # Welford weight output; discarded (only mean and m2 are used)
tl.store(out_ptr0 + x3, tmp4, xmask)
tmp7 = 4096.0
tmp8 = tmp5 / tmp7
tmp9 = 1e-05
tmp10 = tmp8 + tmp9
tmp11 = libdevice.rsqrt(tmp10)
tl.debug_barrier()
tl.store(in_out_ptr1 + x3, tmp11, xmask)
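# This reduction streams 4096 pixels per (sample, channel) slot through
# Welford's algorithm, then stores the mean and rsqrt(var + eps) used by the
# instance norm. A scalar reference of the recurrence (a sketch):
def _welford_sketch(values, eps=1e-05):
    mean, m2, count = 0.0, 0.0, 0.0
    for v in values:
        count += 1.0
        delta = v - mean
        mean += delta / count
        m2 += delta * (v - mean)
    var = m2 / count  # population variance, as instance norm uses
    return mean, (var + eps) ** -0.5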
@triton.jit
def triton_poi_fused_repeat_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0 % 32, xmask)
tl.store(out_ptr0 + x0, tmp0, xmask)
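# The "repeat" kernels tile the per-channel affine parameters across the
# batch: with batch 4 and 32 channels, out[i] = weight[i % 32] fills the 128
# (sample, channel) slots that the instance-norm statistics are indexed by.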
@triton.jit
def triton_poi_fused_reflection_pad2d_relu_3(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 557568
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 66
x1 = xindex // 66 % 66
x2 = xindex // 4356
x3 = xindex
tmp0 = tl.load(in_ptr0 + (4095 + -1 * tl_math.abs(-63 + tl_math.abs(-1 +
x0)) + -64 * tl_math.abs(-63 + tl_math.abs(-1 + x1)) + 4096 * x2),
xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tl.store(out_ptr0 + x3, tmp10, xmask)
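# This kernel fuses the instance-norm epilogue, the ReLU, and the next
# layer's reflection padding into one pass:
#   y = relu((x - mean) * rstd * gamma + beta)
# with x gathered through the mirrored-index formula, so the padded,
# activated map is written out directly for the following convolution.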
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_4(in_out_ptr0,
in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
    tl.full([RBLOCK], True, tl.int1)  # all-true mask; emitted by codegen but unused
    rindex = tl.arange(0, RBLOCK)[:]
    tl.full([RBLOCK], True, tl.int1)  # all-true mask; emitted by codegen but unused
r2 = rindex
x3 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (r2 + 1024 * x3), None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = tl.broadcast_to(tmp3, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
tmp8 = tl.full([1], 1024, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp3 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 1024.0
tmp17 = tmp15 / tmp16
tmp18 = 1e-05
tmp19 = tmp17 + tmp18
tmp20 = libdevice.rsqrt(tmp19)
tl.store(in_out_ptr0 + (r2 + 1024 * x3), tmp2, None)
tl.debug_barrier()
tl.store(in_out_ptr1 + x3, tmp20, None)
tl.store(out_ptr0 + x3, tmp10, None)
@triton.jit
def triton_poi_fused_repeat_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0 % 64, xmask)
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_reflection_pad2d_relu_6(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 295936
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 34
x1 = xindex // 34 % 34
x2 = xindex // 1156
x3 = xindex
tmp0 = tl.load(in_ptr0 + (1023 + -1 * tl_math.abs(-31 + tl_math.abs(-1 +
x0)) + -32 * tl_math.abs(-31 + tl_math.abs(-1 + x1)) + 1024 * x2),
xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tl.store(out_ptr0 + x3, tmp10, xmask)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_7(
in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1,
out_ptr2, out_ptr3, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
x0 = xindex
r3 = rindex
x1 = xindex % 128
tmp0 = tl.load(in_ptr0 + x0 % 128, None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x0 % 128, None, eviction_policy='evict_last')
tmp2 = tl.load(in_out_ptr0 + (r3 + 256 * x0), None)
tmp3 = tl.load(in_ptr2 + x1, None, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp5 = tl.broadcast_to(tmp4, [RBLOCK])
tmp7 = tl.broadcast_to(tmp5, [RBLOCK])
tmp9 = triton_helpers.promote_to_tensor(tl.sum(tmp7, 0))
tmp10 = tl.full([1], 256, tl.int32)
tmp11 = tmp10.to(tl.float32)
tmp12 = tmp9 / tmp11
tmp13 = tmp5 - tmp12
tmp14 = tmp13 * tmp13
tmp15 = tl.broadcast_to(tmp14, [RBLOCK])
tmp17 = triton_helpers.promote_to_tensor(tl.sum(tmp15, 0))
tmp18 = 256.0
tmp19 = tmp17 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp4 - tmp12
tmp24 = tmp23 * tmp22
tmp25 = tmp24 * tmp0
tmp26 = tmp25 + tmp1
tmp27 = tl.full([1], 0, tl.int32)
tmp28 = triton_helpers.maximum(tmp27, tmp26)
tl.store(out_ptr0 + x0, tmp0, None)
tl.store(out_ptr1 + x0, tmp1, None)
tl.store(in_out_ptr0 + (r3 + 256 * x0), tmp4, None)
tl.debug_barrier()
tl.store(in_out_ptr1 + x0, tmp22, None)
tl.store(out_ptr3 + (r3 + 256 * x0), tmp28, None)
tl.store(out_ptr2 + x0, tmp12, None)
@triton.jit
def triton_poi_fused_reflection_pad2d_8(in_ptr0, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 18
x1 = xindex // 18 % 18
x2 = xindex // 324
x3 = xindex
tmp0 = tl.load(in_ptr0 + (255 + -1 * tl_math.abs(-15 + tl_math.abs(-1 +
x0)) + -16 * tl_math.abs(-15 + tl_math.abs(-1 + x1)) + 256 * x2),
None, eviction_policy='evict_last')
tl.store(out_ptr0 + x3, tmp0, None)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_9(in_out_ptr0,
in_out_ptr1, in_ptr0, out_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (r2 + 256 * x3), None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = tl.broadcast_to(tmp3, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
tmp8 = tl.full([1], 256, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp3 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 256.0
tmp17 = tmp15 / tmp16
tmp18 = 1e-05
tmp19 = tmp17 + tmp18
tmp20 = libdevice.rsqrt(tmp19)
tl.store(in_out_ptr0 + (r2 + 256 * x3), tmp2, None)
tl.debug_barrier()
tl.store(in_out_ptr1 + x3, tmp20, None)
tl.store(out_ptr0 + x3, tmp10, None)
@triton.jit
def triton_poi_fused_repeat_10(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0 % 128, xmask)
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused_reflection_pad2d_relu_11(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 18
x1 = xindex // 18 % 18
x2 = xindex // 324
x3 = xindex
tmp0 = tl.load(in_ptr0 + (255 + -1 * tl_math.abs(-15 + tl_math.abs(-1 +
x0)) + -16 * tl_math.abs(-15 + tl_math.abs(-1 + x1)) + 256 * x2),
None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x2, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tl.store(out_ptr0 + x3, tmp10, None)
@triton.jit
def triton_per_fused__native_batch_norm_legit_add_convolution_repeat_12(
in_out_ptr0, in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr1,
out_ptr3, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
x0 = xindex
r3 = rindex
x1 = xindex % 128
tmp0 = tl.load(in_ptr0 + x0 % 128, None, eviction_policy='evict_last')
tmp1 = tl.load(in_out_ptr0 + (r3 + 256 * x0), None)
tmp2 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr2 + x1, None, eviction_policy='evict_last')
tmp27 = tl.load(in_out_ptr1 + (r3 + 256 * x0), None)
tmp3 = tmp1 + tmp2
tmp4 = tl.broadcast_to(tmp3, [RBLOCK])
tmp6 = tl.broadcast_to(tmp4, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = tl.full([1], 256, tl.int32)
tmp10 = tmp9.to(tl.float32)
tmp11 = tmp8 / tmp10
tmp12 = tmp4 - tmp11
tmp13 = tmp12 * tmp12
tmp14 = tl.broadcast_to(tmp13, [RBLOCK])
tmp16 = triton_helpers.promote_to_tensor(tl.sum(tmp14, 0))
tmp17 = tmp3 - tmp11
tmp18 = 256.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp24 = tmp23 * tmp0
tmp26 = tmp24 + tmp25
tmp28 = tmp26 + tmp27
tl.store(out_ptr0 + x0, tmp0, None)
tl.store(in_out_ptr0 + (r3 + 256 * x0), tmp3, None)
tl.store(in_out_ptr1 + (r3 + 256 * x0), tmp28, None)
tl.store(out_ptr3 + x0, tmp22, None)
tl.store(out_ptr1 + x0, tmp11, None)
@triton.jit
def triton_per_fused__native_batch_norm_legit_convolution_13(in_out_ptr0,
in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r2 = rindex
x3 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (r2 + 256 * x3), None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = tl.broadcast_to(tmp3, [RBLOCK])
tmp7 = triton_helpers.promote_to_tensor(tl.sum(tmp5, 0))
tmp8 = tl.full([1], 256, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp3 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = 256.0
tmp17 = tmp15 / tmp16
tmp18 = 1e-05
tmp19 = tmp17 + tmp18
tmp20 = libdevice.rsqrt(tmp19)
tl.store(in_out_ptr0 + (r2 + 256 * x3), tmp2, None)
tl.store(out_ptr2 + x3, tmp20, None)
tl.store(out_ptr0 + x3, tmp10, None)
tl.store(out_ptr1 + x3, tmp15, None)
@triton.jit
def triton_poi_fused_arange_14(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_15(out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
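# Index map for 2x nearest-neighbor upsampling: output coordinate i reads
# source coordinate int(i * 0.5), e.g. [0, 1, 2, 3] -> [0, 0, 1, 1].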
@triton.jit
def triton_poi_fused__unsafe_index_add_reflection_pad2d_16(in_ptr0, in_ptr1,
in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 34 % 34
x0 = xindex % 34
x4 = xindex // 1156
x2 = xindex // 1156 % 128
x7 = xindex
tmp0 = tl.load(in_ptr0 + (31 + -1 * tl_math.abs(-31 + tl_math.abs(-1 +
x1))), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (31 + -1 * tl_math.abs(-31 + tl_math.abs(-1 +
x0))), None, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + x4, None, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr3 + x4, None, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr4 + x4, None, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr5 + x2, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 16, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (tmp8 + 16 * tmp4 + 256 * x4), None,
eviction_policy='evict_last')
tmp11 = tmp9 - tmp10
tmp13 = 256.0
tmp14 = tmp12 / tmp13
tmp15 = 1e-05
tmp16 = tmp14 + tmp15
tmp17 = libdevice.rsqrt(tmp16)
tmp18 = tmp11 * tmp17
tmp20 = tmp18 * tmp19
tmp22 = tmp20 + tmp21
tmp23 = tl.load(in_ptr6 + (tmp8 + 16 * tmp4 + 256 * x4), None,
eviction_policy='evict_last')
tmp24 = tmp22 + tmp23
tl.store(out_ptr0 + x7, tmp24, None)
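# This kernel chains several decoder steps in one pass: it normalizes the
# last residual conv (turning the raw m2 sum into rsqrt(m2 / 256 + eps)
# inline), applies the affine, adds the residual stream, and gathers the
# result through the 2x nearest-upsample indices while writing the
# reflection-padded input for the following convolution.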
@triton.jit
def triton_poi_fused_arange_17(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tl.store(out_ptr0 + x0, tmp0, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_arange_mul_18(out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = tmp3.to(tl.int32)
tl.store(out_ptr0 + x0, tmp4, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_reflection_pad2d_relu_19(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK:
tl.constexpr):
xnumel = 1115136
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 66 % 66
x0 = xindex % 66
x2 = xindex // 4356
x5 = xindex
tmp0 = tl.load(in_ptr0 + (63 + -1 * tl_math.abs(-63 + tl_math.abs(-1 +
x1))), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (63 + -1 * tl_math.abs(-63 + tl_math.abs(-1 +
x0))), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr3 + x2, xmask, eviction_policy='evict_last')
tmp14 = tl.load(in_ptr4 + x2, xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr5 + x2, xmask, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 32, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr1 + (tmp8 + 32 * tmp4 + 1024 * x2), xmask,
eviction_policy='evict_last')
tmp11 = tmp9 - tmp10
tmp13 = tmp11 * tmp12
tmp15 = tmp13 * tmp14
tmp17 = tmp15 + tmp16
tmp18 = tl.full([1], 0, tl.int32)
tmp19 = triton_helpers.maximum(tmp18, tmp17)
tl.store(out_ptr0 + x5, tmp19, xmask)
@triton.jit
def triton_poi_fused_reflection_pad2d_relu_20(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 72
x1 = xindex // 72 % 72
x2 = xindex // 5184
x3 = xindex
tmp0 = tl.load(in_ptr0 + (4095 + -1 * tl_math.abs(-63 + tl_math.abs(-4 +
x0)) + -64 * tl_math.abs(-63 + tl_math.abs(-4 + x1)) + 4096 * x2),
None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x2, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + x2, None, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp8 = tmp6 + tmp7
tmp9 = tl.full([1], 0, tl.int32)
tmp10 = triton_helpers.maximum(tmp9, tmp8)
tl.store(out_ptr0 + x3, tmp10, None)
@triton.jit
def triton_poi_fused_convolution_21(in_out_ptr0, in_ptr0, xnumel, XBLOCK:
tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 3
tmp0 = tl.load(in_out_ptr0 + x3, None)
tmp1 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x3, tmp2, None)
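# Plain convolution bias epilogue: out[n, c, h, w] += bias[c], with the
# channel recovered from the flat index as (xindex // 4096) % 3.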
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31, primals_32,
primals_33, primals_34, primals_35, primals_36, primals_37,
primals_38, primals_39, primals_40, primals_41, primals_42,
primals_43, primals_44, primals_45, primals_46, primals_47,
primals_48, primals_49, primals_50, primals_51, primals_52,
primals_53, primals_54, primals_55, primals_56, primals_57,
primals_58, primals_59, primals_60, primals_61, primals_62, primals_63
) = args
args.clear()
assert_size_stride(primals_1, (4, 3, 64, 64), (12288, 4096, 64, 1))
assert_size_stride(primals_2, (32, 3, 9, 9), (243, 81, 9, 1))
assert_size_stride(primals_3, (32,), (1,))
assert_size_stride(primals_4, (32,), (1,))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (64, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (64,), (1,))
assert_size_stride(primals_9, (64,), (1,))
assert_size_stride(primals_10, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_11, (128,), (1,))
assert_size_stride(primals_12, (128,), (1,))
assert_size_stride(primals_13, (128,), (1,))
assert_size_stride(primals_14, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_15, (128,), (1,))
assert_size_stride(primals_16, (128,), (1,))
assert_size_stride(primals_17, (128,), (1,))
assert_size_stride(primals_18, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_19, (128,), (1,))
assert_size_stride(primals_20, (128,), (1,))
assert_size_stride(primals_21, (128,), (1,))
assert_size_stride(primals_22, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_23, (128,), (1,))
assert_size_stride(primals_24, (128,), (1,))
assert_size_stride(primals_25, (128,), (1,))
assert_size_stride(primals_26, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_27, (128,), (1,))
assert_size_stride(primals_28, (128,), (1,))
assert_size_stride(primals_29, (128,), (1,))
assert_size_stride(primals_30, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_31, (128,), (1,))
assert_size_stride(primals_32, (128,), (1,))
assert_size_stride(primals_33, (128,), (1,))
assert_size_stride(primals_34, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_35, (128,), (1,))
assert_size_stride(primals_36, (128,), (1,))
assert_size_stride(primals_37, (128,), (1,))
assert_size_stride(primals_38, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_39, (128,), (1,))
assert_size_stride(primals_40, (128,), (1,))
assert_size_stride(primals_41, (128,), (1,))
assert_size_stride(primals_42, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_43, (128,), (1,))
assert_size_stride(primals_44, (128,), (1,))
assert_size_stride(primals_45, (128,), (1,))
assert_size_stride(primals_46, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_47, (128,), (1,))
assert_size_stride(primals_48, (128,), (1,))
assert_size_stride(primals_49, (128,), (1,))
assert_size_stride(primals_50, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_51, (128,), (1,))
assert_size_stride(primals_52, (128,), (1,))
assert_size_stride(primals_53, (128,), (1,))
assert_size_stride(primals_54, (64, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_55, (64,), (1,))
assert_size_stride(primals_56, (64,), (1,))
assert_size_stride(primals_57, (64,), (1,))
assert_size_stride(primals_58, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_59, (32,), (1,))
assert_size_stride(primals_60, (32,), (1,))
assert_size_stride(primals_61, (32,), (1,))
assert_size_stride(primals_62, (3, 32, 9, 9), (2592, 81, 9, 1))
assert_size_stride(primals_63, (3,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 3, 72, 72), (15552, 5184, 72, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_reflection_pad2d_0[grid(62208)](primals_1, buf0,
62208, XBLOCK=256, num_warps=4, num_stages=1)
del primals_1
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf2 = buf1
del buf1
buf5 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 1, 1), torch.float32
)
buf6 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128), torch
.float32)
buf8 = reinterpret_tensor(buf6, (1, 128, 1, 1), (128, 1, 1, 1), 0)
del buf6
triton_red_fused__native_batch_norm_legit_convolution_1[grid(128)](buf2
, buf8, primals_3, buf5, 128, 4096, XBLOCK=1, RBLOCK=2048,
num_warps=16, num_stages=1)
del primals_3
buf3 = empty_strided_cuda((128,), (1,), torch.float32)
triton_poi_fused_repeat_2[grid(128)](primals_4, buf3, 128, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_4
buf4 = empty_strided_cuda((128,), (1,), torch.float32)
triton_poi_fused_repeat_2[grid(128)](primals_5, buf4, 128, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_5
buf9 = empty_strided_cuda((4, 32, 66, 66), (139392, 4356, 66, 1),
torch.float32)
triton_poi_fused_reflection_pad2d_relu_3[grid(557568)](buf2, buf5,
buf8, buf3, buf4, buf9, 557568, XBLOCK=512, num_warps=8,
num_stages=1)
buf10 = extern_kernels.convolution(buf9, primals_6, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf11 = buf10
del buf10
buf14 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 1, 1), torch.
float32)
buf15 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
buf17 = reinterpret_tensor(buf15, (1, 256, 1, 1), (256, 1, 1, 1), 0)
del buf15
triton_per_fused__native_batch_norm_legit_convolution_4[grid(256)](
buf11, buf17, primals_7, buf14, 256, 1024, num_warps=8,
num_stages=1)
del primals_7
buf12 = empty_strided_cuda((256,), (1,), torch.float32)
triton_poi_fused_repeat_5[grid(256)](primals_8, buf12, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_8
buf13 = empty_strided_cuda((256,), (1,), torch.float32)
triton_poi_fused_repeat_5[grid(256)](primals_9, buf13, 256, XBLOCK=
128, num_warps=4, num_stages=1)
del primals_9
buf18 = empty_strided_cuda((4, 64, 34, 34), (73984, 1156, 34, 1),
torch.float32)
triton_poi_fused_reflection_pad2d_relu_6[grid(295936)](buf11, buf14,
buf17, buf12, buf13, buf18, 295936, XBLOCK=1024, num_warps=4,
num_stages=1)
buf19 = extern_kernels.convolution(buf18, primals_10, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf19, (4, 128, 16, 16), (32768, 256, 16, 1))
buf21 = empty_strided_cuda((512,), (1,), torch.float32)
buf22 = empty_strided_cuda((512,), (1,), torch.float32)
buf20 = buf19
del buf19
buf23 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch.
float32)
buf24 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
buf26 = reinterpret_tensor(buf24, (1, 512, 1, 1), (512, 1, 1, 1), 0)
del buf24
buf27 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1),
torch.float32)
triton_per_fused__native_batch_norm_legit_convolution_relu_repeat_7[
grid(512)](buf20, buf26, primals_12, primals_13, primals_11,
buf21, buf22, buf23, buf27, 512, 256, num_warps=2, num_stages=1)
del primals_11
del primals_12
del primals_13
buf28 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1),
torch.float32)
triton_poi_fused_reflection_pad2d_8[grid(165888)](buf27, buf28,
165888, XBLOCK=512, num_warps=8, num_stages=1)
buf29 = extern_kernels.convolution(buf28, primals_14, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf29, (4, 128, 16, 16), (32768, 256, 16, 1))
buf30 = buf29
del buf29
buf33 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch.
float32)
buf34 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
buf36 = reinterpret_tensor(buf34, (1, 512, 1, 1), (512, 1, 1, 1), 0)
del buf34
triton_per_fused__native_batch_norm_legit_convolution_9[grid(512)](
buf30, buf36, primals_15, buf33, 512, 256, num_warps=2,
num_stages=1)
del primals_15
buf31 = empty_strided_cuda((512,), (1,), torch.float32)
triton_poi_fused_repeat_10[grid(512)](primals_16, buf31, 512,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_16
buf32 = empty_strided_cuda((512,), (1,), torch.float32)
triton_poi_fused_repeat_10[grid(512)](primals_17, buf32, 512,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_17
buf37 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1),
torch.float32)
triton_poi_fused_reflection_pad2d_relu_11[grid(165888)](buf30,
buf33, buf36, buf31, buf32, buf37, 165888, XBLOCK=1024,
num_warps=4, num_stages=1)
buf38 = extern_kernels.convolution(buf37, primals_18, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf38, (4, 128, 16, 16), (32768, 256, 16, 1))
buf40 = empty_strided_cuda((512,), (1,), torch.float32)
buf39 = buf38
del buf38
buf41 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
buf45 = buf27
del buf27
buf44 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
triton_per_fused__native_batch_norm_legit_add_convolution_repeat_12[
grid(512)](buf39, buf45, primals_20, primals_19, primals_21,
buf40, buf41, buf44, 512, 256, num_warps=2, num_stages=1)
del primals_19
del primals_20
del primals_21
buf46 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1),
torch.float32)
triton_poi_fused_reflection_pad2d_8[grid(165888)](buf45, buf46,
165888, XBLOCK=512, num_warps=8, num_stages=1)
buf47 = extern_kernels.convolution(buf46, primals_22, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf47, (4, 128, 16, 16), (32768, 256, 16, 1))
buf48 = buf47
del buf47
buf51 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch.
float32)
buf52 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
buf54 = reinterpret_tensor(buf52, (1, 512, 1, 1), (512, 1, 1, 1), 0)
del buf52
triton_per_fused__native_batch_norm_legit_convolution_9[grid(512)](
buf48, buf54, primals_23, buf51, 512, 256, num_warps=2,
num_stages=1)
del primals_23
buf49 = empty_strided_cuda((512,), (1,), torch.float32)
triton_poi_fused_repeat_10[grid(512)](primals_24, buf49, 512,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_24
buf50 = empty_strided_cuda((512,), (1,), torch.float32)
triton_poi_fused_repeat_10[grid(512)](primals_25, buf50, 512,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_25
buf55 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1),
torch.float32)
triton_poi_fused_reflection_pad2d_relu_11[grid(165888)](buf48,
buf51, buf54, buf49, buf50, buf55, 165888, XBLOCK=1024,
num_warps=4, num_stages=1)
buf56 = extern_kernels.convolution(buf55, primals_26, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf56, (4, 128, 16, 16), (32768, 256, 16, 1))
buf58 = empty_strided_cuda((512,), (1,), torch.float32)
buf57 = buf56
del buf56
buf59 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
buf63 = buf45
del buf45
buf62 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
triton_per_fused__native_batch_norm_legit_add_convolution_repeat_12[
grid(512)](buf57, buf63, primals_28, primals_27, primals_29,
buf58, buf59, buf62, 512, 256, num_warps=2, num_stages=1)
del primals_27
del primals_28
del primals_29
buf64 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1),
torch.float32)
triton_poi_fused_reflection_pad2d_8[grid(165888)](buf63, buf64,
165888, XBLOCK=512, num_warps=8, num_stages=1)
buf65 = extern_kernels.convolution(buf64, primals_30, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf65, (4, 128, 16, 16), (32768, 256, 16, 1))
buf66 = buf65
del buf65
buf69 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch.
float32)
buf70 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
buf72 = reinterpret_tensor(buf70, (1, 512, 1, 1), (512, 1, 1, 1), 0)
del buf70
triton_per_fused__native_batch_norm_legit_convolution_9[grid(512)](
buf66, buf72, primals_31, buf69, 512, 256, num_warps=2,
num_stages=1)
del primals_31
buf67 = empty_strided_cuda((512,), (1,), torch.float32)
triton_poi_fused_repeat_10[grid(512)](primals_32, buf67, 512,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_32
buf68 = empty_strided_cuda((512,), (1,), torch.float32)
triton_poi_fused_repeat_10[grid(512)](primals_33, buf68, 512,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_33
buf73 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1),
torch.float32)
triton_poi_fused_reflection_pad2d_relu_11[grid(165888)](buf66,
buf69, buf72, buf67, buf68, buf73, 165888, XBLOCK=1024,
num_warps=4, num_stages=1)
buf74 = extern_kernels.convolution(buf73, primals_34, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf74, (4, 128, 16, 16), (32768, 256, 16, 1))
buf76 = empty_strided_cuda((512,), (1,), torch.float32)
buf75 = buf74
del buf74
buf77 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
buf81 = buf63
del buf63
buf80 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
triton_per_fused__native_batch_norm_legit_add_convolution_repeat_12[
grid(512)](buf75, buf81, primals_36, primals_35, primals_37,
buf76, buf77, buf80, 512, 256, num_warps=2, num_stages=1)
del primals_35
del primals_36
del primals_37
buf82 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1),
torch.float32)
triton_poi_fused_reflection_pad2d_8[grid(165888)](buf81, buf82,
165888, XBLOCK=512, num_warps=8, num_stages=1)
buf83 = extern_kernels.convolution(buf82, primals_38, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf83, (4, 128, 16, 16), (32768, 256, 16, 1))
buf84 = buf83
del buf83
buf87 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch.
float32)
buf88 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
buf90 = reinterpret_tensor(buf88, (1, 512, 1, 1), (512, 1, 1, 1), 0)
del buf88
triton_per_fused__native_batch_norm_legit_convolution_9[grid(512)](
buf84, buf90, primals_39, buf87, 512, 256, num_warps=2,
num_stages=1)
del primals_39
buf85 = empty_strided_cuda((512,), (1,), torch.float32)
triton_poi_fused_repeat_10[grid(512)](primals_40, buf85, 512,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_40
buf86 = empty_strided_cuda((512,), (1,), torch.float32)
triton_poi_fused_repeat_10[grid(512)](primals_41, buf86, 512,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_41
buf91 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1),
torch.float32)
triton_poi_fused_reflection_pad2d_relu_11[grid(165888)](buf84,
buf87, buf90, buf85, buf86, buf91, 165888, XBLOCK=1024,
num_warps=4, num_stages=1)
buf92 = extern_kernels.convolution(buf91, primals_42, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf92, (4, 128, 16, 16), (32768, 256, 16, 1))
buf94 = empty_strided_cuda((512,), (1,), torch.float32)
buf93 = buf92
del buf92
buf95 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
buf99 = buf81
del buf81
buf98 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
triton_per_fused__native_batch_norm_legit_add_convolution_repeat_12[
grid(512)](buf93, buf99, primals_44, primals_43, primals_45,
buf94, buf95, buf98, 512, 256, num_warps=2, num_stages=1)
del primals_43
del primals_44
del primals_45
buf100 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1),
torch.float32)
triton_poi_fused_reflection_pad2d_8[grid(165888)](buf99, buf100,
165888, XBLOCK=512, num_warps=8, num_stages=1)
buf101 = extern_kernels.convolution(buf100, primals_46, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf101, (4, 128, 16, 16), (32768, 256, 16, 1))
buf102 = buf101
del buf101
buf105 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 1, 1), torch.
float32)
buf106 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
buf108 = reinterpret_tensor(buf106, (1, 512, 1, 1), (512, 1, 1, 1), 0)
del buf106
triton_per_fused__native_batch_norm_legit_convolution_9[grid(512)](
buf102, buf108, primals_47, buf105, 512, 256, num_warps=2,
num_stages=1)
del primals_47
buf103 = empty_strided_cuda((512,), (1,), torch.float32)
triton_poi_fused_repeat_10[grid(512)](primals_48, buf103, 512,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_48
buf104 = empty_strided_cuda((512,), (1,), torch.float32)
triton_poi_fused_repeat_10[grid(512)](primals_49, buf104, 512,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_49
buf109 = empty_strided_cuda((4, 128, 18, 18), (41472, 324, 18, 1),
torch.float32)
triton_poi_fused_reflection_pad2d_relu_11[grid(165888)](buf102,
buf105, buf108, buf103, buf104, buf109, 165888, XBLOCK=1024,
num_warps=4, num_stages=1)
buf110 = extern_kernels.convolution(buf109, primals_50, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf110, (4, 128, 16, 16), (32768, 256, 16, 1))
buf111 = buf110
del buf110
buf113 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
buf114 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
buf116 = empty_strided_cuda((1, 512, 1, 1), (512, 1, 512, 512),
torch.float32)
triton_per_fused__native_batch_norm_legit_convolution_13[grid(512)](
            buf111, primals_51, buf113, buf114, buf116, 512, 256,
            num_warps=2, num_stages=1)
del primals_51
buf112 = empty_strided_cuda((512,), (1,), torch.float32)
triton_poi_fused_repeat_10[grid(512)](primals_52, buf112, 512,
XBLOCK=256, num_warps=4, num_stages=1)
del primals_52
buf117 = empty_strided_cuda((32,), (1,), torch.int64)
triton_poi_fused_arange_14[grid(32)](buf117, 32, XBLOCK=32,
num_warps=1, num_stages=1)
buf118 = empty_strided_cuda((32,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_15[grid(32)](buf118, 32,
XBLOCK=32, num_warps=1, num_stages=1)
buf119 = empty_strided_cuda((4, 128, 34, 34), (147968, 1156, 34, 1),
torch.float32)
triton_poi_fused__unsafe_index_add_reflection_pad2d_16[grid(591872)](
buf118, buf111, buf113, buf114, buf112, primals_53, buf99,
buf119, 591872, XBLOCK=512, num_warps=8, num_stages=1)
del buf114
del buf99
del primals_53
buf120 = extern_kernels.convolution(buf119, primals_54, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf120, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf121 = buf120
del buf120
buf124 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 1, 1), torch.
float32)
buf125 = empty_strided_cuda((1, 256, 1, 1), (256, 1, 256, 256),
torch.float32)
buf127 = reinterpret_tensor(buf125, (1, 256, 1, 1), (256, 1, 1, 1), 0)
del buf125
triton_per_fused__native_batch_norm_legit_convolution_4[grid(256)](
buf121, buf127, primals_55, buf124, 256, 1024, num_warps=8,
num_stages=1)
del primals_55
buf122 = empty_strided_cuda((256,), (1,), torch.float32)
triton_poi_fused_repeat_5[grid(256)](primals_56, buf122, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_56
buf123 = empty_strided_cuda((256,), (1,), torch.float32)
triton_poi_fused_repeat_5[grid(256)](primals_57, buf123, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_57
buf128 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused_arange_17[grid(64)](buf128, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf129 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused__to_copy_add_arange_mul_18[grid(64)](buf129, 64,
XBLOCK=64, num_warps=1, num_stages=1)
buf130 = empty_strided_cuda((4, 64, 66, 66), (278784, 4356, 66, 1),
torch.float32)
triton_poi_fused__unsafe_index_reflection_pad2d_relu_19[grid(1115136)](
buf129, buf121, buf124, buf127, buf122, buf123, buf130, 1115136,
XBLOCK=1024, num_warps=4, num_stages=1)
buf131 = extern_kernels.convolution(buf130, primals_58, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf131, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf132 = buf131
del buf131
buf135 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 1, 1), torch.
float32)
buf136 = empty_strided_cuda((1, 128, 1, 1), (128, 1, 128, 128),
torch.float32)
buf138 = reinterpret_tensor(buf136, (1, 128, 1, 1), (128, 1, 1, 1), 0)
del buf136
triton_red_fused__native_batch_norm_legit_convolution_1[grid(128)](
            buf132, buf138, primals_59, buf135, 128, 4096, XBLOCK=1,
            RBLOCK=2048, num_warps=16, num_stages=1)
del primals_59
buf133 = empty_strided_cuda((128,), (1,), torch.float32)
triton_poi_fused_repeat_2[grid(128)](primals_60, buf133, 128,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_60
buf134 = empty_strided_cuda((128,), (1,), torch.float32)
triton_poi_fused_repeat_2[grid(128)](primals_61, buf134, 128,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_61
buf139 = empty_strided_cuda((4, 32, 72, 72), (165888, 5184, 72, 1),
torch.float32)
triton_poi_fused_reflection_pad2d_relu_20[grid(663552)](buf132,
buf135, buf138, buf133, buf134, buf139, 663552, XBLOCK=1024,
num_warps=4, num_stages=1)
buf140 = extern_kernels.convolution(buf139, primals_62, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf140, (4, 3, 64, 64), (12288, 4096, 64, 1))
buf141 = buf140
del buf140
triton_poi_fused_convolution_21[grid(49152)](buf141, primals_63,
49152, XBLOCK=256, num_warps=4, num_stages=1)
del primals_63
return (buf141, primals_2, primals_6, primals_10, primals_14,
primals_18, primals_22, primals_26, primals_30, primals_34,
primals_38, primals_42, primals_46, primals_50, primals_54,
primals_58, primals_62, buf0, buf2, buf3, buf4, buf5, buf8, buf9,
buf11, buf12, buf13, buf14, buf17, buf18, buf20, buf21, buf22,
buf23, buf26, buf28, buf30, buf31, buf32, buf33, buf36, buf37,
buf39, buf40, reinterpret_tensor(buf44, (512,), (1,), 0), buf46,
buf48, buf49, buf50, buf51, buf54, buf55, buf57, buf58,
reinterpret_tensor(buf62, (512,), (1,), 0), buf64, buf66, buf67,
buf68, buf69, buf72, buf73, buf75, buf76, reinterpret_tensor(buf80,
(512,), (1,), 0), buf82, buf84, buf85, buf86, buf87, buf90, buf91,
buf93, buf94, reinterpret_tensor(buf98, (512,), (1,), 0), buf100,
buf102, buf103, buf104, buf105, buf108, buf109, buf111, buf112,
reinterpret_tensor(buf116, (512,), (1,), 0), buf117, buf118, buf119,
buf121, buf122, buf123, buf124, buf127, buf128, buf129, buf130,
buf132, buf133, buf134, buf135, buf138, buf139, reinterpret_tensor(
buf113, (1, 512, 1, 1), (512, 1, 1, 1), 0), reinterpret_tensor(
buf95, (1, 512, 1, 1), (512, 1, 1, 1), 0), reinterpret_tensor(buf77,
(1, 512, 1, 1), (512, 1, 1, 1), 0), reinterpret_tensor(buf59, (1,
512, 1, 1), (512, 1, 1, 1), 0), reinterpret_tensor(buf41, (1, 512,
1, 1), (512, 1, 1, 1), 0))
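# The first returned element is the stylized image (buf141); the remaining
# tensors appear to be the weights and intermediates Inductor keeps alive
# for the backward graph of this '0_forward' compilation.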
class ConvLayer(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride):
super(ConvLayer, self).__init__()
reflection_padding = kernel_size // 2
self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding)
self.conv2d = torch.nn.Conv2d(in_channels, out_channels,
kernel_size, stride)
def forward(self, x):
out = self.reflection_pad(x)
out = self.conv2d(out)
return out
class ResidualBlock(torch.nn.Module):
"""ResidualBlock
introduced in: https://arxiv.org/abs/1512.03385
recommended architecture: http://torch.ch/blog/2016/02/04/resnets.html
"""
def __init__(self, channels):
super(ResidualBlock, self).__init__()
self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1)
self.in1 = torch.nn.InstanceNorm2d(channels, affine=True)
self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1)
self.in2 = torch.nn.InstanceNorm2d(channels, affine=True)
self.relu = torch.nn.ReLU()
def forward(self, x):
residual = x
out = self.relu(self.in1(self.conv1(x)))
out = self.in2(self.conv2(out))
out = out + residual
return out
class UpsampleConvLayer(torch.nn.Module):
"""UpsampleConvLayer
Upsamples the input and then does a convolution. This method gives better results
compared to ConvTranspose2d.
ref: http://distill.pub/2016/deconv-checkerboard/
"""
def __init__(self, in_channels, out_channels, kernel_size, stride,
upsample=None):
super(UpsampleConvLayer, self).__init__()
self.upsample = upsample
reflection_padding = kernel_size // 2
self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding)
self.conv2d = torch.nn.Conv2d(in_channels, out_channels,
kernel_size, stride)
def forward(self, x):
x_in = x
if self.upsample:
x_in = torch.nn.functional.interpolate(x_in, mode='nearest',
scale_factor=self.upsample)
out = self.reflection_pad(x_in)
out = self.conv2d(out)
return out
class TransformerNetNew(torch.nn.Module):
"""
From https://github.com/pytorch/examples/blob/master/fast_neural_style/neural_style/transformer_net.py
"""
def __init__(self):
super(TransformerNetNew, self).__init__()
self.conv1 = ConvLayer(3, 32, kernel_size=9, stride=1)
self.in1 = torch.nn.InstanceNorm2d(32, affine=True)
self.conv2 = ConvLayer(32, 64, kernel_size=3, stride=2)
self.in2 = torch.nn.InstanceNorm2d(64, affine=True)
self.conv3 = ConvLayer(64, 128, kernel_size=3, stride=2)
self.in3 = torch.nn.InstanceNorm2d(128, affine=True)
self.res1 = ResidualBlock(128)
self.res2 = ResidualBlock(128)
self.res3 = ResidualBlock(128)
self.res4 = ResidualBlock(128)
self.res5 = ResidualBlock(128)
self.deconv1 = UpsampleConvLayer(128, 64, kernel_size=3, stride=1,
upsample=2)
self.in4 = torch.nn.InstanceNorm2d(64, affine=True)
self.deconv2 = UpsampleConvLayer(64, 32, kernel_size=3, stride=1,
upsample=2)
self.in5 = torch.nn.InstanceNorm2d(32, affine=True)
self.deconv3 = ConvLayer(32, 3, kernel_size=9, stride=1)
self.relu = torch.nn.ReLU()
def forward(self, input_0):
primals_2 = self.conv1.conv2d.weight
primals_3 = self.conv1.conv2d.bias
primals_4 = self.in1.weight
primals_5 = self.in1.bias
primals_6 = self.conv2.conv2d.weight
primals_7 = self.conv2.conv2d.bias
primals_8 = self.in2.weight
primals_9 = self.in2.bias
primals_10 = self.conv3.conv2d.weight
primals_11 = self.conv3.conv2d.bias
primals_12 = self.in3.weight
primals_13 = self.in3.bias
primals_14 = self.res1.conv1.conv2d.weight
primals_15 = self.res1.conv1.conv2d.bias
primals_16 = self.res1.in1.weight
primals_17 = self.res1.in1.bias
primals_18 = self.res1.conv2.conv2d.weight
primals_19 = self.res1.conv2.conv2d.bias
primals_20 = self.res1.in2.weight
primals_21 = self.res1.in2.bias
primals_22 = self.res2.conv1.conv2d.weight
primals_23 = self.res2.conv1.conv2d.bias
primals_24 = self.res2.in1.weight
primals_25 = self.res2.in1.bias
primals_26 = self.res2.conv2.conv2d.weight
primals_27 = self.res2.conv2.conv2d.bias
primals_28 = self.res2.in2.weight
primals_29 = self.res2.in2.bias
primals_30 = self.res3.conv1.conv2d.weight
primals_31 = self.res3.conv1.conv2d.bias
primals_32 = self.res3.in1.weight
primals_33 = self.res3.in1.bias
primals_34 = self.res3.conv2.conv2d.weight
primals_35 = self.res3.conv2.conv2d.bias
primals_36 = self.res3.in2.weight
primals_37 = self.res3.in2.bias
primals_38 = self.res4.conv1.conv2d.weight
primals_39 = self.res4.conv1.conv2d.bias
primals_40 = self.res4.in1.weight
primals_41 = self.res4.in1.bias
primals_42 = self.res4.conv2.conv2d.weight
primals_43 = self.res4.conv2.conv2d.bias
primals_44 = self.res4.in2.weight
primals_45 = self.res4.in2.bias
primals_46 = self.res5.conv1.conv2d.weight
primals_47 = self.res5.conv1.conv2d.bias
primals_48 = self.res5.in1.weight
primals_49 = self.res5.in1.bias
primals_50 = self.res5.conv2.conv2d.weight
primals_51 = self.res5.conv2.conv2d.bias
primals_52 = self.res5.in2.weight
primals_53 = self.res5.in2.bias
primals_54 = self.deconv1.conv2d.weight
primals_55 = self.deconv1.conv2d.bias
primals_56 = self.in4.weight
primals_57 = self.in4.bias
primals_58 = self.deconv2.conv2d.weight
primals_59 = self.deconv2.conv2d.bias
primals_60 = self.in5.weight
primals_61 = self.in5.bias
primals_62 = self.deconv3.conv2d.weight
primals_63 = self.deconv3.conv2d.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31, primals_32, primals_33, primals_34,
primals_35, primals_36, primals_37, primals_38, primals_39,
primals_40, primals_41, primals_42, primals_43, primals_44,
primals_45, primals_46, primals_47, primals_48, primals_49,
primals_50, primals_51, primals_52, primals_53, primals_54,
primals_55, primals_56, primals_57, primals_58, primals_59,
primals_60, primals_61, primals_62, primals_63])
return output[0]
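# Usage sketch for the compiled wrapper (assumes a CUDA device, since `call`
# allocates CUDA buffers):
#   model = TransformerNetNew().cuda()
#   styled = model(torch.rand(4, 3, 64, 64, device='cuda'))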
|
repo_name: EdenBD/MultiModalStory-demo
module_name: TransformerNet
synthetic: false
uuid: 13699
licenses: ["Apache-2.0"]
stars: 154
sha: 5e95e2aca766ca7c850e8db4973b8d51dfdba7f8
repo_link: https://github.com/EdenBD/MultiModalStory-demo/tree/5e95e2aca766ca7c850e8db4973b8d51dfdba7f8
entry_point: CategoricalActor
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/r3/cr3febcwm3t44fuoitsx3ou2p6xg4sk4f7unagmmrvffasxf47te.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_1,), kwargs = {})
# %le_1 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_0 = async_compile.triton('triton_poi_fused_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
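# Note (added commentary, not part of the generated output): the fused kernel
# above performs the bias-add and ReLU in place and additionally stores the
# boolean mask relu(x) <= 0, which autograd's threshold_backward consumes --
# hence 'threshold_backward' in the kernel name even though this runs forward.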
# kernel path: runs/run_shard_0/inductor_cache/hz/chz2sqsqk26mwhf2dxhgh44jfpu2er5yqjftwkzfav5ctqtx5e7f.py
# Topologically Sorted Source Nodes: [prob], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# prob => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view_5, [-1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_5, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/3f/c3fx6bzkalkw7u7askqdnz4rzlcoyqiec4r434sjc5x3axxgkrmr.py
# Topologically Sorted Source Nodes: [prob], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# prob => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_2 = async_compile.triton('triton_poi_fused__softmax_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf0 # reuse
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0.run(buf1, primals_2, buf8, 256, grid=grid(256), stream=stream0)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf2 # reuse
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_0.run(buf3, primals_5, buf7, 256, grid=grid(256), stream=stream0)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_2], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [prob], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf4, buf5, 256, grid=grid(256), stream=stream0)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0); del buf4 # reuse
# Topologically Sorted Source Nodes: [prob], Original ATen: [aten._softmax]
triton_poi_fused__softmax_2.run(buf5, buf6, 256, grid=grid(256), stream=stream0)
del buf5
return (buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(buf3, (64, 4), (4, 1), 0), buf6, primals_6, buf7, primals_4, buf8, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch.distributions import Categorical
import torch.nn.functional as F
import torch.nn as nn
def weights_init_(m):
if isinstance(m, nn.Linear):
torch.nn.init.xavier_uniform_(m.weight, gain=1)
torch.nn.init.constant_(m.bias, 0)
class CategoricalActor(nn.Module):
def __init__(self, state_dim, hidden_dim, action_dim):
super(CategoricalActor, self).__init__()
self.linear1 = nn.Linear(state_dim, hidden_dim)
self.linear2 = nn.Linear(hidden_dim, hidden_dim)
self.linear3 = nn.Linear(hidden_dim, action_dim)
self.apply(weights_init_)
def forward(self, state):
x = F.relu(self.linear1(state))
x = F.relu(self.linear2(x))
x = self.linear3(x)
prob = F.softmax(x, -1)
return prob
def sample(self, state):
prob = self.forward(state)
distribution = Categorical(probs=prob)
sample_action = distribution.sample().unsqueeze(-1)
z = (prob == 0.0).float() * 1e-08
logprob = torch.log(prob + z)
greedy = torch.argmax(prob, dim=-1).unsqueeze(-1)
return sample_action, prob, logprob, greedy
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'state_dim': 4, 'hidden_dim': 4, 'action_dim': 4}]
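# --- Added usage sketch (illustrative, not part of the original source) ---
# Assumes the 4/4/4 dimensions reported by get_init_inputs(); runs on CPU.
if __name__ == '__main__':
    actor = CategoricalActor(state_dim=4, hidden_dim=4, action_dim=4)
    state = torch.rand(4, 4, 4, 4)       # same shape as get_inputs()
    prob = actor(state)                  # softmax over the last dim
    assert torch.allclose(prob.sum(-1), torch.ones(4, 4, 4))
    action, prob, logprob, greedy = actor.sample(state)
    print(action.shape, logprob.shape, greedy.shape)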
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch.distributions import Categorical
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_relu_threshold_backward_0(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_2(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
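# Note (added commentary): the two _softmax kernels above implement a
# numerically stable softmax over the last dim of a (..., 4) tensor in two
# passes: kernel 1 computes exp(x - rowmax) and kernel 2 divides by the row
# sum, together reproducing F.softmax(x, -1) from the eager module.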
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4, 4), (4, 1))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_3, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), out=buf0)
del primals_1
buf1 = reinterpret_tensor(buf0, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf0
buf8 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
get_raw_stream(0)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf1,
primals_2, buf8, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_2
buf2 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf1, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
buf3 = reinterpret_tensor(buf2, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf2
buf7 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_0[grid(256)](buf3,
primals_5, buf7, 256, XBLOCK=256, num_warps=4, num_stages=1)
del primals_5
buf4 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_7, reinterpret_tensor(buf3, (64, 4), (
4, 1), 0), reinterpret_tensor(primals_6, (4, 4), (1, 4), 0),
alpha=1, beta=1, out=buf4)
del primals_7
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused__softmax_1[grid(256)](buf4, buf5, 256, XBLOCK=128,
num_warps=4, num_stages=1)
buf6 = reinterpret_tensor(buf4, (4, 4, 4, 4), (64, 16, 4, 1), 0)
del buf4
triton_poi_fused__softmax_2[grid(256)](buf5, buf6, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del buf5
return buf6, reinterpret_tensor(primals_3, (64, 4), (4, 1), 0
), reinterpret_tensor(buf1, (64, 4), (4, 1), 0), reinterpret_tensor(
buf3, (64, 4), (4, 1), 0), buf6, primals_6, buf7, primals_4, buf8
def weights_init_(m):
if isinstance(m, nn.Linear):
torch.nn.init.xavier_uniform_(m.weight, gain=1)
torch.nn.init.constant_(m.bias, 0)
class CategoricalActorNew(nn.Module):
def __init__(self, state_dim, hidden_dim, action_dim):
super(CategoricalActorNew, self).__init__()
self.linear1 = nn.Linear(state_dim, hidden_dim)
self.linear2 = nn.Linear(hidden_dim, hidden_dim)
self.linear3 = nn.Linear(hidden_dim, action_dim)
self.apply(weights_init_)
def sample(self, state):
prob = self.forward(state)
distribution = Categorical(probs=prob)
sample_action = distribution.sample().unsqueeze(-1)
z = (prob == 0.0).float() * 1e-08
logprob = torch.log(prob + z)
greedy = torch.argmax(prob, dim=-1).unsqueeze(-1)
return sample_action, prob, logprob, greedy
def forward(self, input_0):
primals_1 = self.linear1.weight
primals_2 = self.linear1.bias
primals_4 = self.linear2.weight
primals_5 = self.linear2.bias
primals_6 = self.linear3.weight
primals_7 = self.linear3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
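# --- Added equivalence sketch (illustrative; assumes a CUDA device, since
# call() allocates on cuda:0, and assumes the eager CategoricalActor above
# is importable alongside this wrapper) ---
if __name__ == '__main__':
    eager = CategoricalActor(4, 4, 4).cuda()
    compiled = CategoricalActorNew(4, 4, 4).cuda()
    compiled.load_state_dict(eager.state_dict())  # share the same weights
    x = torch.rand(4, 4, 4, 4, device='cuda')
    with torch.no_grad():
        assert torch.allclose(eager(x), compiled(x), atol=1e-6)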
|
repo_name: FlickerNiko/ai_lib | module_name: CategoricalActor | synthetic: false | uuid: 13700 | licenses: ["MIT"] | stars: 99 | sha: 7087d4569c9a827d35dd8735b55a080834d31a82 | repo_link: https://github.com/FlickerNiko/ai_lib/tree/7087d4569c9a827d35dd8735b55a080834d31a82
|
entry_point: ClassHead
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/u3/cu3litezfpnwhpnfnfuj6dtimz6ml42wmcwnwxlnovd4p5lvyin4.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 2048
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = (yindex // 512)
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), None, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (512*x2) + (2097152*y1)), tmp0, None)
''', device_str='cuda')
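# Note (added commentary): this layout kernel repacks the (4, 512, 64, 64)
# NCHW input into the channels-last buffer allocated below (strides
# (2097152, 1, 32768, 512)) so the following 1x1 convolution runs in its
# preferred memory format.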
# kernel path: runs/run_shard_0/inductor_cache/wj/cwjdbk4o7ympk744ppb5oagoq2dkyoyyvx4uy4qz3ljiyxwqqnut.py
# Topologically Sorted Source Nodes: [out_1, view], Original ATen: [aten.clone, aten.view]
# Source node to ATen node mapping:
# out_1 => clone
# view => view
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
# %view : [num_users=1] = call_function[target=torch.ops.aten.reshape.default](args = (%clone, [4, -1, 2]), kwargs = {})
triton_poi_fused_clone_view_1 = async_compile.triton('triton_poi_fused_clone_view_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_view_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_view_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 98304
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x4 = xindex
x0 = xindex % 6
tmp0 = tl.load(in_out_ptr0 + (x4), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x4), tmp2, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (6, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_2, (6, ), (1, ))
assert_size_stride(primals_3, (4, 512, 64, 64), (2097152, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_3, buf0, 2048, 4096, grid=grid(2048, 4096), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 6, 64, 64), (24576, 1, 384, 6))
buf2 = reinterpret_tensor(buf1, (4, 64, 64, 6), (24576, 384, 6, 1), 0); del buf1 # reuse
buf3 = reinterpret_tensor(buf2, (4, 12288, 2), (24576, 2, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [out_1, view], Original ATen: [aten.clone, aten.view]
triton_poi_fused_clone_view_1.run(buf3, primals_2, 98304, grid=grid(98304), stream=stream0)
del primals_2
return (buf3, primals_1, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((6, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((6, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 512, 64, 64), (2097152, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from itertools import product as product
import torch.nn as nn
class ClassHead(nn.Module):
def __init__(self, inchannels=512, num_anchors=3):
super(ClassHead, self).__init__()
self.num_anchors = num_anchors
self.conv1x1 = nn.Conv2d(inchannels, self.num_anchors * 2,
kernel_size=(1, 1), stride=1, padding=0)
def forward(self, x):
out = self.conv1x1(x)
out = out.permute(0, 2, 3, 1).contiguous()
return out.view(out.shape[0], -1, 2)
def get_inputs():
return [torch.rand([4, 512, 64, 64])]
def get_init_inputs():
return [[], {}]
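# --- Added shape check (illustrative) ---
# With num_anchors=3 on a 64x64 map: the conv gives (4, 6, 64, 64), and the
# permute+view flattens it to (4, 64*64*3, 2) = (4, 12288, 2), i.e. two class
# scores per anchor location.
if __name__ == '__main__':
    head = ClassHead(inchannels=512, num_anchors=3)
    out = head(torch.rand(4, 512, 64, 64))
    print(out.shape)  # torch.Size([4, 12288, 2])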
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from itertools import product as product
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = yindex // 512
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 512 * x2 + 2097152 * y1), tmp0, None)
@triton.jit
def triton_poi_fused_clone_view_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x4 = xindex
x0 = xindex % 6
tmp0 = tl.load(in_out_ptr0 + x4, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x4, tmp2, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (6, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_2, (6,), (1,))
assert_size_stride(primals_3, (4, 512, 64, 64), (2097152, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512
), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(2048, 4096)](primals_3, buf0, 2048, 4096,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_3
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 6, 64, 64), (24576, 1, 384, 6))
buf2 = reinterpret_tensor(buf1, (4, 64, 64, 6), (24576, 384, 6, 1), 0)
del buf1
buf3 = reinterpret_tensor(buf2, (4, 12288, 2), (24576, 2, 1), 0)
del buf2
triton_poi_fused_clone_view_1[grid(98304)](buf3, primals_2, 98304,
XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
return buf3, primals_1, buf0
class ClassHeadNew(nn.Module):
def __init__(self, inchannels=512, num_anchors=3):
super(ClassHeadNew, self).__init__()
self.num_anchors = num_anchors
self.conv1x1 = nn.Conv2d(inchannels, self.num_anchors * 2,
kernel_size=(1, 1), stride=1, padding=0)
def forward(self, input_0):
primals_1 = self.conv1x1.weight
primals_2 = self.conv1x1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
repo_name: Edward1900/Face-Detector-1MB-with-landmark | module_name: ClassHead | synthetic: false | uuid: 13701 | licenses: ["MIT"] | stars: 907 | sha: 16c16c4efa74b0264e0fd7fe0ddc0160f540a4bf | repo_link: https://github.com/Edward1900/Face-Detector-1MB-with-landmark/tree/16c16c4efa74b0264e0fd7fe0ddc0160f540a4bf
|
entry_point: BboxHead
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/u3/cu3litezfpnwhpnfnfuj6dtimz6ml42wmcwnwxlnovd4p5lvyin4.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 2048
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = (yindex // 512)
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), None, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (512*x2) + (2097152*y1)), tmp0, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/tn/ctncuf7vgmv2algyzlhp7ada7ijky7jntejykq6f6paqfcnifxfc.py
# Topologically Sorted Source Nodes: [out_1, view], Original ATen: [aten.clone, aten.view]
# Source node to ATen node mapping:
# out_1 => clone
# view => view
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
# %view : [num_users=1] = call_function[target=torch.ops.aten.reshape.default](args = (%clone, [4, -1, 4]), kwargs = {})
triton_poi_fused_clone_view_1 = async_compile.triton('triton_poi_fused_clone_view_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_view_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_view_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 196608
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x4 = xindex
x0 = xindex % 12
tmp0 = tl.load(in_out_ptr0 + (x4), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x4), tmp2, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (12, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_2, (12, ), (1, ))
assert_size_stride(primals_3, (4, 512, 64, 64), (2097152, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_3, buf0, 2048, 4096, grid=grid(2048, 4096), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 12, 64, 64), (49152, 1, 768, 12))
buf2 = reinterpret_tensor(buf1, (4, 64, 64, 12), (49152, 768, 12, 1), 0); del buf1 # reuse
buf3 = reinterpret_tensor(buf2, (4, 12288, 4), (49152, 4, 1), 0); del buf2 # reuse
# Topologically Sorted Source Nodes: [out_1, view], Original ATen: [aten.clone, aten.view]
triton_poi_fused_clone_view_1.run(buf3, primals_2, 196608, grid=grid(196608), stream=stream0)
del primals_2
return (buf3, primals_1, buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((12, 512, 1, 1), (512, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((12, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 512, 64, 64), (2097152, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from itertools import product as product
import torch.nn as nn
class BboxHead(nn.Module):
def __init__(self, inchannels=512, num_anchors=3):
super(BboxHead, self).__init__()
self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 4, kernel_size=(
1, 1), stride=1, padding=0)
def forward(self, x):
out = self.conv1x1(x)
out = out.permute(0, 2, 3, 1).contiguous()
return out.view(out.shape[0], -1, 4)
def get_inputs():
return [torch.rand([4, 512, 64, 64])]
def get_init_inputs():
return [[], {}]
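# --- Added shape check (illustrative) ---
# Same pattern as ClassHead but with 4 box deltas per anchor:
# (4, 12, 64, 64) -> (4, 64*64*3, 4) = (4, 12288, 4).
if __name__ == '__main__':
    head = BboxHead(inchannels=512, num_anchors=3)
    out = head(torch.rand(4, 512, 64, 64))
    print(out.shape)  # torch.Size([4, 12288, 4])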
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from itertools import product as product
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 512
y1 = yindex // 512
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), None, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 512 * x2 + 2097152 * y1), tmp0, None)
@triton.jit
def triton_poi_fused_clone_view_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.
constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x4 = xindex
x0 = xindex % 12
tmp0 = tl.load(in_out_ptr0 + x4, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x4, tmp2, None)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (12, 512, 1, 1), (512, 1, 1, 1))
assert_size_stride(primals_2, (12,), (1,))
assert_size_stride(primals_3, (4, 512, 64, 64), (2097152, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 512, 64, 64), (2097152, 1, 32768, 512
), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(2048, 4096)](primals_3, buf0, 2048, 4096,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_3
buf1 = extern_kernels.convolution(buf0, primals_1, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 12, 64, 64), (49152, 1, 768, 12))
buf2 = reinterpret_tensor(buf1, (4, 64, 64, 12), (49152, 768, 12, 1), 0
)
del buf1
buf3 = reinterpret_tensor(buf2, (4, 12288, 4), (49152, 4, 1), 0)
del buf2
triton_poi_fused_clone_view_1[grid(196608)](buf3, primals_2, 196608,
XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
return buf3, primals_1, buf0
class BboxHeadNew(nn.Module):
def __init__(self, inchannels=512, num_anchors=3):
super(BboxHeadNew, self).__init__()
self.conv1x1 = nn.Conv2d(inchannels, num_anchors * 4, kernel_size=(
1, 1), stride=1, padding=0)
def forward(self, input_0):
primals_1 = self.conv1x1.weight
primals_2 = self.conv1x1.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
|
repo_name: Edward1900/Face-Detector-1MB-with-landmark | module_name: BboxHead | synthetic: false | uuid: 13702 | licenses: ["MIT"] | stars: 907 | sha: 16c16c4efa74b0264e0fd7fe0ddc0160f540a4bf | repo_link: https://github.com/Edward1900/Face-Detector-1MB-with-landmark/tree/16c16c4efa74b0264e0fd7fe0ddc0160f540a4bf
|
entry_point: openai_critic
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ms/cmsuzohbg5nq52jnvirovzkvykrzzko5xomu7zyu5e5u2lhegppw.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%primals_1, %primals_2], 1), kwargs = {})
triton_poi_fused_cat_0 = async_compile.triton('triton_poi_fused_cat_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
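# Note (added commentary): this kernel materializes torch.cat([obs, action], 1)
# for two (4, 4) inputs: lanes with x0 < 4 read from in_ptr0, the remaining
# lanes read from in_ptr1 at offset x0 - 4, yielding one contiguous (4, 8) buffer.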
# kernel path: runs/run_shard_0/inductor_cache/id/cidqzvuh2sg4l6gjkmsrg6phteh24hzr63uxf3nlwcm7rdbghvum.py
# Topologically Sorted Source Nodes: [x_cat], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x_cat => gt, mul, where
# Graph fragment:
# %add_tensor_1 : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default_1, %primals_4), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_tensor_1, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_tensor_1, 0.01), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add_tensor_1, %mul), kwargs = {})
triton_poi_fused_leaky_relu_1 = async_compile.triton('triton_poi_fused_leaky_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[512],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp7, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/wi/cwil7l7wbd4zw3oqw2dakpun2vbi6idcwq5f4y65kwvvztpxmhnk.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu]
# Source node to ATen node mapping:
# x => gt_1, mul_1, where_1
# Graph fragment:
# %add_tensor : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_6), kwargs = {})
# %gt_1 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add_tensor, 0), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_tensor, 0.01), kwargs = {})
# %where_1 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %add_tensor, %mul_1), kwargs = {})
triton_poi_fused_leaky_relu_2 = async_compile.triton('triton_poi_fused_leaky_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_leaky_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr1 + (x2), tmp7, xmask)
''', device_str='cuda')
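# Note (added commentary): each fused leaky_relu kernel above adds the bias,
# then writes two outputs per element -- the boolean mask (x + b) > 0, saved
# for the backward pass, and the LeakyReLU value where(x > 0, x, 0.01 * x).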
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (128, 8), (8, 1))
assert_size_stride(primals_4, (128, ), (1, ))
assert_size_stride(primals_5, (64, 128), (128, 1))
assert_size_stride(primals_6, (64, ), (1, ))
assert_size_stride(primals_7, (1, 64), (64, 1))
assert_size_stride(primals_8, (1, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
stream0 = get_raw_stream(0)
triton_poi_fused_cat_0.run(primals_1, primals_2, buf0, 32, grid=grid(32), stream=stream0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 128), (1, 8), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 128), (128, 1), torch.bool)
buf3 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_cat], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_1.run(buf1, primals_4, buf2, buf3, 512, grid=grid(512), stream=stream0)
del buf1
del primals_4
buf4 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf3, reinterpret_tensor(primals_5, (128, 64), (1, 128), 0), out=buf4)
buf5 = empty_strided_cuda((4, 64), (64, 1), torch.bool)
buf6 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.leaky_relu]
triton_poi_fused_leaky_relu_2.run(buf4, primals_6, buf5, buf6, 256, grid=grid(256), stream=stream0)
del buf4
del primals_6
buf8 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [value], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_8, buf6, reinterpret_tensor(primals_7, (64, 1), (1, 64), 0), alpha=1, beta=1, out=buf8)
del primals_8
return (buf8, buf0, buf2, buf3, buf5, buf6, primals_7, primals_5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((128, 8), (8, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, 128), (128, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, 64), (64, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class openai_critic(nn.Module):
def __init__(self, obs_shape_n, action_shape_n):
super(openai_critic, self).__init__()
self.LReLU = nn.LeakyReLU(0.01)
self.linear_c1 = nn.Linear(action_shape_n + obs_shape_n, 128)
self.linear_c2 = nn.Linear(128, 64)
self.linear_c = nn.Linear(64, 1)
self.reset_parameters()
self.train()
    def reset_parameters(self):
        # Compute the gain once and reuse it (the original had a discarded
        # calculate_gain call followed by three redundant recomputations).
        gain = nn.init.calculate_gain('leaky_relu')
        nn.init.xavier_uniform_(self.linear_c1.weight, gain=gain)
        nn.init.xavier_uniform_(self.linear_c2.weight, gain=gain)
        nn.init.xavier_uniform_(self.linear_c.weight, gain=gain)
    def forward(self, obs_input, action_input):
        x_cat = self.LReLU(self.linear_c1(torch.cat([obs_input, action_input], dim=1)))
        x = self.LReLU(self.linear_c2(x_cat))
        value = self.linear_c(x)
        return value
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'obs_shape_n': 4, 'action_shape_n': 4}]
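# Hedged usage sketch (not part of the original repo): exercise the eager module
# with the exact shapes advertised by get_inputs()/get_init_inputs() above.
if __name__ == '__main__':
    critic = openai_critic(obs_shape_n=4, action_shape_n=4)
    obs, act = get_inputs()
    value = critic(obs, act)
    assert value.shape == (4, 1)  # one scalar value per batch element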
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_cat_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
    tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
    tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
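# Note: this kernel materializes torch.cat([obs, act], dim=1); the tmp4 predicate
# routes columns 0-3 from in_ptr0 and columns 4-7 from in_ptr1 of each fused row.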
@triton.jit
def triton_poi_fused_leaky_relu_1(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 512
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
@triton.jit
def triton_poi_fused_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.01
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr1 + x2, tmp7, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8) = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (128, 8), (8, 1))
assert_size_stride(primals_4, (128,), (1,))
assert_size_stride(primals_5, (64, 128), (128, 1))
assert_size_stride(primals_6, (64,), (1,))
assert_size_stride(primals_7, (1, 64), (64, 1))
assert_size_stride(primals_8, (1,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_cat_0[grid(32)](primals_1, primals_2, buf0, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_1
del primals_2
buf1 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
extern_kernels.mm(buf0, reinterpret_tensor(primals_3, (8, 128), (1,
8), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((4, 128), (128, 1), torch.bool)
buf3 = empty_strided_cuda((4, 128), (128, 1), torch.float32)
triton_poi_fused_leaky_relu_1[grid(512)](buf1, primals_4, buf2,
buf3, 512, XBLOCK=128, num_warps=4, num_stages=1)
del buf1
del primals_4
buf4 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
extern_kernels.mm(buf3, reinterpret_tensor(primals_5, (128, 64), (1,
128), 0), out=buf4)
buf5 = empty_strided_cuda((4, 64), (64, 1), torch.bool)
buf6 = empty_strided_cuda((4, 64), (64, 1), torch.float32)
triton_poi_fused_leaky_relu_2[grid(256)](buf4, primals_6, buf5,
buf6, 256, XBLOCK=128, num_warps=4, num_stages=1)
del buf4
del primals_6
buf8 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
extern_kernels.addmm(primals_8, buf6, reinterpret_tensor(primals_7,
(64, 1), (1, 64), 0), alpha=1, beta=1, out=buf8)
del primals_8
return buf8, buf0, buf2, buf3, buf5, buf6, primals_7, primals_5
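# Note: the first returned buffer (buf8) is the critic value; the remaining
# tensors are intermediates Inductor keeps alive for the backward pass.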
class openai_criticNew(nn.Module):
def __init__(self, obs_shape_n, action_shape_n):
super(openai_criticNew, self).__init__()
self.LReLU = nn.LeakyReLU(0.01)
self.linear_c1 = nn.Linear(action_shape_n + obs_shape_n, 128)
self.linear_c2 = nn.Linear(128, 64)
self.linear_c = nn.Linear(64, 1)
self.reset_parameters()
self.train()
    def reset_parameters(self):
        # Compute the gain once and reuse it (the original had a discarded
        # calculate_gain call followed by three redundant recomputations).
        gain = nn.init.calculate_gain('leaky_relu')
        nn.init.xavier_uniform_(self.linear_c1.weight, gain=gain)
        nn.init.xavier_uniform_(self.linear_c2.weight, gain=gain)
        nn.init.xavier_uniform_(self.linear_c.weight, gain=gain)
def forward(self, input_0, input_1):
primals_3 = self.linear_c1.weight
primals_4 = self.linear_c1.bias
primals_5 = self.linear_c2.weight
primals_6 = self.linear_c2.bias
primals_7 = self.linear_c.weight
primals_8 = self.linear_c.bias
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8])
return output[0]
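# Hedged CUDA smoke test (illustrative, assumes a CUDA device is available):
# the compiled wrapper should behave like the eager critic for matching shapes.
def _smoke_test_compiled_critic():
    critic = openai_criticNew(obs_shape_n=4, action_shape_n=4).cuda()
    obs = torch.rand(4, 4, device='cuda')
    act = torch.rand(4, 4, device='cuda')
    value = critic(obs, act)
    assert value.shape == (4, 1)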
|
FlickerNiko/ai_lib
|
openai_critic
| false | 13703 |
[
"MIT"
] | 99 |
7087d4569c9a827d35dd8735b55a080834d31a82
|
https://github.com/FlickerNiko/ai_lib/tree/7087d4569c9a827d35dd8735b55a080834d31a82
|
eSEModule
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/l3/cl35tzbhrd24dhunkbb6gjs54aklpyr46oikqhoylcgmkcmhujil.py
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# x => mean
# Graph fragment:
# %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1, -2], True), kwargs = {})
triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/co/ccowauib7vy5gm7p4vj4ao45thoxwz7gg4fphlxqgn5idb7igf7i.py
# Topologically Sorted Source Nodes: [x_1, add, relu6, x_2, mul], Original ATen: [aten.convolution, aten.add, aten.hardtanh, aten.div, aten.mul]
# Source node to ATen node mapping:
# add => add
# mul => mul
# relu6 => clamp_max, clamp_min
# x_1 => convolution
# x_2 => div
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%mean, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution, 3.0), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 6), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%clamp_max, 6.0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %div), kwargs = {})
triton_poi_fused_add_convolution_div_hardtanh_mul_1 = async_compile.triton('triton_poi_fused_add_convolution_div_hardtanh_mul_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_div_hardtanh_mul_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_div_hardtanh_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = (xindex // 16)
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x4), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = 3.0
tmp5 = tmp3 + tmp4
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = 6.0
tmp9 = triton_helpers.minimum(tmp7, tmp8)
tmp10 = 0.16666666666666666
tmp11 = tmp9 * tmp10
tmp12 = tmp0 * tmp11
tl.store(out_ptr0 + (x3), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/jf/cjfxg4qmkc47we3c3cd47pg6lk6t4idzjkbdf3zyoyk4nr3a7ktb.py
# Topologically Sorted Source Nodes: [x_1, add], Original ATen: [aten.convolution, aten.add, aten.hardtanh_backward]
# Source node to ATen node mapping:
# add => add
# x_1 => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%mean, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution, 3.0), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%add, 0), kwargs = {})
# %ge : [num_users=1] = call_function[target=torch.ops.aten.ge.Scalar](args = (%add, 6), kwargs = {})
# %bitwise_or : [num_users=1] = call_function[target=torch.ops.aten.bitwise_or.Tensor](args = (%le, %ge), kwargs = {})
triton_poi_fused_add_convolution_hardtanh_backward_2 = async_compile.triton('triton_poi_fused_add_convolution_hardtanh_backward_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_hardtanh_backward_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_hardtanh_backward_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 3.0
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tmp7 = 6.0
tmp8 = tmp4 >= tmp7
tmp9 = tmp6 | tmp8
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_mean_0.run(buf1, primals_1, 16, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1, add, relu6, x_2, mul], Original ATen: [aten.convolution, aten.add, aten.hardtanh, aten.div, aten.mul]
triton_poi_fused_add_convolution_div_hardtanh_mul_1.run(primals_1, buf2, primals_3, buf3, 256, grid=grid(256), stream=stream0)
buf4 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1, add], Original ATen: [aten.convolution, aten.add, aten.hardtanh_backward]
triton_poi_fused_add_convolution_hardtanh_backward_2.run(buf2, primals_3, buf4, 16, grid=grid(16), stream=stream0)
del buf2
del primals_3
return (buf3, primals_1, primals_2, buf1, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.utils.data
import torch.nn.functional as F
from torch import nn
class Hsigmoid(nn.Module):
def __init__(self, inplace=True):
super(Hsigmoid, self).__init__()
self.inplace = inplace
def forward(self, x):
return F.relu6(x + 3.0, inplace=self.inplace) / 6.0
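# Note: Hsigmoid is the piecewise-linear sigmoid approximation clamp(x + 3, 0, 6) / 6
# used by hard-swish/eSE blocks; relu6(x + 3) is exactly clamp(x + 3, 0, 6).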
class eSEModule(nn.Module):
def __init__(self, channel, reduction=4):
super(eSEModule, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Conv2d(channel, channel, kernel_size=1, padding=0)
self.hsigmoid = Hsigmoid()
def forward(self, x):
input = x
x = self.avg_pool(x)
x = self.fc(x)
x = self.hsigmoid(x)
return input * x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channel': 4}]
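# Hedged reference check (not part of the original repo): the eSE gate can be
# reproduced directly from its definition, x * hsigmoid(fc(avg_pool(x))).
if __name__ == '__main__':
    m = eSEModule(channel=4)
    x, = get_inputs()
    ref = x * (F.relu6(m.fc(x.mean(dim=(-2, -1), keepdim=True)) + 3.0) / 6.0)
    assert torch.allclose(m(x), ref, atol=1e-6)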
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
import torch.nn.functional as F
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_convolution_div_hardtanh_mul_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x4 = xindex // 16
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x4, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = 3.0
tmp5 = tmp3 + tmp4
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = 6.0
tmp9 = triton_helpers.minimum(tmp7, tmp8)
tmp10 = 0.16666666666666666
tmp11 = tmp9 * tmp10
tmp12 = tmp0 * tmp11
tl.store(out_ptr0 + x3, tmp12, xmask)
@triton.jit
def triton_poi_fused_add_convolution_hardtanh_backward_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 3.0
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tmp7 = 6.0
tmp8 = tmp4 >= tmp7
tmp9 = tmp6 | tmp8
tl.store(out_ptr0 + x2, tmp9, xmask)
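# Note: this kernel only precomputes the hardtanh saturation mask
# (pre-activation <= 0 or >= 6) consumed by the relu6 backward; it stores no activations.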
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=1,
num_warps=2, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 1, 1), (4, 1, 1, 1))
buf3 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_convolution_div_hardtanh_mul_1[grid(256)](
primals_1, buf2, primals_3, buf3, 256, XBLOCK=256, num_warps=4,
num_stages=1)
buf4 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool)
triton_poi_fused_add_convolution_hardtanh_backward_2[grid(16)](buf2,
primals_3, buf4, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf2
del primals_3
return buf3, primals_1, primals_2, buf1, buf4
class Hsigmoid(nn.Module):
def __init__(self, inplace=True):
super(Hsigmoid, self).__init__()
self.inplace = inplace
def forward(self, x):
return F.relu6(x + 3.0, inplace=self.inplace) / 6.0
class eSEModuleNew(nn.Module):
def __init__(self, channel, reduction=4):
super(eSEModuleNew, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Conv2d(channel, channel, kernel_size=1, padding=0)
self.hsigmoid = Hsigmoid()
def forward(self, input_0):
primals_2 = self.fc.weight
primals_3 = self.fc.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
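# Hedged CUDA smoke test (illustrative, assumes a CUDA device): channel attention
# must preserve the input shape, so the compiled wrapper is checked for exactly that.
def _smoke_test_compiled_ese():
    m = eSEModuleNew(channel=4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert m(x).shape == x.shape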
|
FluteXu/DW-Research
|
eSEModule
| false | 13704 |
[
"Apache-2.0"
] | 780 |
6b559d2d1d440c07e5936a65cd74a3bc657962dc
|
https://github.com/FluteXu/DW-Research/tree/6b559d2d1d440c07e5936a65cd74a3bc657962dc
|
Encoder
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/q7/cq75winnysem6xhosadt6noej64bzsxr6gzsm2fc2lah52csbkdm.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 128
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = (yindex // 4)
tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (4*x2) + (64*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/lq/clqpojw3nbzqfutiuorzwvs6xjljcuuy2acp4zwufgezfm6n5yiq.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = (yindex // 4)
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (4*x2) + (16384*y1)), tmp0, ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/wr/cwrbaplpfk7m6giisotqeykajo7urpubzk4y7hl6wjrhxxtwwukj.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 2048
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = (yindex // 32)
tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (32*x2) + (512*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/dx/cdx5ml2qpofihmmpnvabqkpaoyptwmwdx4jtjzptieewtlhrqlmf.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 8192
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (64*x2) + (1024*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/kv/ckvorupxanzrceis7ogps6qnxhad4srcb6zrfzpkwhenxdnsalg7.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 32768
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = (yindex // 128)
tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (128*x2) + (2048*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/bu/cbutwkpktfdb6jnvezw44p46qy637ourdemd2exlzxaqaoegf6do.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_5 = async_compile.triton('triton_poi_fused_convolution_relu_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 123008
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/6g/c6g2pvwzlpdatwtjyxsj3re4bkg36nused7hzn46o6upp6rqjbib.py
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# x_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_6 = async_compile.triton('triton_poi_fused_convolution_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 50176
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ft/cftunu2ss5rrofxcggepxavg4uinzktmesjg5cu4qwgdtbeqlavk.py
# Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# x_2 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_7 = async_compile.triton('triton_poi_fused_convolution_relu_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_7', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 18432
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/iv/civ57zh5itqhd5neawuafk6mfurvydzvq4uudn2orirzna5knb4r.py
# Topologically Sorted Source Nodes: [conv2d_3, x_3], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_3 => convolution_3
# x_3 => relu_3
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_8, %primals_9, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_8 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_8(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 1024
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 256
y1 = (yindex // 256)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (256*x2) + (1024*y1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2 + (4*y3)), tmp4, xmask)
tl.store(out_ptr1 + (y0 + (256*x2) + (1024*y1)), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13 = args
args.clear()
assert_size_stride(primals_1, (32, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1))
assert_size_stride(primals_4, (64, 32, 4, 4), (512, 16, 4, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (128, 64, 4, 4), (1024, 16, 4, 1))
assert_size_stride(primals_7, (128, ), (1, ))
assert_size_stride(primals_8, (256, 128, 4, 4), (2048, 16, 4, 1))
assert_size_stride(primals_9, (256, ), (1, ))
assert_size_stride(primals_10, (4, 1024), (1024, 1))
assert_size_stride(primals_11, (4, ), (1, ))
assert_size_stride(primals_12, (4, 1024), (1024, 1))
assert_size_stride(primals_13, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((32, 4, 4, 4), (64, 1, 16, 4), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_1, buf0, 128, 16, grid=grid(128, 16), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 64, 64), (16384, 1, 256, 4), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_3, buf1, 16, 4096, grid=grid(16, 4096), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((64, 32, 4, 4), (512, 1, 128, 32), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_4, buf2, 2048, 16, grid=grid(2048, 16), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((128, 64, 4, 4), (1024, 1, 256, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(primals_6, buf3, 8192, 16, grid=grid(8192, 16), stream=stream0)
del primals_6
buf4 = empty_strided_cuda((256, 128, 4, 4), (2048, 1, 512, 128), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_4.run(primals_8, buf4, 32768, 16, grid=grid(32768, 16), stream=stream0)
del primals_8
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf5 = extern_kernels.convolution(buf1, buf0, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 32, 31, 31), (30752, 1, 992, 32))
buf6 = buf5; del buf5 # reuse
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_5.run(buf6, primals_2, 123008, grid=grid(123008), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(buf6, buf2, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 64, 14, 14), (12544, 1, 896, 64))
buf8 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_6.run(buf8, primals_5, 50176, grid=grid(50176), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(buf8, buf3, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 128, 6, 6), (4608, 1, 768, 128))
buf10 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_7.run(buf10, primals_7, 18432, grid=grid(18432), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf11 = extern_kernels.convolution(buf10, buf4, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 256, 2, 2), (1024, 1, 512, 256))
buf12 = empty_strided_cuda((4, 256, 2, 2), (1024, 4, 2, 1), torch.float32)
buf15 = empty_strided_cuda((4, 256, 2, 2), (1024, 1, 512, 256), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_3, x_3], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_8.run(buf11, primals_9, buf12, buf15, 1024, 4, grid=grid(1024, 4), stream=stream0)
del buf11
del primals_9
buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mu], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, reinterpret_tensor(buf12, (4, 1024), (1024, 1), 0), reinterpret_tensor(primals_10, (1024, 4), (1, 1024), 0), alpha=1, beta=1, out=buf13)
del primals_11
buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [logsigma], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_13, reinterpret_tensor(buf12, (4, 1024), (1024, 1), 0), reinterpret_tensor(primals_12, (1024, 4), (1, 1024), 0), alpha=1, beta=1, out=buf14)
del primals_13
return (buf13, buf14, buf0, buf1, buf2, buf3, buf4, buf6, buf8, buf10, reinterpret_tensor(buf12, (4, 1024), (1024, 1), 0), primals_12, primals_10, buf15, )
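    # Note: the first two returned buffers are (mu, logsigma); the rest are the
    # channels-last weight/input copies, activations, and the relu mask saved for backward.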
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 64, 64), (16384, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 32, 4, 4), (512, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((128, 64, 4, 4), (1024, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((256, 128, 4, 4), (2048, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 1024), (1024, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, 1024), (1024, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.utils.data
import torch.nn as nn
import torch.nn.functional as F
class Encoder(nn.Module):
""" VAE encoder """
def __init__(self, img_channels, latent_size):
super(Encoder, self).__init__()
self.latent_size = latent_size
self.img_channels = img_channels
self.conv1 = nn.Conv2d(img_channels, 32, 4, stride=2)
self.conv2 = nn.Conv2d(32, 64, 4, stride=2)
self.conv3 = nn.Conv2d(64, 128, 4, stride=2)
self.conv4 = nn.Conv2d(128, 256, 4, stride=2)
self.fc_mu = nn.Linear(2 * 2 * 256, latent_size)
self.fc_logsigma = nn.Linear(2 * 2 * 256, latent_size)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = F.relu(self.conv4(x))
x = x.view(x.size(0), -1)
mu = self.fc_mu(x)
logsigma = self.fc_logsigma(x)
return mu, logsigma
def get_inputs():
return [torch.rand([4, 4, 64, 64])]
def get_init_inputs():
return [[], {'img_channels': 4, 'latent_size': 4}]
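# Hedged usage sketch (not part of the original repo): a VAE typically samples the
# latent with the reparameterization trick from the (mu, logsigma) pair returned above.
if __name__ == '__main__':
    enc = Encoder(img_channels=4, latent_size=4)
    mu, logsigma = enc(*get_inputs())
    z = mu + logsigma.exp() * torch.randn_like(mu)  # z ~ N(mu, exp(logsigma)^2)
    assert z.shape == (4, 4)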
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.utils.data
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 128
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
    tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 4 * x2 + 16384 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 32 * x2 + 512 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 64 * x2 + 1024 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 128 * x2 + 2048 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_5(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 123008
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
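# triton_poi_fused_convolution_relu_5, _6 and _7 all fuse the conv bias add
# with the ReLU; in eager PyTorch the same element-wise step would be roughly
# torch.relu(conv_out + bias.view(1, -1, 1, 1)) (an equivalent sketch,
# ignoring the channels-last memory layout the compiled kernels use).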
@triton.jit
def triton_poi_fused_convolution_relu_6(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 50176
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_7(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_8(in_ptr0, in_ptr1,
    out_ptr0, out_ptr1, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 256
y1 = yindex // 256
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 256 * x2 + 1024 * y1), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask)
tl.store(out_ptr1 + (y0 + 256 * x2 + 1024 * y1), tmp6, xmask)
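# Unlike the fused kernels above, this one writes two outputs: the ReLU
# activations (out_ptr0, repacked contiguously for the addmm heads below) and
# a boolean `<= 0` mask (out_ptr1) that the autograd backward of ReLU reuses.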
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13) = args
args.clear()
assert_size_stride(primals_1, (32, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1))
assert_size_stride(primals_4, (64, 32, 4, 4), (512, 16, 4, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (128, 64, 4, 4), (1024, 16, 4, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (256, 128, 4, 4), (2048, 16, 4, 1))
assert_size_stride(primals_9, (256,), (1,))
assert_size_stride(primals_10, (4, 1024), (1024, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4, 1024), (1024, 1))
assert_size_stride(primals_13, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((32, 4, 4, 4), (64, 1, 16, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(128, 16)](primals_1, buf0, 128, 16, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_1
        buf1 = empty_strided_cuda((4, 4, 64, 64), (16384, 1, 256, 4),
            torch.float32)
triton_poi_fused_1[grid(16, 4096)](primals_3, buf1, 16, 4096,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
        buf2 = empty_strided_cuda((64, 32, 4, 4), (512, 1, 128, 32),
            torch.float32)
triton_poi_fused_2[grid(2048, 16)](primals_4, buf2, 2048, 16,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((128, 64, 4, 4), (1024, 1, 256, 64),
torch.float32)
triton_poi_fused_3[grid(8192, 16)](primals_6, buf3, 8192, 16,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf4 = empty_strided_cuda((256, 128, 4, 4), (2048, 1, 512, 128),
torch.float32)
triton_poi_fused_4[grid(32768, 16)](primals_8, buf4, 32768, 16,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_8
buf5 = extern_kernels.convolution(buf1, buf0, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf5, (4, 32, 31, 31), (30752, 1, 992, 32))
buf6 = buf5
del buf5
triton_poi_fused_convolution_relu_5[grid(123008)](buf6, primals_2,
123008, XBLOCK=512, num_warps=8, num_stages=1)
del primals_2
buf7 = extern_kernels.convolution(buf6, buf2, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 64, 14, 14), (12544, 1, 896, 64))
buf8 = buf7
del buf7
triton_poi_fused_convolution_relu_6[grid(50176)](buf8, primals_5,
50176, XBLOCK=512, num_warps=4, num_stages=1)
del primals_5
buf9 = extern_kernels.convolution(buf8, buf3, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 128, 6, 6), (4608, 1, 768, 128))
buf10 = buf9
del buf9
triton_poi_fused_convolution_relu_7[grid(18432)](buf10, primals_7,
18432, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf11 = extern_kernels.convolution(buf10, buf4, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 256, 2, 2), (1024, 1, 512, 256))
        buf12 = empty_strided_cuda((4, 256, 2, 2), (1024, 4, 2, 1),
            torch.float32)
buf15 = empty_strided_cuda((4, 256, 2, 2), (1024, 1, 512, 256),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_8[grid(1024, 4)](
buf11, primals_9, buf12, buf15, 1024, 4, XBLOCK=4, YBLOCK=64,
num_warps=4, num_stages=1)
del buf11
del primals_9
buf13 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_11, reinterpret_tensor(buf12, (4, 1024
), (1024, 1), 0), reinterpret_tensor(primals_10, (1024, 4), (1,
1024), 0), alpha=1, beta=1, out=buf13)
del primals_11
buf14 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_13, reinterpret_tensor(buf12, (4, 1024
), (1024, 1), 0), reinterpret_tensor(primals_12, (1024, 4), (1,
1024), 0), alpha=1, beta=1, out=buf14)
del primals_13
return (buf13, buf14, buf0, buf1, buf2, buf3, buf4, buf6, buf8, buf10,
reinterpret_tensor(buf12, (4, 1024), (1024, 1), 0), primals_12,
primals_10, buf15)
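# call() mirrors Encoder.forward in three stages: triton_poi_fused_0 through
# _4 repack the weights and input into a channels-last layout, each of the
# four extern convolutions then gets its bias and ReLU applied by a fused
# kernel (the last one also emitting the backward mask), and the two addmm
# calls compute fc_mu and fc_logsigma from the flattened (4, 1024) activations.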
class EncoderNew(nn.Module):
""" VAE encoder """
def __init__(self, img_channels, latent_size):
super(EncoderNew, self).__init__()
self.latent_size = latent_size
self.img_channels = img_channels
self.conv1 = nn.Conv2d(img_channels, 32, 4, stride=2)
self.conv2 = nn.Conv2d(32, 64, 4, stride=2)
self.conv3 = nn.Conv2d(64, 128, 4, stride=2)
self.conv4 = nn.Conv2d(128, 256, 4, stride=2)
self.fc_mu = nn.Linear(2 * 2 * 256, latent_size)
self.fc_logsigma = nn.Linear(2 * 2 * 256, latent_size)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.conv3.weight
primals_7 = self.conv3.bias
primals_8 = self.conv4.weight
primals_9 = self.conv4.bias
primals_10 = self.fc_mu.weight
primals_11 = self.fc_mu.bias
primals_12 = self.fc_logsigma.weight
primals_13 = self.fc_logsigma.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13])
return output[0], output[1]
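# Hypothetical usage of the compiled wrapper (a CUDA device is required,
# since call() pins every buffer to cuda:0):
#   model = EncoderNew(img_channels=4, latent_size=4).cuda()
#   mu, logsigma = model(torch.rand(4, 4, 64, 64, device='cuda'))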
|
FabianSchuetze/world-models
|
Encoder
| false | 13,705 |
[
"MIT"
] | 440 |
d6abd9ce97409734a766eb67ccf0d1967ba9bf0c
|
https://github.com/FabianSchuetze/world-models/tree/d6abd9ce97409734a766eb67ccf0d1967ba9bf0c
|
ToRGB
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/an/canlvknqfe2j66bqqzu33wvsryryqy43ehqptswny7xgbi3zercv.py
# Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.convolution, aten.add]
# Source node to ATen node mapping:
# out => convolution
# out_1 => add
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution, %primals_4), kwargs = {})
triton_poi_fused_add_convolution_0 = async_compile.triton('triton_poi_fused_add_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_convolution_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_convolution_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 3
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + (x3), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (3, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (3, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, 3, 1, 1), (3, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 3, 4, 4), (48, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [out, out_1], Original ATen: [aten.convolution, aten.add]
stream0 = get_raw_stream(0)
triton_poi_fused_add_convolution_0.run(buf1, primals_2, primals_4, 192, grid=grid(192), stream=stream0)
del primals_2
del primals_4
return (buf1, primals_1, primals_3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((3, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((3, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 3, 1, 1), (3, 1, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
from torch.autograd import Function
import torch
from torch import nn
from torch.nn import functional as F
def make_kernel(k):
k = torch.tensor(k, dtype=torch.float32)
if k.ndim == 1:
k = k[None, :] * k[:, None]
k /= k.sum()
return k
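# Worked example, following the code above: for the default
# blur_kernel=[1, 3, 3, 1] the outer product is a 4x4 binomial filter whose
# entries sum to (1 + 3 + 3 + 1) ** 2 = 64, so after normalization every
# entry is its binomial product divided by 64 and the kernel sums to 1.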
def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0,
pad_x1, pad_y0, pad_y1):
_, channel, in_h, in_w = input.shape
input = input.reshape(-1, in_h, in_w, 1)
_, in_h, in_w, minor = input.shape
kernel_h, kernel_w = kernel.shape
out = input.view(-1, in_h, 1, in_w, 1, minor)
out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])
out = out.view(-1, in_h * up_y, in_w * up_x, minor)
out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0),
max(pad_y1, 0)])
    out = out[:, max(-pad_y0, 0):out.shape[1] - max(-pad_y1, 0),
        max(-pad_x0, 0):out.shape[2] - max(-pad_x1, 0), :]
out = out.permute(0, 3, 1, 2)
out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x +
pad_x0 + pad_x1])
w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
out = F.conv2d(out, w)
out = out.reshape(-1, minor, in_h * up_y + pad_y0 + pad_y1 - kernel_h +
1, in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1)
out = out.permute(0, 2, 3, 1)
out = out[:, ::down_y, ::down_x, :]
out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
return out.view(-1, channel, out_h, out_w)
def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
if input.device.type == 'cpu':
out = upfirdn2d_native(input, kernel, up, up, down, down, pad[0],
pad[1], pad[0], pad[1])
else:
        out = UpFirDn2d.apply(input, kernel, (up, up), (down, down),
            (pad[0], pad[1], pad[0], pad[1]))
return out
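# Note: the CUDA branch relies on upfirdn2d_op.upfirdn2d, a compiled C++/CUDA
# extension from the original StyleGAN2 ops package. It is referenced but
# never defined in this extract, so only the CPU path (upfirdn2d_native) is
# runnable as-is.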
class UpFirDn2dBackward(Function):
@staticmethod
def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad,
in_size, out_size):
up_x, up_y = up
down_x, down_y = down
g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad
grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1)
grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel,
down_x, down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1)
grad_input = grad_input.view(in_size[0], in_size[1], in_size[2],
in_size[3])
ctx.save_for_backward(kernel)
pad_x0, pad_x1, pad_y0, pad_y1 = pad
ctx.up_x = up_x
ctx.up_y = up_y
ctx.down_x = down_x
ctx.down_y = down_y
ctx.pad_x0 = pad_x0
ctx.pad_x1 = pad_x1
ctx.pad_y0 = pad_y0
ctx.pad_y1 = pad_y1
ctx.in_size = in_size
ctx.out_size = out_size
return grad_input
@staticmethod
def backward(ctx, gradgrad_input):
kernel, = ctx.saved_tensors
gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx.
in_size[3], 1)
gradgrad_out = upfirdn2d_op.upfirdn2d(gradgrad_input, kernel, ctx.
up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0, ctx.pad_x1,
ctx.pad_y0, ctx.pad_y1)
gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1],
ctx.out_size[0], ctx.out_size[1])
return gradgrad_out, None, None, None, None, None, None, None, None
class UpFirDn2d(Function):
@staticmethod
def forward(ctx, input, kernel, up, down, pad):
up_x, up_y = up
down_x, down_y = down
pad_x0, pad_x1, pad_y0, pad_y1 = pad
kernel_h, kernel_w = kernel.shape
_batch, channel, in_h, in_w = input.shape
ctx.in_size = input.shape
input = input.reshape(-1, in_h, in_w, 1)
ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1]))
out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
ctx.out_size = out_h, out_w
ctx.up = up_x, up_y
ctx.down = down_x, down_y
ctx.pad = pad_x0, pad_x1, pad_y0, pad_y1
g_pad_x0 = kernel_w - pad_x0 - 1
g_pad_y0 = kernel_h - pad_y0 - 1
g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1
g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1
ctx.g_pad = g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1
out = upfirdn2d_op.upfirdn2d(input, kernel, up_x, up_y, down_x,
down_y, pad_x0, pad_x1, pad_y0, pad_y1)
out = out.view(-1, channel, out_h, out_w)
return out
@staticmethod
def backward(ctx, grad_output):
kernel, grad_kernel = ctx.saved_tensors
grad_input = UpFirDn2dBackward.apply(grad_output, kernel,
grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size,
ctx.out_size)
return grad_input, None, None, None, None
class Upsample(nn.Module):
def __init__(self, kernel, factor=2):
super().__init__()
self.factor = factor
kernel = make_kernel(kernel) * factor ** 2
self.register_buffer('kernel', kernel)
p = kernel.shape[0] - factor
pad0 = (p + 1) // 2 + factor - 1
pad1 = p // 2
self.pad = pad0, pad1
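    # Example with the defaults (blur kernel length 4, factor 2):
    # p = 4 - 2 = 2, pad0 = (2 + 1) // 2 + 2 - 1 = 2 and pad1 = 2 // 2 = 1,
    # so self.pad == (2, 1) and upfirdn2d's output size works out to exactly
    # factor * input resolution.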
def forward(self, input):
out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad=
self.pad)
return out
class ToRGB(nn.Module):
def __init__(self, in_channel, upsample=True, resolution=None,
blur_kernel=[1, 3, 3, 1]):
super().__init__()
self.is_upsample = upsample
self.resolution = resolution
if upsample:
self.upsample = Upsample(blur_kernel)
self.conv = nn.Conv2d(in_channel, 3, kernel_size=1)
self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1))
def forward(self, input, skip=None):
out = self.conv(input)
out = out + self.bias
if skip is not None:
if self.is_upsample:
skip = self.upsample(skip)
out = out + skip
return out
def flops(self):
m = self.conv
kernel_ops = torch.zeros(m.weight.size()[2:]).numel()
bias_ops = 1
flops = 1 * self.resolution * self.resolution * 3 * (m.in_channels //
m.groups * kernel_ops + bias_ops)
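        # Note: when is_upsample is set, the branch below rebinds flops and
        # so replaces (rather than adds to) the conv FLOPs just computed;
        # this mirrors the source as extracted.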
if self.is_upsample:
w_shape = 1, 1, 4, 4
kernel_ops = torch.zeros(w_shape[2:]).numel()
flops = 1 * 3 * self.resolution * self.resolution * (3 * kernel_ops
)
return flops
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channel': 4}]
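# A minimal CPU sketch of the skip path (hypothetical usage; shapes follow
# the defaults above). With upsample=True the skip branch is blur-upsampled
# to the current resolution before the residual add:
def _check_to_rgb_skip():
    to_rgb = ToRGB(in_channel=4)
    out = to_rgb(torch.rand(1, 4, 8, 8))              # (1, 3, 8, 8)
    out = to_rgb(torch.rand(1, 4, 16, 16), skip=out)  # skip upsampled to 16x16
    assert out.shape == (1, 3, 16, 16)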
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch.autograd import Function
from torch import nn
from torch.nn import functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
@triton.jit
def triton_poi_fused_add_convolution_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 3
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tl.store(in_out_ptr0 + x3, tmp4, xmask)
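# The fused kernel above is the entire post-conv element-wise tail of
# ToRGB.forward for the traced skip=None case: out = conv_out + conv_bias +
# self.bias, broadcast over the 3 RGB channels (x1 is the channel index).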
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (3, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (3,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, 3, 1, 1), (3, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 3, 4, 4), (48, 16, 4, 1))
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_add_convolution_0[grid(192)](buf1, primals_2,
primals_4, 192, XBLOCK=128, num_warps=4, num_stages=1)
del primals_2
del primals_4
return buf1, primals_1, primals_3
def make_kernel(k):
k = torch.tensor(k, dtype=torch.float32)
if k.ndim == 1:
k = k[None, :] * k[:, None]
k /= k.sum()
return k
def upfirdn2d_native(input, kernel, up_x, up_y, down_x, down_y, pad_x0,
pad_x1, pad_y0, pad_y1):
_, channel, in_h, in_w = input.shape
input = input.reshape(-1, in_h, in_w, 1)
_, in_h, in_w, minor = input.shape
kernel_h, kernel_w = kernel.shape
out = input.view(-1, in_h, 1, in_w, 1, minor)
out = F.pad(out, [0, 0, 0, up_x - 1, 0, 0, 0, up_y - 1])
out = out.view(-1, in_h * up_y, in_w * up_x, minor)
out = F.pad(out, [0, 0, max(pad_x0, 0), max(pad_x1, 0), max(pad_y0, 0),
max(pad_y1, 0)])
    out = out[:, max(-pad_y0, 0):out.shape[1] - max(-pad_y1, 0),
        max(-pad_x0, 0):out.shape[2] - max(-pad_x1, 0), :]
out = out.permute(0, 3, 1, 2)
out = out.reshape([-1, 1, in_h * up_y + pad_y0 + pad_y1, in_w * up_x +
pad_x0 + pad_x1])
w = torch.flip(kernel, [0, 1]).view(1, 1, kernel_h, kernel_w)
out = F.conv2d(out, w)
out = out.reshape(-1, minor, in_h * up_y + pad_y0 + pad_y1 - kernel_h +
1, in_w * up_x + pad_x0 + pad_x1 - kernel_w + 1)
out = out.permute(0, 2, 3, 1)
out = out[:, ::down_y, ::down_x, :]
out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
return out.view(-1, channel, out_h, out_w)
def upfirdn2d(input, kernel, up=1, down=1, pad=(0, 0)):
if input.device.type == 'cpu':
out = upfirdn2d_native(input, kernel, up, up, down, down, pad[0],
pad[1], pad[0], pad[1])
else:
        out = UpFirDn2d.apply(input, kernel, (up, up), (down, down),
            (pad[0], pad[1], pad[0], pad[1]))
return out
class UpFirDn2dBackward(Function):
@staticmethod
def forward(ctx, grad_output, kernel, grad_kernel, up, down, pad, g_pad,
in_size, out_size):
up_x, up_y = up
down_x, down_y = down
g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1 = g_pad
grad_output = grad_output.reshape(-1, out_size[0], out_size[1], 1)
grad_input = upfirdn2d_op.upfirdn2d(grad_output, grad_kernel,
down_x, down_y, up_x, up_y, g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1)
grad_input = grad_input.view(in_size[0], in_size[1], in_size[2],
in_size[3])
ctx.save_for_backward(kernel)
pad_x0, pad_x1, pad_y0, pad_y1 = pad
ctx.up_x = up_x
ctx.up_y = up_y
ctx.down_x = down_x
ctx.down_y = down_y
ctx.pad_x0 = pad_x0
ctx.pad_x1 = pad_x1
ctx.pad_y0 = pad_y0
ctx.pad_y1 = pad_y1
ctx.in_size = in_size
ctx.out_size = out_size
return grad_input
@staticmethod
def backward(ctx, gradgrad_input):
kernel, = ctx.saved_tensors
gradgrad_input = gradgrad_input.reshape(-1, ctx.in_size[2], ctx.
in_size[3], 1)
gradgrad_out = upfirdn2d_op.upfirdn2d(gradgrad_input, kernel, ctx.
up_x, ctx.up_y, ctx.down_x, ctx.down_y, ctx.pad_x0, ctx.pad_x1,
ctx.pad_y0, ctx.pad_y1)
gradgrad_out = gradgrad_out.view(ctx.in_size[0], ctx.in_size[1],
ctx.out_size[0], ctx.out_size[1])
return gradgrad_out, None, None, None, None, None, None, None, None
class UpFirDn2d(Function):
@staticmethod
def forward(ctx, input, kernel, up, down, pad):
up_x, up_y = up
down_x, down_y = down
pad_x0, pad_x1, pad_y0, pad_y1 = pad
kernel_h, kernel_w = kernel.shape
_batch, channel, in_h, in_w = input.shape
ctx.in_size = input.shape
input = input.reshape(-1, in_h, in_w, 1)
ctx.save_for_backward(kernel, torch.flip(kernel, [0, 1]))
out_h = (in_h * up_y + pad_y0 + pad_y1 - kernel_h) // down_y + 1
out_w = (in_w * up_x + pad_x0 + pad_x1 - kernel_w) // down_x + 1
ctx.out_size = out_h, out_w
ctx.up = up_x, up_y
ctx.down = down_x, down_y
ctx.pad = pad_x0, pad_x1, pad_y0, pad_y1
g_pad_x0 = kernel_w - pad_x0 - 1
g_pad_y0 = kernel_h - pad_y0 - 1
g_pad_x1 = in_w * up_x - out_w * down_x + pad_x0 - up_x + 1
g_pad_y1 = in_h * up_y - out_h * down_y + pad_y0 - up_y + 1
ctx.g_pad = g_pad_x0, g_pad_x1, g_pad_y0, g_pad_y1
out = upfirdn2d_op.upfirdn2d(input, kernel, up_x, up_y, down_x,
down_y, pad_x0, pad_x1, pad_y0, pad_y1)
out = out.view(-1, channel, out_h, out_w)
return out
@staticmethod
def backward(ctx, grad_output):
kernel, grad_kernel = ctx.saved_tensors
grad_input = UpFirDn2dBackward.apply(grad_output, kernel,
grad_kernel, ctx.up, ctx.down, ctx.pad, ctx.g_pad, ctx.in_size,
ctx.out_size)
return grad_input, None, None, None, None
class Upsample(nn.Module):
def __init__(self, kernel, factor=2):
super().__init__()
self.factor = factor
kernel = make_kernel(kernel) * factor ** 2
self.register_buffer('kernel', kernel)
p = kernel.shape[0] - factor
pad0 = (p + 1) // 2 + factor - 1
pad1 = p // 2
self.pad = pad0, pad1
def forward(self, input):
out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad=
self.pad)
return out
class ToRGBNew(nn.Module):
def __init__(self, in_channel, upsample=True, resolution=None,
blur_kernel=[1, 3, 3, 1]):
super().__init__()
self.is_upsample = upsample
self.resolution = resolution
if upsample:
self.upsample = Upsample(blur_kernel)
self.conv = nn.Conv2d(in_channel, 3, kernel_size=1)
self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1))
def flops(self):
m = self.conv
kernel_ops = torch.zeros(m.weight.size()[2:]).numel()
bias_ops = 1
flops = 1 * self.resolution * self.resolution * 3 * (m.in_channels //
m.groups * kernel_ops + bias_ops)
if self.is_upsample:
w_shape = 1, 1, 4, 4
kernel_ops = torch.zeros(w_shape[2:]).numel()
flops = 1 * 3 * self.resolution * self.resolution * (3 * kernel_ops
)
return flops
def forward(self, input_0):
primals_4 = self.bias
primals_1 = self.conv.weight
primals_2 = self.conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
G-arj/StyleSwin
|
ToRGB
| false | 13,706 |
[
"MIT"
] | 398 |
0c592b3334159613ebe4a33bd6c4ea042dac42d4
|
https://github.com/G-arj/StyleSwin/tree/0c592b3334159613ebe4a33bd6c4ea042dac42d4
|
AdaptiveInstanceNorm
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/yg/cygzi6evso6kefobgrwgjcxh5qgbn7zouyf7to742xeh6bsflplb.py
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
# Source node to ATen node mapping:
# mul => mul
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, 0.5), kwargs = {})
triton_poi_fused_mul_0 = async_compile.triton('triton_poi_fused_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + (x0), tmp2, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/w2/cw2ewjqp7cqlou66lpyb4lfrpmwfkmlgkmopwppntv4ld2kbxemk.py
# Topologically Sorted Source Nodes: [instance_norm], Original ATen: [aten._native_batch_norm_legit]
# Source node to ATen node mapping:
# instance_norm => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%unsqueeze_1, [0, 2]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt : [num_users=1] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_poi_fused__native_batch_norm_legit_1 = async_compile.triton('triton_poi_fused__native_batch_norm_legit_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__native_batch_norm_legit_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__native_batch_norm_legit_1(in_ptr0, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + (x0), tmp8, xmask)
tl.store(out_ptr1 + (x0), tmp23, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/bs/cbsry3mpblzk2o5kxvt5pk3hutaioj4ipafivzfkl7hyfyoyiday.py
# Topologically Sorted Source Nodes: [mul_2, out_2], Original ATen: [aten.mul, aten.add]
# Source node to ATen node mapping:
# mul_2 => mul_3
# out_2 => add_1
# Graph fragment:
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem, %squeeze_2), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %getitem_1), kwargs = {})
triton_poi_fused_add_mul_2 = async_compile.triton('triton_poi_fused_add_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4) % 4
x2 = (xindex // 16)
x3 = xindex % 16
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1 + (8*x2)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + (x3), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (4 + x1 + (8*x2)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (4 + x1), xmask, eviction_policy='evict_last')
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = tmp0 + tmp3
tmp7 = tmp5 - tmp6
tmp9 = tmp7 * tmp8
tmp10 = tmp4 * tmp9
tmp13 = tmp12 * tmp2
tmp14 = tmp11 + tmp13
tmp15 = tmp10 + tmp14
tl.store(out_ptr0 + (x4), tmp15, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (8, 4), (4, 1))
assert_size_stride(primals_2, (8, ), (1, ))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((8, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul], Original ATen: [aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_0.run(primals_1, buf0, 32, grid=grid(32), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(primals_3, reinterpret_tensor(buf0, (4, 8), (1, 4), 0), out=buf1)
del buf0
buf2 = empty_strided_cuda((1, 4, 1), (4, 1, 4), torch.float32)
buf3 = empty_strided_cuda((1, 4, 1), (4, 1, 4), torch.float32)
# Topologically Sorted Source Nodes: [instance_norm], Original ATen: [aten._native_batch_norm_legit]
triton_poi_fused__native_batch_norm_legit_1.run(primals_4, buf2, buf3, 4, grid=grid(4), stream=stream0)
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul_2, out_2], Original ATen: [aten.mul, aten.add]
triton_poi_fused_add_mul_2.run(buf1, primals_2, primals_4, buf2, buf3, buf4, 64, grid=grid(64), stream=stream0)
del buf1
del buf2
del buf3
del primals_2
return (buf4, primals_3, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((8, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
from torch.autograd import Function
import math
import torch
from torch import nn
from torch.nn import functional as F
from torch.cuda.amp import custom_fwd
from torch.cuda.amp import custom_bwd
def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
if input.device.type == 'cpu':
rest_dim = [1] * (input.ndim - bias.ndim - 1)
return F.leaky_relu(input + bias.view(1, bias.shape[0], *rest_dim),
negative_slope=0.2) * scale
else:
return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale)
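# Note: the CUDA branch relies on fused.fused_bias_act, a compiled C++/CUDA
# extension from the original StyleGAN2 ops that is never imported in this
# extract, so only the CPU branch runs as-is. The CPU branch also hardcodes
# negative_slope=0.2 instead of using the argument, faithfully to the
# upstream source.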
class FusedLeakyReLUFunctionBackward(Function):
@staticmethod
def forward(ctx, grad_output, out, negative_slope, scale):
ctx.save_for_backward(out)
ctx.negative_slope = negative_slope
ctx.scale = scale
empty = grad_output.new_empty(0)
grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1,
negative_slope, scale)
dim = [0]
if grad_input.ndim > 2:
dim += list(range(2, grad_input.ndim))
grad_bias = grad_input.sum(dim).detach()
return grad_input, grad_bias
@staticmethod
def backward(ctx, gradgrad_input, gradgrad_bias):
out, = ctx.saved_tensors
gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias,
out, 3, 1, ctx.negative_slope, ctx.scale)
return gradgrad_out, None, None, None
class FusedLeakyReLUFunction(Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float32)
def forward(ctx, input, bias, negative_slope, scale):
empty = input.new_empty(0)
out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope,
scale)
ctx.save_for_backward(out)
ctx.negative_slope = negative_slope
ctx.scale = scale
return out
@staticmethod
@custom_bwd
def backward(ctx, grad_output):
out, = ctx.saved_tensors
grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply(
grad_output, out, ctx.negative_slope, ctx.scale)
return grad_input, grad_bias, None, None
class EqualLinear(nn.Module):
def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1,
activation=None):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
if bias:
self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
else:
self.bias = None
self.activation = activation
self.scale = 1 / math.sqrt(in_dim) * lr_mul
self.lr_mul = lr_mul
def forward(self, input):
if self.activation:
out = F.linear(input, self.weight * self.scale)
out = fused_leaky_relu(out, self.bias * self.lr_mul)
else:
out = F.linear(input, self.weight * self.scale, bias=self.bias *
self.lr_mul)
return out
def __repr__(self):
return (
f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'
)
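# EqualLinear implements StyleGAN's equalized learning rate: the weight is
# rescaled at run time by scale = lr_mul / sqrt(in_dim). With in_dim=4 and
# lr_mul=1 that gives scale = 0.5, exactly the constant baked into
# triton_poi_fused_mul_0 in the compiled code below.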
class AdaptiveInstanceNorm(nn.Module):
def __init__(self, in_channel, style_dim):
super().__init__()
self.norm = nn.InstanceNorm1d(in_channel)
self.style = EqualLinear(style_dim, in_channel * 2)
def forward(self, input, style):
style = self.style(style).unsqueeze(-1)
gamma, beta = style.chunk(2, 1)
out = self.norm(input)
out = gamma * out + beta
return out
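# AdaIN in one line: out = gamma * (x - mean(x)) / sqrt(var(x) + eps) + beta,
# where gamma and beta are the two halves of the style projection along dim 1.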
def get_inputs():
return [torch.rand([4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {'in_channel': 4, 'style_dim': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch.autograd import Function
import math
from torch import nn
from torch.nn import functional as F
from torch.cuda.amp import custom_fwd
from torch.cuda.amp import custom_bwd
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.5
tmp2 = tmp0 * tmp1
tl.store(out_ptr0 + x0, tmp2, xmask)
@triton.jit
def triton_poi_fused__native_batch_norm_legit_1(in_ptr0, out_ptr0, out_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp7 = 4.0
tmp8 = tmp6 / tmp7
tmp9 = tmp0 - tmp8
tmp10 = tmp9 * tmp9
tmp11 = tmp1 - tmp8
tmp12 = tmp11 * tmp11
tmp13 = tmp10 + tmp12
tmp14 = tmp3 - tmp8
tmp15 = tmp14 * tmp14
tmp16 = tmp13 + tmp15
tmp17 = tmp5 - tmp8
tmp18 = tmp17 * tmp17
tmp19 = tmp16 + tmp18
tmp20 = tmp19 / tmp7
tmp21 = 1e-05
tmp22 = tmp20 + tmp21
tmp23 = libdevice.rsqrt(tmp22)
tl.store(out_ptr0 + x0, tmp8, xmask)
tl.store(out_ptr1 + x0, tmp23, xmask)
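# The kernel above computes, for each of the 4 rows of the input, the
# instance-norm statistics over its 4 elements: the mean (tmp8) and
# rsqrt(variance + 1e-05) (tmp23), both consumed by
# triton_poi_fused_add_mul_2 below.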
@triton.jit
def triton_poi_fused_add_mul_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex % 16
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1 + 8 * x2), xmask, eviction_policy='evict_last'
)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr2 + x3, xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (4 + x1 + 8 * x2), xmask, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr1 + (4 + x1), xmask, eviction_policy='evict_last')
tmp2 = 1.0
tmp3 = tmp1 * tmp2
tmp4 = tmp0 + tmp3
tmp7 = tmp5 - tmp6
tmp9 = tmp7 * tmp8
tmp10 = tmp4 * tmp9
tmp13 = tmp12 * tmp2
tmp14 = tmp11 + tmp13
tmp15 = tmp10 + tmp14
tl.store(out_ptr0 + x4, tmp15, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (8, 4), (4, 1))
assert_size_stride(primals_2, (8,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((8, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_0[grid(32)](primals_1, buf0, 32, XBLOCK=32,
num_warps=1, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 8), (8, 1), torch.float32)
extern_kernels.mm(primals_3, reinterpret_tensor(buf0, (4, 8), (1, 4
), 0), out=buf1)
del buf0
buf2 = empty_strided_cuda((1, 4, 1), (4, 1, 4), torch.float32)
buf3 = empty_strided_cuda((1, 4, 1), (4, 1, 4), torch.float32)
triton_poi_fused__native_batch_norm_legit_1[grid(4)](primals_4,
buf2, buf3, 4, XBLOCK=4, num_warps=1, num_stages=1)
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_mul_2[grid(64)](buf1, primals_2, primals_4,
buf2, buf3, buf4, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf1
del buf2
del buf3
del primals_2
return buf4, primals_3, primals_4
def fused_leaky_relu(input, bias, negative_slope=0.2, scale=2 ** 0.5):
if input.device.type == 'cpu':
rest_dim = [1] * (input.ndim - bias.ndim - 1)
return F.leaky_relu(input + bias.view(1, bias.shape[0], *rest_dim),
negative_slope=0.2) * scale
else:
return FusedLeakyReLUFunction.apply(input, bias, negative_slope, scale)
class FusedLeakyReLUFunctionBackward(Function):
@staticmethod
def forward(ctx, grad_output, out, negative_slope, scale):
ctx.save_for_backward(out)
ctx.negative_slope = negative_slope
ctx.scale = scale
empty = grad_output.new_empty(0)
grad_input = fused.fused_bias_act(grad_output, empty, out, 3, 1,
negative_slope, scale)
dim = [0]
if grad_input.ndim > 2:
dim += list(range(2, grad_input.ndim))
grad_bias = grad_input.sum(dim).detach()
return grad_input, grad_bias
@staticmethod
def backward(ctx, gradgrad_input, gradgrad_bias):
out, = ctx.saved_tensors
gradgrad_out = fused.fused_bias_act(gradgrad_input, gradgrad_bias,
out, 3, 1, ctx.negative_slope, ctx.scale)
return gradgrad_out, None, None, None
class FusedLeakyReLUFunction(Function):
@staticmethod
@custom_fwd(cast_inputs=torch.float32)
def forward(ctx, input, bias, negative_slope, scale):
empty = input.new_empty(0)
out = fused.fused_bias_act(input, bias, empty, 3, 0, negative_slope,
scale)
ctx.save_for_backward(out)
ctx.negative_slope = negative_slope
ctx.scale = scale
return out
@staticmethod
@custom_bwd
def backward(ctx, grad_output):
out, = ctx.saved_tensors
grad_input, grad_bias = FusedLeakyReLUFunctionBackward.apply(
grad_output, out, ctx.negative_slope, ctx.scale)
return grad_input, grad_bias, None, None
class EqualLinear(nn.Module):
def __init__(self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1,
activation=None):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul))
if bias:
self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init))
else:
self.bias = None
self.activation = activation
self.scale = 1 / math.sqrt(in_dim) * lr_mul
self.lr_mul = lr_mul
def forward(self, input):
if self.activation:
out = F.linear(input, self.weight * self.scale)
out = fused_leaky_relu(out, self.bias * self.lr_mul)
else:
out = F.linear(input, self.weight * self.scale, bias=self.bias *
self.lr_mul)
return out
def __repr__(self):
return (
f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})'
)
class AdaptiveInstanceNormNew(nn.Module):
def __init__(self, in_channel, style_dim):
super().__init__()
self.norm = nn.InstanceNorm1d(in_channel)
self.style = EqualLinear(style_dim, in_channel * 2)
def forward(self, input_0, input_1):
primals_1 = self.style.weight
primals_2 = self.style.bias
primals_3 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
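# Hypothetical usage of the compiled wrapper (CUDA required, since call()
# allocates every buffer on cuda:0):
#   adain = AdaptiveInstanceNormNew(in_channel=4, style_dim=4).cuda()
#   out = adain(torch.rand(4, 4, device='cuda'),
#               torch.rand(4, 4, device='cuda'))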
|
G-arj/StyleSwin
|
AdaptiveInstanceNorm
| false | 13,707 |
[
"MIT"
] | 398 |
0c592b3334159613ebe4a33bd6c4ea042dac42d4
|
https://github.com/G-arj/StyleSwin/tree/0c592b3334159613ebe4a33bd6c4ea042dac42d4
|
VAE
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/q7/cq75winnysem6xhosadt6noej64bzsxr6gzsm2fc2lah52csbkdm.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_0 = async_compile.triton('triton_poi_fused_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 128
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = (yindex // 4)
tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (4*x2) + (64*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
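# Like the Encoder record earlier, the prologue kernels of this VAE record
# (triton_poi_fused_0, _1, _2, and evidently the truncated _3) are pure
# layout transposes: each one only reloads and stores values with remapped
# indices to repack weights and inputs into a channels-last arrangement
# before the extern convolutions run.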
# kernel path: runs/run_shard_0/inductor_cache/lq/clqpojw3nbzqfutiuorzwvs6xjljcuuy2acp4zwufgezfm6n5yiq.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_1 = async_compile.triton('triton_poi_fused_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = (yindex // 4)
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (4*x2) + (16384*y1)), tmp0, ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/wr/cwrbaplpfk7m6giisotqeykajo7urpubzk4y7hl6wjrhxxtwwukj.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_2 = async_compile.triton('triton_poi_fused_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 2048
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = (yindex // 32)
tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (32*x2) + (512*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/dx/cdx5ml2qpofihmmpnvabqkpaoyptwmwdx4jtjzptieewtlhrqlmf.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_3 = async_compile.triton('triton_poi_fused_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 8192
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (64*x2) + (1024*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/kv/ckvorupxanzrceis7ogps6qnxhad4srcb6zrfzpkwhenxdnsalg7.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_4 = async_compile.triton('triton_poi_fused_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768, 16], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 32768
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = (yindex // 128)
tmp0 = tl.load(in_ptr0 + (x2 + (16*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (128*x2) + (2048*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/gj/cgjk6oyn7d2k7tawn6q6nelsui2ldu54ytbdku7v7hgqzgohxqri.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_5 = async_compile.triton('triton_poi_fused_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 131072
xnumel = 25
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = (yindex // 128)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (128*x2) + (3200*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/xd/cxdxb7tcecdrygp7d6dxpeakmpxug2fn4gzukjyf4vazkydyidln.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_6 = async_compile.triton('triton_poi_fused_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192, 32], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 8192
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = (yindex // 64)
tmp0 = tl.load(in_ptr0 + (x2 + (25*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (64*x2) + (1600*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/zc/czcxgooldgpjdotlr54s2gygpkelgbayy4gcii54levkma7slwhu.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_7 = async_compile.triton('triton_poi_fused_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[2048, 64], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 2048
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = (yindex // 32)
tmp0 = tl.load(in_ptr0 + (x2 + (36*y3)), xmask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (32*x2) + (1152*y1)), tmp0, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/mb/cmb4laghcqbimbfo3gxc7yvu357kjhuqejcm43fs6qql2j2esyav.py
# Unsorted Source Nodes: [], Original ATen: []
# Source node to ATen node mapping:
triton_poi_fused_8 = async_compile.triton('triton_poi_fused_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128, 64], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 128
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = (yindex // 4)
tmp0 = tl.load(in_ptr0 + (x2 + (36*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (4*x2) + (144*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/nn/cnnrha2fherbxf4u4ol3reswxwrhd2on7n4ktcvs6jj5lim7f4hb.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_9 = async_compile.triton('triton_poi_fused_convolution_relu_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_9', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 123008
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
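# triton_poi_fused_convolution_relu_9 is the epilogue of the first conv: the
# extern convolution writes its bias-free result into the buffer, and this
# kernel then adds the bias and applies ReLU in place (in_out_ptr0 is both read
# and written). Because the activation is channels-last, `xindex % 32` recovers
# the channel index for the bias lookup. Eager sketch of the fused epilogue:
#
#   x = torch.relu(conv_out + bias.view(1, -1, 1, 1))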
# kernel path: runs/run_shard_0/inductor_cache/e3/ce32guj5uo4yfbgfyav7w7f5l7pqh2dwdpgu5s7bvggocb654zst.py
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_1 => convolution_1
# x_1 => relu_1
# Graph fragment:
# %convolution_1 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_1,), kwargs = {})
triton_poi_fused_convolution_relu_10 = async_compile.triton('triton_poi_fused_convolution_relu_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_10', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_10(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 50176
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/iy/ciyceulo5ucqtwtx5ngamvwhtb6klh6npn5n2vyna4e3z3wvxoh7.py
# Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# x_2 => relu_2
# Graph fragment:
# %convolution_2 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_1, %primals_6, %primals_7, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_2 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_2,), kwargs = {})
triton_poi_fused_convolution_relu_11 = async_compile.triton('triton_poi_fused_convolution_relu_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_11', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 18432
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/qo/cqoyhizmed2eqczsctwwli6z6t6s7lmxxawtcrmzjclvfd2clc7v.py
# Topologically Sorted Source Nodes: [conv2d_3, x_3], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# conv2d_3 => convolution_3
# x_3 => relu_3
# Graph fragment:
# %convolution_3 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_2, %primals_8, %primals_9, [2, 2], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu_3 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_3,), kwargs = {})
# %le_4 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_3, 0), kwargs = {})
triton_poi_fused_convolution_relu_threshold_backward_12 = async_compile.triton('triton_poi_fused_convolution_relu_threshold_backward_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024, 4], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*i1', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_threshold_backward_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_12(in_ptr0, in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 1024
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 256
y1 = (yindex // 256)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (256*x2) + (1024*y1)), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2 + (4*y3)), tmp4, xmask)
tl.store(out_ptr1 + (y0 + (256*x2) + (1024*y1)), tmp6, xmask)
''', device_str='cuda')
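# Besides the bias-add + ReLU epilogue, the kernel above materializes the
# boolean mask `relu(x) <= 0` that aten.threshold_backward consumes to zero
# gradients, and it stores the activation back in contiguous NCHW layout so
# the (4, 1024) flatten for the linear layers is a free reinterpret.
# Illustrative eager sketch of the saved-for-backward bookkeeping:
#
#   y = torch.relu(conv_out + bias.view(1, -1, 1, 1))
#   le_mask = y <= 0    # backward: grad_in = grad_out.masked_fill(le_mask, 0)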
# kernel path: runs/run_shard_0/inductor_cache/z6/cz6agmgbvpbegb2vpnyhdf5hbitnlkigqr3264t2oidwyhq5zbt5.py
# Topologically Sorted Source Nodes: [sigma, mul, z], Original ATen: [aten.exp, aten.mul, aten.add]
# Source node to ATen node mapping:
# mul => mul
# sigma => exp
# z => add
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%addmm_1,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%randn, %exp), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %addmm), kwargs = {})
triton_poi_fused_add_exp_mul_13 = async_compile.triton('triton_poi_fused_add_exp_mul_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_exp_mul_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_exp_mul_13(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask)
tmp4 = tl.load(in_ptr2 + (x0), xmask)
tmp2 = tl_math.exp(tmp1)
tmp3 = tmp0 * tmp2
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
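# The kernel above is the VAE reparameterization trick fused into a single
# elementwise pass over the 4x4 latent: z = eps * exp(logsigma) + mu, matching
# the `sigma`, `mul`, `z` nodes in the header. Eager equivalent:
#
#   sigma = logsigma.exp()
#   z = eps * sigma + mu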
# kernel path: runs/run_shard_0/inductor_cache/yo/cyo7mrbkbxytlgwjzo5zy6seg3g4hmpc4npfybdxetljslnyaoyn.py
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_5 => relu_4
# Graph fragment:
# %add_tensor : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mm_default, %primals_15), kwargs = {})
# %relu_4 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_tensor,), kwargs = {})
# %le_3 : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_4, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_14 = async_compile.triton('triton_poi_fused_relu_threshold_backward_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4096],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_14', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_14(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4096
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 1024
tmp0 = tl.load(in_out_ptr0 + (x2), None)
tmp1 = tl.load(in_ptr0 + (x0), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, None)
tl.store(out_ptr0 + (x2), tmp6, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/wb/cwbu4ghjbsppchfi4n3xvx6dxq2prkxpnirbyjw4k26veiz3bwbp.py
# Topologically Sorted Source Nodes: [conv_transpose2d, x_7], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv_transpose2d => convolution_4
# x_7 => relu_5
# Graph fragment:
# %convolution_4 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%unsqueeze_1, %primals_16, %primals_17, [2, 2], [0, 0], [1, 1], True, [0, 0], 1), kwargs = {})
# %relu_5 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_4,), kwargs = {})
triton_poi_fused_convolution_relu_15 = async_compile.triton('triton_poi_fused_convolution_relu_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_15', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_15(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 12800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ki/ckiytdtk6bbkxiexegri7l2e2lsqu73pxroxeibcovuhiymcvnh2.py
# Topologically Sorted Source Nodes: [conv_transpose2d_1, x_8], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv_transpose2d_1 => convolution_5
# x_8 => relu_6
# Graph fragment:
# %convolution_5 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_5, %primals_18, %primals_19, [2, 2], [0, 0], [1, 1], True, [0, 0], 1), kwargs = {})
# %relu_6 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_5,), kwargs = {})
triton_poi_fused_convolution_relu_16 = async_compile.triton('triton_poi_fused_convolution_relu_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_16', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_16(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 43264
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ww/cww3gdzm5h4lqdcqon5gadse23acwjsz5ic2xribo6zzsegnijfr.py
# Topologically Sorted Source Nodes: [conv_transpose2d_2, x_9], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv_transpose2d_2 => convolution_6
# x_9 => relu_7
# Graph fragment:
# %convolution_6 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_6, %primals_20, %primals_21, [2, 2], [0, 0], [1, 1], True, [0, 0], 1), kwargs = {})
# %relu_7 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution_6,), kwargs = {})
triton_poi_fused_convolution_relu_17 = async_compile.triton('triton_poi_fused_convolution_relu_17', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_17', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_17(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 115200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ly/clysjt2kizpxijyx7ylwzkxpuhfguzgwqd3wjbbysivophebutqz.py
# Topologically Sorted Source Nodes: [conv_transpose2d_3, reconstruction], Original ATen: [aten.convolution, aten.sigmoid]
# Source node to ATen node mapping:
# conv_transpose2d_3 => convolution_7
# reconstruction => sigmoid
# Graph fragment:
# %convolution_7 : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%relu_7, %primals_22, %primals_23, [2, 2], [0, 0], [1, 1], True, [0, 0], 1), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution_7,), kwargs = {})
triton_poi_fused_convolution_sigmoid_18 = async_compile.triton('triton_poi_fused_convolution_sigmoid_18', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4096], tile_hint=TileHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_sigmoid_18', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_sigmoid_18(in_ptr0, in_ptr1, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16384*y1)), ymask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (y0), ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(out_ptr0 + (x2 + (4096*y3)), tmp3, ymask)
''', device_str='cuda')
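# The final kernel fuses the last deconv bias-add with the sigmoid activation
# and, through its transposed load/store indexing, converts the channels-last
# activation back into the contiguous NCHW tensor returned to the caller.
# Eager sketch:
#
#   reconstruction = torch.sigmoid(deconv_out + bias.view(1, -1, 1, 1)).contiguous()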
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23 = args
args.clear()
assert_size_stride(primals_1, (32, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1))
assert_size_stride(primals_4, (64, 32, 4, 4), (512, 16, 4, 1))
assert_size_stride(primals_5, (64, ), (1, ))
assert_size_stride(primals_6, (128, 64, 4, 4), (1024, 16, 4, 1))
assert_size_stride(primals_7, (128, ), (1, ))
assert_size_stride(primals_8, (256, 128, 4, 4), (2048, 16, 4, 1))
assert_size_stride(primals_9, (256, ), (1, ))
assert_size_stride(primals_10, (4, 1024), (1024, 1))
assert_size_stride(primals_11, (4, ), (1, ))
assert_size_stride(primals_12, (4, 1024), (1024, 1))
assert_size_stride(primals_13, (4, ), (1, ))
assert_size_stride(primals_14, (1024, 4), (4, 1))
assert_size_stride(primals_15, (1024, ), (1, ))
assert_size_stride(primals_16, (1024, 128, 5, 5), (3200, 25, 5, 1))
assert_size_stride(primals_17, (128, ), (1, ))
assert_size_stride(primals_18, (128, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_19, (64, ), (1, ))
assert_size_stride(primals_20, (64, 32, 6, 6), (1152, 36, 6, 1))
assert_size_stride(primals_21, (32, ), (1, ))
assert_size_stride(primals_22, (32, 4, 6, 6), (144, 36, 6, 1))
assert_size_stride(primals_23, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((32, 4, 4, 4), (64, 1, 16, 4), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
stream0 = get_raw_stream(0)
triton_poi_fused_0.run(primals_1, buf0, 128, 16, grid=grid(128, 16), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((4, 4, 64, 64), (16384, 1, 256, 4), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_1.run(primals_3, buf1, 16, 4096, grid=grid(16, 4096), stream=stream0)
del primals_3
buf2 = empty_strided_cuda((64, 32, 4, 4), (512, 1, 128, 32), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_2.run(primals_4, buf2, 2048, 16, grid=grid(2048, 16), stream=stream0)
del primals_4
buf3 = empty_strided_cuda((128, 64, 4, 4), (1024, 1, 256, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_3.run(primals_6, buf3, 8192, 16, grid=grid(8192, 16), stream=stream0)
del primals_6
buf4 = empty_strided_cuda((256, 128, 4, 4), (2048, 1, 512, 128), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_4.run(primals_8, buf4, 32768, 16, grid=grid(32768, 16), stream=stream0)
del primals_8
buf5 = empty_strided_cuda((1024, 128, 5, 5), (3200, 1, 640, 128), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_5.run(primals_16, buf5, 131072, 25, grid=grid(131072, 25), stream=stream0)
del primals_16
buf6 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_6.run(primals_18, buf6, 8192, 25, grid=grid(8192, 25), stream=stream0)
del primals_18
buf7 = empty_strided_cuda((64, 32, 6, 6), (1152, 1, 192, 32), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_7.run(primals_20, buf7, 2048, 36, grid=grid(2048, 36), stream=stream0)
del primals_20
buf8 = empty_strided_cuda((32, 4, 6, 6), (144, 1, 24, 4), torch.float32)
# Unsorted Source Nodes: [], Original ATen: []
triton_poi_fused_8.run(primals_22, buf8, 128, 36, grid=grid(128, 36), stream=stream0)
del primals_22
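        # buf0..buf8 now hold channels-last copies of every conv / deconv
        # weight; the original NCHW parameters have been freed above.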
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf9 = extern_kernels.convolution(buf1, buf0, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 32, 31, 31), (30752, 1, 992, 32))
buf10 = buf9; del buf9 # reuse
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_9.run(buf10, primals_2, 123008, grid=grid(123008), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf11 = extern_kernels.convolution(buf10, buf2, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 64, 14, 14), (12544, 1, 896, 64))
buf12 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, x_1], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_10.run(buf12, primals_5, 50176, grid=grid(50176), stream=stream0)
del primals_5
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf13 = extern_kernels.convolution(buf12, buf3, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 128, 6, 6), (4608, 1, 768, 128))
buf14 = buf13; del buf13 # reuse
# Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_11.run(buf14, primals_7, 18432, grid=grid(18432), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf15 = extern_kernels.convolution(buf14, buf4, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 256, 2, 2), (1024, 1, 512, 256))
buf16 = empty_strided_cuda((4, 256, 2, 2), (1024, 4, 2, 1), torch.float32)
buf33 = empty_strided_cuda((4, 256, 2, 2), (1024, 1, 512, 256), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_3, x_3], Original ATen: [aten.convolution, aten.relu, aten.threshold_backward]
triton_poi_fused_convolution_relu_threshold_backward_12.run(buf15, primals_9, buf16, buf33, 1024, 4, grid=grid(1024, 4), stream=stream0)
del primals_9
buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mu], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_11, reinterpret_tensor(buf16, (4, 1024), (1024, 1), 0), reinterpret_tensor(primals_10, (1024, 4), (1, 1024), 0), alpha=1, beta=1, out=buf17)
del primals_11
buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [logsigma], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_13, reinterpret_tensor(buf16, (4, 1024), (1024, 1), 0), reinterpret_tensor(primals_12, (1024, 4), (1, 1024), 0), alpha=1, beta=1, out=buf18)
del primals_13
# Topologically Sorted Source Nodes: [eps], Original ATen: [aten.randn_like]
buf19 = torch.ops.aten.randn.default([4, 4], dtype=torch.float32, device=device(type='cuda', index=0), pin_memory=False)
buf20 = buf19
del buf19
buf21 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sigma, mul, z], Original ATen: [aten.exp, aten.mul, aten.add]
triton_poi_fused_add_exp_mul_13.run(buf20, buf18, buf17, buf21, 16, grid=grid(16), stream=stream0)
buf22 = reinterpret_tensor(buf15, (4, 1024), (1024, 1), 0); del buf15 # reuse
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(buf21, reinterpret_tensor(primals_14, (4, 1024), (1, 4), 0), out=buf22)
buf23 = buf22; del buf22 # reuse
buf32 = empty_strided_cuda((4, 1024), (1024, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_5], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_14.run(buf23, primals_15, buf32, 4096, grid=grid(4096), stream=stream0)
del primals_15
# Topologically Sorted Source Nodes: [conv_transpose2d], Original ATen: [aten.convolution]
buf24 = extern_kernels.convolution(reinterpret_tensor(buf23, (4, 1024, 1, 1), (1024, 1, 0, 0), 0), buf5, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 128, 5, 5), (3200, 1, 640, 128))
buf25 = buf24; del buf24 # reuse
# Topologically Sorted Source Nodes: [conv_transpose2d, x_7], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_15.run(buf25, primals_17, 12800, grid=grid(12800), stream=stream0)
del primals_17
# Topologically Sorted Source Nodes: [conv_transpose2d_1], Original ATen: [aten.convolution]
buf26 = extern_kernels.convolution(buf25, buf6, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 64, 13, 13), (10816, 1, 832, 64))
buf27 = buf26; del buf26 # reuse
# Topologically Sorted Source Nodes: [conv_transpose2d_1, x_8], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_16.run(buf27, primals_19, 43264, grid=grid(43264), stream=stream0)
del primals_19
# Topologically Sorted Source Nodes: [conv_transpose2d_2], Original ATen: [aten.convolution]
buf28 = extern_kernels.convolution(buf27, buf7, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 32, 30, 30), (28800, 1, 960, 32))
buf29 = buf28; del buf28 # reuse
# Topologically Sorted Source Nodes: [conv_transpose2d_2, x_9], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_17.run(buf29, primals_21, 115200, grid=grid(115200), stream=stream0)
del primals_21
# Topologically Sorted Source Nodes: [conv_transpose2d_3], Original ATen: [aten.convolution]
buf30 = extern_kernels.convolution(buf29, buf8, stride=(2, 2), padding=(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf30, (4, 4, 64, 64), (16384, 1, 256, 4))
buf31 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv_transpose2d_3, reconstruction], Original ATen: [aten.convolution, aten.sigmoid]
triton_poi_fused_convolution_sigmoid_18.run(buf30, primals_23, buf31, 16, 4096, grid=grid(16, 4096), stream=stream0)
del buf30
del primals_23
return (buf31, buf17, buf18, buf0, buf1, buf2, buf3, buf4, buf5, buf6, buf7, buf8, buf10, buf12, buf14, reinterpret_tensor(buf16, (4, 1024), (1024, 1), 0), buf18, buf20, buf21, reinterpret_tensor(buf23, (4, 1024, 1, 1), (1024, 1, 1, 1), 0), buf25, buf27, buf29, buf31, buf32, primals_14, primals_12, primals_10, buf33, )
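# Note that `call` returns the forward outputs (reconstruction buf31, mu buf17,
# logsigma buf18) followed by the tensors autograd needs for backward: the
# reordered weights, intermediate activations, the noise sample buf20, and the
# ReLU masks buf32 / buf33.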
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 64, 64), (16384, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((64, 32, 4, 4), (512, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((128, 64, 4, 4), (1024, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((256, 128, 4, 4), (2048, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, 1024), (1024, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, 1024), (1024, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((1024, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((1024, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((1024, 128, 5, 5), (3200, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((128, 64, 5, 5), (1600, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((64, 32, 6, 6), (1152, 36, 6, 1), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((32, 4, 6, 6), (144, 36, 6, 1), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.utils.data
import torch.nn as nn
import torch.nn.functional as F
class Decoder(nn.Module):
""" VAE decoder """
def __init__(self, img_channels, latent_size):
        super().__init__()
self.latent_size = latent_size
self.img_channels = img_channels
self.fc1 = nn.Linear(latent_size, 1024)
self.deconv1 = nn.ConvTranspose2d(1024, 128, 5, stride=2)
self.deconv2 = nn.ConvTranspose2d(128, 64, 5, stride=2)
self.deconv3 = nn.ConvTranspose2d(64, 32, 6, stride=2)
self.deconv4 = nn.ConvTranspose2d(32, img_channels, 6, stride=2)
def forward(self, x):
x = F.relu(self.fc1(x))
x = x.unsqueeze(-1).unsqueeze(-1)
x = F.relu(self.deconv1(x))
x = F.relu(self.deconv2(x))
x = F.relu(self.deconv3(x))
        reconstruction = torch.sigmoid(self.deconv4(x))
return reconstruction
class Encoder(nn.Module):
""" VAE encoder """
def __init__(self, img_channels, latent_size):
super(Encoder, self).__init__()
self.latent_size = latent_size
self.img_channels = img_channels
self.conv1 = nn.Conv2d(img_channels, 32, 4, stride=2)
self.conv2 = nn.Conv2d(32, 64, 4, stride=2)
self.conv3 = nn.Conv2d(64, 128, 4, stride=2)
self.conv4 = nn.Conv2d(128, 256, 4, stride=2)
self.fc_mu = nn.Linear(2 * 2 * 256, latent_size)
self.fc_logsigma = nn.Linear(2 * 2 * 256, latent_size)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = F.relu(self.conv4(x))
x = x.view(x.size(0), -1)
mu = self.fc_mu(x)
logsigma = self.fc_logsigma(x)
return mu, logsigma
class VAE(nn.Module):
""" Variational Autoencoder """
def __init__(self, img_channels, latent_size):
super(VAE, self).__init__()
self.encoder = Encoder(img_channels, latent_size)
self.decoder = Decoder(img_channels, latent_size)
def forward(self, x):
mu, logsigma = self.encoder(x)
sigma = logsigma.exp()
eps = torch.randn_like(sigma)
z = eps.mul(sigma).add_(mu)
recon_x = self.decoder(z)
return recon_x, mu, logsigma
def get_inputs():
return [torch.rand([4, 4, 64, 64])]
def get_init_inputs():
return [[], {'img_channels': 4, 'latent_size': 4}]
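def _vae_usage_sketch():
    # Editor's addition, not from the source repo: one eager forward/backward
    # pass with the standard VAE objective. Since logsigma = log(sigma), the
    # KL term uses 2*logsigma in place of log(sigma^2).
    model = VAE(img_channels=4, latent_size=4)
    x = torch.rand(4, 4, 64, 64)
    recon_x, mu, logsigma = model(x)
    bce = F.binary_cross_entropy(recon_x, x, reduction='sum')
    kld = -0.5 * torch.sum(1 + 2 * logsigma - mu.pow(2) - (2 * logsigma).exp())
    (bce + kld).backward()
    return recon_x.shape  # torch.Size([4, 4, 64, 64])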
|
import torch
from torch import device
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.utils.data
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 128
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (y0 + 4 * x2 + 64 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 4 * x2 + 16384 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 32 * x2 + 512 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_3(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 64 * x2 + 1024 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_4(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 16 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 128 * x2 + 2048 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_5(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = (tl.program_id(1) + tl.program_id(2) * tl.num_programs(1)
) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 128
y1 = yindex // 128
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 128 * x2 + 3200 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_6(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 25
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 64
y1 = yindex // 64
tmp0 = tl.load(in_ptr0 + (x2 + 25 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 64 * x2 + 1600 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_7(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 32
y1 = yindex // 32
tmp0 = tl.load(in_ptr0 + (x2 + 36 * y3), xmask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 32 * x2 + 1152 * y1), tmp0, xmask)
@triton.jit
def triton_poi_fused_8(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK: tl.
constexpr, XBLOCK: tl.constexpr):
ynumel = 128
xnumel = 36
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 36 * y3), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (y0 + 4 * x2 + 144 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_relu_9(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 123008
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_10(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 50176
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_11(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, None)
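# The next kernel adds the conv4 bias, applies ReLU, and also stores the
# (output <= 0) mask that ReLU's backward pass will need.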
@triton.jit
def triton_poi_fused_convolution_relu_threshold_backward_12(in_ptr0,
in_ptr1, out_ptr0, out_ptr1, ynumel, xnumel, YBLOCK: tl.constexpr,
XBLOCK: tl.constexpr):
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 256
y1 = yindex // 256
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 256 * x2 + 1024 * y1), xmask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1, 1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(out_ptr0 + (x2 + 4 * y3), tmp4, xmask)
tl.store(out_ptr1 + (y0 + 256 * x2 + 1024 * y1), tmp6, xmask)
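# The next kernel fuses the reparameterization trick z = eps * exp(logsigma) + mu;
# at the call site below, in_ptr0 is the randn draw, in_ptr1 is logsigma and
# in_ptr2 is mu.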
@triton.jit
def triton_poi_fused_add_exp_mul_13(in_ptr0, in_ptr1, in_ptr2, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask)
tmp4 = tl.load(in_ptr2 + x0, xmask)
tmp2 = tl_math.exp(tmp1)
tmp3 = tmp0 * tmp2
tmp5 = tmp3 + tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_relu_threshold_backward_14(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x2 = xindex
x0 = xindex % 1024
tmp0 = tl.load(in_out_ptr0 + x2, None)
tmp1 = tl.load(in_ptr0 + x0, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, None)
tl.store(out_ptr0 + x2, tmp6, None)
@triton.jit
def triton_poi_fused_convolution_relu_15(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 12800
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 128
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_16(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 43264
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 64
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_17(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 115200
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 32
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
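# The next kernel fuses the final deconv bias add with the output sigmoid and
# writes the result back in contiguous NCHW layout.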
@triton.jit
def triton_poi_fused_convolution_sigmoid_18(in_ptr0, in_ptr1, out_ptr0,
ynumel, xnumel, YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16384 * y1), ymask,
eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + y0, ymask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.sigmoid(tmp2)
tl.store(out_ptr0 + (x2 + 4096 * y3), tmp3, ymask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22, primals_23
) = args
args.clear()
assert_size_stride(primals_1, (32, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1))
assert_size_stride(primals_4, (64, 32, 4, 4), (512, 16, 4, 1))
assert_size_stride(primals_5, (64,), (1,))
assert_size_stride(primals_6, (128, 64, 4, 4), (1024, 16, 4, 1))
assert_size_stride(primals_7, (128,), (1,))
assert_size_stride(primals_8, (256, 128, 4, 4), (2048, 16, 4, 1))
assert_size_stride(primals_9, (256,), (1,))
assert_size_stride(primals_10, (4, 1024), (1024, 1))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4, 1024), (1024, 1))
assert_size_stride(primals_13, (4,), (1,))
assert_size_stride(primals_14, (1024, 4), (4, 1))
assert_size_stride(primals_15, (1024,), (1,))
assert_size_stride(primals_16, (1024, 128, 5, 5), (3200, 25, 5, 1))
assert_size_stride(primals_17, (128,), (1,))
assert_size_stride(primals_18, (128, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_19, (64,), (1,))
assert_size_stride(primals_20, (64, 32, 6, 6), (1152, 36, 6, 1))
assert_size_stride(primals_21, (32,), (1,))
assert_size_stride(primals_22, (32, 4, 6, 6), (144, 36, 6, 1))
assert_size_stride(primals_23, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((32, 4, 4, 4), (64, 1, 16, 4), torch.float32)
get_raw_stream(0)
triton_poi_fused_0[grid(128, 16)](primals_1, buf0, 128, 16, XBLOCK=
16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((4, 4, 64, 64), (16384, 1, 256, 4), torch
.float32)
triton_poi_fused_1[grid(16, 4096)](primals_3, buf1, 16, 4096,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del primals_3
buf2 = empty_strided_cuda((64, 32, 4, 4), (512, 1, 128, 32), torch.
float32)
triton_poi_fused_2[grid(2048, 16)](primals_4, buf2, 2048, 16,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_4
buf3 = empty_strided_cuda((128, 64, 4, 4), (1024, 1, 256, 64),
torch.float32)
triton_poi_fused_3[grid(8192, 16)](primals_6, buf3, 8192, 16,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_6
buf4 = empty_strided_cuda((256, 128, 4, 4), (2048, 1, 512, 128),
torch.float32)
triton_poi_fused_4[grid(32768, 16)](primals_8, buf4, 32768, 16,
XBLOCK=16, YBLOCK=64, num_warps=4, num_stages=1)
del primals_8
buf5 = empty_strided_cuda((1024, 128, 5, 5), (3200, 1, 640, 128),
torch.float32)
triton_poi_fused_5[grid(131072, 25)](primals_16, buf5, 131072, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_16
buf6 = empty_strided_cuda((128, 64, 5, 5), (1600, 1, 320, 64),
torch.float32)
triton_poi_fused_6[grid(8192, 25)](primals_18, buf6, 8192, 25,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_18
buf7 = empty_strided_cuda((64, 32, 6, 6), (1152, 1, 192, 32), torch
.float32)
triton_poi_fused_7[grid(2048, 36)](primals_20, buf7, 2048, 36,
XBLOCK=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_20
buf8 = empty_strided_cuda((32, 4, 6, 6), (144, 1, 24, 4), torch.float32
)
triton_poi_fused_8[grid(128, 36)](primals_22, buf8, 128, 36, XBLOCK
=32, YBLOCK=32, num_warps=4, num_stages=1)
del primals_22
buf9 = extern_kernels.convolution(buf1, buf0, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf9, (4, 32, 31, 31), (30752, 1, 992, 32))
buf10 = buf9
del buf9
triton_poi_fused_convolution_relu_9[grid(123008)](buf10, primals_2,
123008, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_2
buf11 = extern_kernels.convolution(buf10, buf2, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf11, (4, 64, 14, 14), (12544, 1, 896, 64))
buf12 = buf11
del buf11
triton_poi_fused_convolution_relu_10[grid(50176)](buf12, primals_5,
50176, XBLOCK=512, num_warps=4, num_stages=1)
del primals_5
buf13 = extern_kernels.convolution(buf12, buf3, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf13, (4, 128, 6, 6), (4608, 1, 768, 128))
buf14 = buf13
del buf13
triton_poi_fused_convolution_relu_11[grid(18432)](buf14, primals_7,
18432, XBLOCK=256, num_warps=4, num_stages=1)
del primals_7
buf15 = extern_kernels.convolution(buf14, buf4, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf15, (4, 256, 2, 2), (1024, 1, 512, 256))
buf16 = empty_strided_cuda((4, 256, 2, 2), (1024, 4, 2, 1), torch.
float32)
buf33 = empty_strided_cuda((4, 256, 2, 2), (1024, 1, 512, 256),
torch.bool)
triton_poi_fused_convolution_relu_threshold_backward_12[grid(1024, 4)](
buf15, primals_9, buf16, buf33, 1024, 4, XBLOCK=1, YBLOCK=256,
num_warps=4, num_stages=1)
del primals_9
buf17 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_11, reinterpret_tensor(buf16, (4, 1024
), (1024, 1), 0), reinterpret_tensor(primals_10, (1024, 4), (1,
1024), 0), alpha=1, beta=1, out=buf17)
del primals_11
buf18 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_13, reinterpret_tensor(buf16, (4, 1024
), (1024, 1), 0), reinterpret_tensor(primals_12, (1024, 4), (1,
1024), 0), alpha=1, beta=1, out=buf18)
del primals_13
buf19 = torch.ops.aten.randn.default([4, 4], dtype=torch.float32,
device=device(type='cuda', index=0), pin_memory=False)
buf20 = buf19
del buf19
buf21 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
triton_poi_fused_add_exp_mul_13[grid(16)](buf20, buf18, buf17,
buf21, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf22 = reinterpret_tensor(buf15, (4, 1024), (1024, 1), 0)
del buf15
extern_kernels.mm(buf21, reinterpret_tensor(primals_14, (4, 1024),
(1, 4), 0), out=buf22)
buf23 = buf22
del buf22
buf32 = empty_strided_cuda((4, 1024), (1024, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_14[grid(4096)](buf23,
primals_15, buf32, 4096, XBLOCK=256, num_warps=4, num_stages=1)
del primals_15
buf24 = extern_kernels.convolution(reinterpret_tensor(buf23, (4,
1024, 1, 1), (1024, 1, 0, 0), 0), buf5, stride=(2, 2), padding=
(0, 0), dilation=(1, 1), transposed=True, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf24, (4, 128, 5, 5), (3200, 1, 640, 128))
buf25 = buf24
del buf24
triton_poi_fused_convolution_relu_15[grid(12800)](buf25, primals_17,
12800, XBLOCK=256, num_warps=4, num_stages=1)
del primals_17
buf26 = extern_kernels.convolution(buf25, buf6, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf26, (4, 64, 13, 13), (10816, 1, 832, 64))
buf27 = buf26
del buf26
triton_poi_fused_convolution_relu_16[grid(43264)](buf27, primals_19,
43264, XBLOCK=256, num_warps=4, num_stages=1)
del primals_19
buf28 = extern_kernels.convolution(buf27, buf7, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 32, 30, 30), (28800, 1, 960, 32))
buf29 = buf28
del buf28
triton_poi_fused_convolution_relu_17[grid(115200)](buf29,
primals_21, 115200, XBLOCK=1024, num_warps=4, num_stages=1)
del primals_21
buf30 = extern_kernels.convolution(buf29, buf8, stride=(2, 2),
padding=(0, 0), dilation=(1, 1), transposed=True,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf30, (4, 4, 64, 64), (16384, 1, 256, 4))
buf31 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1),
torch.float32)
triton_poi_fused_convolution_sigmoid_18[grid(16, 4096)](buf30,
primals_23, buf31, 16, 4096, XBLOCK=64, YBLOCK=16, num_warps=4,
num_stages=1)
del buf30
del primals_23
return (buf31, buf17, buf18, buf0, buf1, buf2, buf3, buf4, buf5, buf6,
buf7, buf8, buf10, buf12, buf14, reinterpret_tensor(buf16, (4, 1024
), (1024, 1), 0), buf18, buf20, buf21, reinterpret_tensor(buf23, (4,
1024, 1, 1), (1024, 1, 1, 1), 0), buf25, buf27, buf29, buf31, buf32,
primals_14, primals_12, primals_10, buf33)
class Decoder(nn.Module):
""" VAE decoder """
def __init__(self, img_channels, latent_size):
super(Decoder, self).__init__()
self.latent_size = latent_size
self.img_channels = img_channels
self.fc1 = nn.Linear(latent_size, 1024)
self.deconv1 = nn.ConvTranspose2d(1024, 128, 5, stride=2)
self.deconv2 = nn.ConvTranspose2d(128, 64, 5, stride=2)
self.deconv3 = nn.ConvTranspose2d(64, 32, 6, stride=2)
self.deconv4 = nn.ConvTranspose2d(32, img_channels, 6, stride=2)
def forward(self, x):
x = F.relu(self.fc1(x))
x = x.unsqueeze(-1).unsqueeze(-1)
x = F.relu(self.deconv1(x))
x = F.relu(self.deconv2(x))
x = F.relu(self.deconv3(x))
        reconstruction = torch.sigmoid(self.deconv4(x))
return reconstruction
class Encoder(nn.Module):
""" VAE encoder """
def __init__(self, img_channels, latent_size):
super(Encoder, self).__init__()
self.latent_size = latent_size
self.img_channels = img_channels
self.conv1 = nn.Conv2d(img_channels, 32, 4, stride=2)
self.conv2 = nn.Conv2d(32, 64, 4, stride=2)
self.conv3 = nn.Conv2d(64, 128, 4, stride=2)
self.conv4 = nn.Conv2d(128, 256, 4, stride=2)
self.fc_mu = nn.Linear(2 * 2 * 256, latent_size)
self.fc_logsigma = nn.Linear(2 * 2 * 256, latent_size)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = F.relu(self.conv4(x))
x = x.view(x.size(0), -1)
mu = self.fc_mu(x)
logsigma = self.fc_logsigma(x)
return mu, logsigma
class VAENew(nn.Module):
""" Variational Autoencoder """
def __init__(self, img_channels, latent_size):
super(VAENew, self).__init__()
self.encoder = Encoder(img_channels, latent_size)
self.decoder = Decoder(img_channels, latent_size)
def forward(self, input_0):
primals_1 = self.encoder.conv1.weight
primals_2 = self.encoder.conv1.bias
primals_4 = self.encoder.conv2.weight
primals_5 = self.encoder.conv2.bias
primals_6 = self.encoder.conv3.weight
primals_7 = self.encoder.conv3.bias
primals_8 = self.encoder.conv4.weight
primals_9 = self.encoder.conv4.bias
primals_10 = self.encoder.fc_mu.weight
primals_11 = self.encoder.fc_mu.bias
primals_12 = self.encoder.fc_logsigma.weight
primals_13 = self.encoder.fc_logsigma.bias
primals_14 = self.decoder.fc1.weight
primals_15 = self.decoder.fc1.bias
primals_16 = self.decoder.deconv1.weight
primals_17 = self.decoder.deconv1.bias
primals_18 = self.decoder.deconv2.weight
primals_19 = self.decoder.deconv2.bias
primals_20 = self.decoder.deconv3.weight
primals_21 = self.decoder.deconv3.bias
primals_22 = self.decoder.deconv4.weight
primals_23 = self.decoder.deconv4.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23])
return output[0], output[1], output[2]
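def _vae_parity_sketch():
    # Editor's addition (hypothetical): with shared weights, the compiled
    # VAENew should match the eager VAE on mu and logsigma; reconstructions
    # differ because each forward draws fresh Gaussian noise. Assumes the
    # eager VAE class from the previous block is in scope; requires CUDA,
    # since call() launches Triton kernels on cuda:0.
    ref = VAE(img_channels=4, latent_size=4).cuda().eval()
    new = VAENew(img_channels=4, latent_size=4).cuda().eval()
    new.load_state_dict(ref.state_dict())
    x = torch.rand(4, 4, 64, 64, device='cuda')
    _, mu_ref, ls_ref = ref(x)
    _, mu_new, ls_new = new(x)
    assert torch.allclose(mu_ref, mu_new, atol=1e-5)
    assert torch.allclose(ls_ref, ls_new, atol=1e-5)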
|
repo_name: FabianSchuetze/world-models
module_name: VAE
synthetic: false
uuid: 13708
licenses: ["MIT"]
stars: 440
sha: d6abd9ce97409734a766eb67ccf0d1967ba9bf0c
repo_link: https://github.com/FabianSchuetze/world-models/tree/d6abd9ce97409734a766eb67ccf0d1967ba9bf0c
|
entry_point: FSM
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/is/cispe7zbbl4nxt2jjus6h5iou2w7htohqj7z2oz6g7nqz6vbpbqr.py
# Topologically Sorted Source Nodes: [avg_pool2d], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
# avg_pool2d => avg_pool2d
# Graph fragment:
# %avg_pool2d : [num_users=2] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%primals_1, [4, 4]), kwargs = {})
triton_poi_fused_avg_pool2d_0 = async_compile.triton('triton_poi_fused_avg_pool2d_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (16*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (16*x0)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (2 + (16*x0)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (3 + (16*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (4 + (16*x0)), xmask, eviction_policy='evict_last')
tmp9 = tl.load(in_ptr0 + (5 + (16*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (6 + (16*x0)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr0 + (7 + (16*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (8 + (16*x0)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr0 + (9 + (16*x0)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr0 + (10 + (16*x0)), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr0 + (11 + (16*x0)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (12 + (16*x0)), xmask, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr0 + (13 + (16*x0)), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr0 + (14 + (16*x0)), xmask, eviction_policy='evict_last')
tmp29 = tl.load(in_ptr0 + (15 + (16*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp8 = tmp7 + tmp6
tmp10 = tmp9 + tmp8
tmp12 = tmp11 + tmp10
tmp14 = tmp13 + tmp12
tmp16 = tmp15 + tmp14
tmp18 = tmp17 + tmp16
tmp20 = tmp19 + tmp18
tmp22 = tmp21 + tmp20
tmp24 = tmp23 + tmp22
tmp26 = tmp25 + tmp24
tmp28 = tmp27 + tmp26
tmp30 = tmp29 + tmp28
tmp31 = 0.0625
tmp32 = tmp30 * tmp31
tl.store(out_ptr0 + (x0), tmp32, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/pb/cpbv2kdrnnh34qohdzbrbszcuxvzwbkong4shzpzh6xv2kb72uot.py
# Topologically Sorted Source Nodes: [atten, feat, x], Original ATen: [aten.sigmoid, aten.mul, aten.add]
# Source node to ATen node mapping:
# atten => sigmoid
# feat => mul
# x => add
# Graph fragment:
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_1, %sigmoid), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %mul), kwargs = {})
triton_poi_fused_add_mul_sigmoid_1 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 16)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tmp4 = tmp0 + tmp3
tl.store(out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [avg_pool2d], Original ATen: [aten.avg_pool2d]
stream0 = get_raw_stream(0)
triton_poi_fused_avg_pool2d_0.run(primals_1, buf0, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1))
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [atten, feat, x], Original ATen: [aten.sigmoid, aten.mul, aten.add]
triton_poi_fused_add_mul_sigmoid_1.run(primals_1, buf1, buf2, 256, grid=grid(256), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_3, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
return (buf3, primals_1, primals_2, primals_3, buf0, buf1, buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch import Tensor
from torch import nn
from torch.nn import functional as F
class FSM(nn.Module):
def __init__(self, c1, c2):
super().__init__()
self.conv_atten = nn.Conv2d(c1, c1, 1, bias=False)
self.conv = nn.Conv2d(c1, c2, 1, bias=False)
def forward(self, x: 'Tensor') ->Tensor:
atten = self.conv_atten(F.avg_pool2d(x, x.shape[2:])).sigmoid()
feat = torch.mul(x, atten)
x = x + feat
return self.conv(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'c1': 4, 'c2': 4}]
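def _fsm_usage_sketch():
    # Editor's addition, not from the source repo. FSM is a squeeze-and-
    # excite-style gate; avg_pool2d over the full spatial extent is global
    # average pooling, so the attention branch equals an adaptive pool to 1x1.
    m = FSM(c1=4, c2=4)
    x = torch.rand(4, 4, 4, 4)
    assert m(x).shape == x.shape  # both convs are 1x1, spatial size is kept
    assert torch.allclose(F.avg_pool2d(x, x.shape[2:]),
                          F.adaptive_avg_pool2d(x, 1))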
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_avg_pool2d_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 16 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp3 = tl.load(in_ptr0 + (2 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp5 = tl.load(in_ptr0 + (3 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp7 = tl.load(in_ptr0 + (4 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp9 = tl.load(in_ptr0 + (5 + 16 * x0), xmask, eviction_policy='evict_last'
)
tmp11 = tl.load(in_ptr0 + (6 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp13 = tl.load(in_ptr0 + (7 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp15 = tl.load(in_ptr0 + (8 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr0 + (9 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp19 = tl.load(in_ptr0 + (10 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp21 = tl.load(in_ptr0 + (11 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp23 = tl.load(in_ptr0 + (12 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp25 = tl.load(in_ptr0 + (13 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp27 = tl.load(in_ptr0 + (14 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp29 = tl.load(in_ptr0 + (15 + 16 * x0), xmask, eviction_policy=
'evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp8 = tmp7 + tmp6
tmp10 = tmp9 + tmp8
tmp12 = tmp11 + tmp10
tmp14 = tmp13 + tmp12
tmp16 = tmp15 + tmp14
tmp18 = tmp17 + tmp16
tmp20 = tmp19 + tmp18
tmp22 = tmp21 + tmp20
tmp24 = tmp23 + tmp22
tmp26 = tmp25 + tmp24
tmp28 = tmp27 + tmp26
tmp30 = tmp29 + tmp28
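    # 4x4 average pool over the whole feature map: scale the 16-element sum by 1/16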
tmp31 = 0.0625
tmp32 = tmp30 * tmp31
tl.store(out_ptr0 + x0, tmp32, xmask)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 16
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp2 = tl.sigmoid(tmp1)
tmp3 = tmp0 * tmp2
tmp4 = tmp0 + tmp3
tl.store(out_ptr0 + x2, tmp4, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (4, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_avg_pool2d_0[grid(16)](primals_1, buf0, 16, XBLOCK
=16, num_warps=1, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf1, (4, 4, 1, 1), (4, 1, 1, 1))
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_add_mul_sigmoid_1[grid(256)](primals_1, buf1, buf2,
256, XBLOCK=256, num_warps=4, num_stages=1)
buf3 = extern_kernels.convolution(buf2, primals_3, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
return buf3, primals_1, primals_2, primals_3, buf0, buf1, buf2
class FSMNew(nn.Module):
def __init__(self, c1, c2):
super().__init__()
self.conv_atten = nn.Conv2d(c1, c1, 1, bias=False)
self.conv = nn.Conv2d(c1, c2, 1, bias=False)
def forward(self, input_0):
primals_2 = self.conv_atten.weight
primals_3 = self.conv.weight
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
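def _fsm_parity_sketch():
    # Editor's addition (hypothetical): FSM has no randomness, so with shared
    # weights the compiled FSMNew should reproduce the eager output. Assumes
    # the eager FSM class from the previous block is in scope; requires CUDA,
    # since call() allocates and launches on cuda:0.
    eager = FSM(4, 4).cuda()
    fused = FSMNew(4, 4).cuda()
    fused.load_state_dict(eager.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    assert torch.allclose(eager(x), fused(x), atol=1e-5)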
|
repo_name: Genevievekim/semantic-segmentation-1
module_name: FSM
synthetic: false
uuid: 13709
licenses: ["BSD-3-Clause"]
stars: 196
sha: f28b026e44cff80fe3ca4cac94cea27e4073821b
repo_link: https://github.com/Genevievekim/semantic-segmentation-1/tree/f28b026e44cff80fe3ca4cac94cea27e4073821b
|
entry_point: Quantization
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/h7/ch7hczdm36cqprau4n25dp5lnqu5oniowp4rku4wb5suw5oubenw.py
# Topologically Sorted Source Nodes: [round_1], Original ATen: [aten.round]
# Source node to ATen node mapping:
# round_1 => round_1
# Graph fragment:
# %round_1 : [num_users=1] = call_function[target=torch.ops.aten.round.default](args = (%arg0_1,), kwargs = {})
triton_poi_fused_round_0 = async_compile.triton('triton_poi_fused_round_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_round_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_round_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = libdevice.nearbyint(tmp0)
tl.store(out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [round_1], Original ATen: [aten.round]
stream0 = get_raw_stream(0)
triton_poi_fused_round_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.utils.data
class Quantization(nn.Module):
@staticmethod
def forward(input):
return torch.round(input)
@staticmethod
def backward(grad_output):
grad_input = grad_output.clone()
return grad_input
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
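# Editor's note: the `backward` defined on the Quantization nn.Module above is
# never invoked by autograd (plain modules do not take part in gradient
# dispatch), and the true gradient of round() is zero almost everywhere. A
# straight-through estimator via torch.autograd.Function -- which autograd
# does dispatch to -- is a minimal sketch of the apparent intent:
class RoundSTE(torch.autograd.Function):
    @staticmethod
    def forward(ctx, input):
        return torch.round(input)

    @staticmethod
    def backward(ctx, grad_output):
        # Straight-through: treat round() as the identity in the backward pass.
        return grad_output.clone()

def _ste_sketch():
    x = torch.rand(4, 4, 4, 4, requires_grad=True)
    RoundSTE.apply(x).sum().backward()
    assert torch.equal(x.grad, torch.ones_like(x))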
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_round_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
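    # libdevice.nearbyint rounds half to even, matching torch.round semantics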
tmp1 = libdevice.nearbyint(tmp0)
tl.store(out_ptr0 + x0, tmp1, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_round_0[grid(256)](arg0_1, buf0, 256, XBLOCK=256,
num_warps=4, num_stages=1)
del arg0_1
return buf0,
class QuantizationNew(nn.Module):
@staticmethod
def backward(grad_output):
grad_input = grad_output.clone()
return grad_input
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
repo_name: Geunwoo-Jeon/iclr_17_compression
module_name: Quantization
synthetic: false
uuid: 13710
licenses: ["MIT"]
stars: 56
sha: a28746b1f1c518d91125d8f289d9511cde488c77
repo_link: https://github.com/Geunwoo-Jeon/iclr_17_compression/tree/a28746b1f1c518d91125d8f289d9511cde488c77
|
entry_point: UNet
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/jo/cjolh7wy3losq75bea7heuxra52smjn2phczl4xzt2smarbxy3nj.py
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d => convolution
# x => gt, mul, where
# Graph fragment:
# %convolution : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [3, 3], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution, 0), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution, 0.1), kwargs = {})
# %where : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt, %convolution, %mul), kwargs = {})
triton_poi_fused_convolution_leaky_relu_0 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 32
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/o4/co4xz3bhphdn2kq3lke3433wpdtqt6r3irqbdr7hp46ou2slvxop.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
# x_1 => avg_pool2d
# Graph fragment:
# %avg_pool2d : [num_users=2] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%where_1, [2, 2]), kwargs = {})
triton_poi_fused_avg_pool2d_1 = async_compile.triton('triton_poi_fused_avg_pool2d_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = (xindex // 32)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + (2*x0) + (128*x1)), None, eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x2), tmp8, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/b2/cb2heynzbbb2idhib26qs23x62rr3vu36ahp3tksyhjfahippc67.py
# Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_2 => convolution_2
# x_2 => gt_2, mul_2, where_2
# Graph fragment:
# %convolution_2 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%avg_pool2d, %primals_6, %primals_7, [1, 1], [2, 2], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_2 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_2, 0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_2, 0.1), kwargs = {})
# %where_2 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %convolution_2, %mul_2), kwargs = {})
triton_poi_fused_convolution_leaky_relu_2 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 64
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/xj/cxjbrfzbed7bo2iy4m5zuii5z5cssze6tfcgrk2jehpphz5b77jh.py
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
# x_4 => avg_pool2d_1
# Graph fragment:
# %avg_pool2d_1 : [num_users=2] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%where_3, [2, 2]), kwargs = {})
triton_poi_fused_avg_pool2d_3 = async_compile.triton('triton_poi_fused_avg_pool2d_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (32 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + (2*x0) + (64*x1)), None, eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x2), tmp8, None)
''', device_str='cuda')
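
# Eager-mode sketch: this kernel is a hand-unrolled 2x2, stride-2 average pool.
# The four loads at offsets 0, 1, 32 and 33 are the corners of each pooling
# window on a 32-wide feature map, so per output element it computes
#   out[i, j] = 0.25 * (in[2i, 2j] + in[2i, 2j+1] + in[2i+1, 2j] + in[2i+1, 2j+1])
# which matches F.avg_pool2d(x, kernel_size=2).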
# kernel path: runs/run_shard_0/inductor_cache/le/cleento7jh4h7b7b25wgw4ax6qfmthojxlfqfgkaohjqgn6pqwco.py
# Topologically Sorted Source Nodes: [conv2d_4, x_5], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_4 => convolution_4
# x_5 => gt_4, mul_4, where_4
# Graph fragment:
# %convolution_4 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%avg_pool2d_1, %primals_10, %primals_11, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_4 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_4, 0), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_4, 0.1), kwargs = {})
# %where_4 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_4, %convolution_4, %mul_4), kwargs = {})
triton_poi_fused_convolution_leaky_relu_4 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_4(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 128
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
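
# Note: the kernels that follow repeat the conv+leaky_relu and avg_pool2d
# patterns once per encoder level. Inductor emits a separate specialization for
# each level because xnumel and the index strides are baked in as compile-time
# constants; the conv-kernel xnumel values (131072 -> 65536 -> 32768 -> 8192)
# track the spatial resolution halving while the channel count grows
# (128 -> 256 -> 512).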
# kernel path: runs/run_shard_0/inductor_cache/s5/cs5zukgmdewmnpcrozw2m273bpclzrkypvc2xaub2gmoc5saabvv.py
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
# x_7 => avg_pool2d_2
# Graph fragment:
# %avg_pool2d_2 : [num_users=2] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%where_5, [2, 2]), kwargs = {})
triton_poi_fused_avg_pool2d_5 = async_compile.triton('triton_poi_fused_avg_pool2d_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_5', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_5(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (16 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (17 + (2*x0) + (32*x1)), None, eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x2), tmp8, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/4u/c4urfqsk2wuyrkcnwy7b2uiwmecrugesubdiuadavwqtcisyhwz4.py
# Topologically Sorted Source Nodes: [conv2d_6, x_8], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_6 => convolution_6
# x_8 => gt_6, mul_6, where_6
# Graph fragment:
# %convolution_6 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%avg_pool2d_2, %primals_14, %primals_15, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_6 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_6, 0), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_6, 0.1), kwargs = {})
# %where_6 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_6, %convolution_6, %mul_6), kwargs = {})
triton_poi_fused_convolution_leaky_relu_6 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_6', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_6(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 256
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/3z/c3zje6b5ccaz3n4winpmxo6y4niaoldocb7ilvkg5sorj2nqvjfa.py
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
# x_10 => avg_pool2d_3
# Graph fragment:
# %avg_pool2d_3 : [num_users=2] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%where_7, [2, 2]), kwargs = {})
triton_poi_fused_avg_pool2d_7 = async_compile.triton('triton_poi_fused_avg_pool2d_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16384],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_7(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16384
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (16*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (16*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (8 + (2*x0) + (16*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (9 + (2*x0) + (16*x1)), None, eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x2), tmp8, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/oy/coyjgpdjbipe737iasihk5ensjtmspgnzblyyy7mrlypqho5vuyg.py
# Topologically Sorted Source Nodes: [conv2d_8, x_11], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_8 => convolution_8
# x_11 => gt_8, mul_8, where_8
# Graph fragment:
# %convolution_8 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%avg_pool2d_3, %primals_18, %primals_19, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_8 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_8, 0), kwargs = {})
# %mul_8 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_8, 0.1), kwargs = {})
# %where_8 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_8, %convolution_8, %mul_8), kwargs = {})
triton_poi_fused_convolution_leaky_relu_8 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_8', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_8', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_8(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 512
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/j6/cj6wkwoaluxhwqnux44qht6o5xye6n3bfqi54esxnpytd6m2qyjn.py
# Topologically Sorted Source Nodes: [x_13], Original ATen: [aten.avg_pool2d]
# Source node to ATen node mapping:
# x_13 => avg_pool2d_4
# Graph fragment:
# %avg_pool2d_4 : [num_users=2] = call_function[target=torch.ops.aten.avg_pool2d.default](args = (%where_9, [2, 2]), kwargs = {})
triton_poi_fused_avg_pool2d_9 = async_compile.triton('triton_poi_fused_avg_pool2d_9', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_avg_pool2d_9', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_avg_pool2d_9(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 2
x1 = (xindex // 2)
x2 = xindex
tmp0 = tl.load(in_ptr0 + ((2*x0) + (8*x1)), None, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr0 + (1 + (2*x0) + (8*x1)), None, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (4 + (2*x0) + (8*x1)), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (5 + (2*x0) + (8*x1)), None, eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + (x2), tmp8, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/i6/ci6sgepehwucwp2knnf7ujr55xjh7bis2i3kdyon6flrsjhhdhyi.py
# Topologically Sorted Source Nodes: [conv2d_10, x_14], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_10 => convolution_10
# x_14 => gt_10, mul_10, where_10
# Graph fragment:
# %convolution_10 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%avg_pool2d_4, %primals_22, %primals_23, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_10 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_10, 0), kwargs = {})
# %mul_10 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_10, 0.1), kwargs = {})
# %where_10 : [num_users=2] = call_function[target=torch.ops.aten.where.self](args = (%gt_10, %convolution_10, %mul_10), kwargs = {})
triton_poi_fused_convolution_leaky_relu_10 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_10', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_10', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_10(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4) % 512
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ep/cepxp5elnxw6qvzcibzdejr6ov2i7hn664ixt6w4vzlrorsdstiq.py
# Topologically Sorted Source Nodes: [conv2d_11, x_15], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_11 => convolution_11
# x_15 => gt_11
# Graph fragment:
# %convolution_11 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_10, %primals_24, %primals_25, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_11 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_11, 0), kwargs = {})
triton_poi_fused_convolution_leaky_relu_11 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_11', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8192],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_11', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_11(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8192
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4) % 512
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
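
# Note: unlike the earlier fused kernels, this one stores only the boolean mask
# `conv_out + bias > 0` (out_ptr0); the leaky_relu value itself is recomputed by
# the upsampling kernel further below from the same conv output and mask,
# trading a small amount of redundant arithmetic for one fewer intermediate
# buffer.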
# kernel path: runs/run_shard_0/inductor_cache/r4/cr4hpkpezpllmtrycjdjyfyalsg3igxkpp5ddup6ueansg3uhioj.py
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# x_16 => convert_element_type_1
# Graph fragment:
# %convert_element_type_1 : [num_users=5] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view, torch.int64), kwargs = {})
triton_poi_fused__to_copy_12 = async_compile.triton('triton_poi_fused__to_copy_12', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_12', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_12(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
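
# Eager-mode sketch: this computes the lower (floor) source index for bilinear
# upsampling with align_corners=False at scale factor 2, i.e. the half-pixel
# mapping used by F.interpolate(..., mode='bilinear'):
#   src = max((dst + 0.5) * 0.5 - 0.5, 0.0)
#   idx0 = int(src)    # truncation equals floor here, since src >= 0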
# kernel path: runs/run_shard_0/inductor_cache/it/citeiab2byvsltguyuzd2s2joq6e6z355s7h7bam6hgio5s5cret.py
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.add, aten.clamp]
# Source node to ATen node mapping:
# x_16 => add_1, clamp_max
# Graph fragment:
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_1, 1), kwargs = {})
# %clamp_max : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_1, 1), kwargs = {})
triton_poi_fused_add_clamp_13 = async_compile.triton('triton_poi_fused_add_clamp_13', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_13', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_13(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = triton_helpers.minimum(tmp10, tmp9)
tl.store(out_ptr0 + (x0), tmp11, xmask)
''', device_str='cuda')
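
# Eager-mode sketch: the matching upper (ceil) neighbour index is idx0 + 1
# clamped to the last valid source row/col (1 here, since the source map being
# upsampled is 2x2); together with the kernel above this yields the
# (floor, ceil) index pair for each interpolated output sample.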
# kernel path: runs/run_shard_0/inductor_cache/tq/ctqegi24pxetbae63246pykqjalfftx6xr5vt4fhudr7ehpmpbyv.py
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
# Source node to ATen node mapping:
# x_16 => add, clamp_max_2, clamp_min, clamp_min_2, convert_element_type, iota, mul_12, sub, sub_2
# Graph fragment:
# %iota : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (4,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota, torch.float32), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type, 0.5), kwargs = {})
# %mul_12 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.5), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_12, 0.5), kwargs = {})
# %clamp_min : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub, 0.0), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min, %convert_element_type_3), kwargs = {})
# %clamp_min_2 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_2, 0.0), kwargs = {})
# %clamp_max_2 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_2, 1.0), kwargs = {})
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_14 = async_compile.triton('triton_poi_fused__to_copy_add_arange_clamp_mul_sub_14', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_clamp_mul_sub_14', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_14(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 - tmp9
tmp11 = triton_helpers.maximum(tmp10, tmp6)
tmp12 = 1.0
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
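
# Eager-mode sketch: this produces the fractional interpolation weight
#   w = clamp(src - floor(src), 0.0, 1.0)
# used to blend the floor/ceil neighbours selected by the two kernels above.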
# kernel path: runs/run_shard_0/inductor_cache/ar/carrwnezxoitom5qyzrwvxxe2xsarer32dfybfdk3w4gttj5i277.py
# Topologically Sorted Source Nodes: [conv2d_11, x_15, x_16], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
# Source node to ATen node mapping:
# conv2d_11 => convolution_11
# x_15 => mul_11, where_11
# x_16 => _unsafe_index, _unsafe_index_1, _unsafe_index_2, _unsafe_index_3, add_4, add_5, add_6, mul_14, mul_15, mul_16, sub_3, sub_4, sub_6
# Graph fragment:
# %convolution_11 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_10, %primals_24, %primals_25, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %mul_11 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_11, 0.1), kwargs = {})
# %where_11 : [num_users=4] = call_function[target=torch.ops.aten.where.self](args = (%gt_11, %convolution_11, %mul_11), kwargs = {})
# %_unsafe_index : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_11, [None, None, %convert_element_type_1, %convert_element_type_3]), kwargs = {})
# %_unsafe_index_1 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_11, [None, None, %convert_element_type_1, %clamp_max_1]), kwargs = {})
# %_unsafe_index_2 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_11, [None, None, %clamp_max, %convert_element_type_3]), kwargs = {})
# %_unsafe_index_3 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_11, [None, None, %clamp_max, %clamp_max_1]), kwargs = {})
# %sub_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_1, %_unsafe_index), kwargs = {})
# %mul_14 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_3, %clamp_max_2), kwargs = {})
# %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index, %mul_14), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_3, %_unsafe_index_2), kwargs = {})
# %mul_15 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_4, %clamp_max_2), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_2, %mul_15), kwargs = {})
# %sub_6 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_5, %add_4), kwargs = {})
# %mul_16 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_6, %clamp_max_3), kwargs = {})
# %add_6 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %mul_16), kwargs = {})
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_15 = async_compile.triton('triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_15', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*i1', 4: '*fp32', 5: '*fp32', 6: '*i64', 7: '*i64', 8: '*fp32', 9: '*fp32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_15', 'mutated_arg_names': ['in_out_ptr1'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_15(in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 4) % 4
x0 = xindex % 4
x6 = (xindex // 16)
x2 = (xindex // 16) % 512
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + (x2), None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr5 + (x1), None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr6 + (x0), None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr7 + (x0), None, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr8 + (x1), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 2, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + (2*tmp4) + (4*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp10 = tl.load(in_ptr3 + (tmp8 + (2*tmp4) + (4*x6)), None, eviction_policy='evict_last')
tmp12 = tmp10 + tmp11
tmp13 = 0.1
tmp14 = tmp12 * tmp13
tmp15 = tl.where(tmp9, tmp12, tmp14)
tmp17 = tmp16 + tmp1
tmp18 = tmp16 < 0
tmp19 = tl.where(tmp18, tmp17, tmp16)
tmp20 = tl.load(in_ptr2 + (tmp8 + (2*tmp19) + (4*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp21 = tl.load(in_ptr3 + (tmp8 + (2*tmp19) + (4*x6)), None, eviction_policy='evict_last')
tmp22 = tmp21 + tmp11
tmp23 = tmp22 * tmp13
tmp24 = tl.where(tmp20, tmp22, tmp23)
tmp26 = tmp25 + tmp1
tmp27 = tmp25 < 0
tmp28 = tl.where(tmp27, tmp26, tmp25)
tmp29 = tl.load(in_ptr2 + (tmp28 + (2*tmp19) + (4*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp30 = tl.load(in_ptr3 + (tmp28 + (2*tmp19) + (4*x6)), None, eviction_policy='evict_last')
tmp31 = tmp30 + tmp11
tmp32 = tmp31 * tmp13
tmp33 = tl.where(tmp29, tmp31, tmp32)
tmp34 = tmp33 - tmp24
tmp36 = tmp34 * tmp35
tmp37 = tmp24 + tmp36
tmp38 = tl.load(in_ptr2 + (tmp28 + (2*tmp4) + (4*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp39 = tl.load(in_ptr3 + (tmp28 + (2*tmp4) + (4*x6)), None, eviction_policy='evict_last')
tmp40 = tmp39 + tmp11
tmp41 = tmp40 * tmp13
tmp42 = tl.where(tmp38, tmp40, tmp41)
tmp43 = tmp42 - tmp15
tmp44 = tmp43 * tmp35
tmp45 = tmp15 + tmp44
tmp46 = tmp45 - tmp37
tmp48 = tmp46 * tmp47
tmp49 = tmp37 + tmp48
tl.store(in_out_ptr1 + (x4), tmp49, None)
''', device_str='cuda')
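
# Eager-mode sketch (hypothetical names; the exact tmp-to-input mapping depends
# on the call site): the kernel above re-applies bias + leaky_relu(0.1) using
# the stored mask, then performs 2x bilinear interpolation of the 2x2 map to
# 4x4 as two horizontal lerps followed by one vertical lerp:
#   top = z[y0, x0] + wx * (z[y0, x1] - z[y0, x0])
#   bot = z[y1, x0] + wx * (z[y1, x1] - z[y1, x0])
#   out = top + wy * (bot - top)   # ~ F.interpolate(z, scale_factor=2,
#                                  #   mode='bilinear', align_corners=False)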
# kernel path: runs/run_shard_0/inductor_cache/vm/cvmoqavquuan3erpml2tllmmlw2pfct5mokbplbbjboljuyvw7db.py
# Topologically Sorted Source Nodes: [conv2d_12, x_17], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_12 => convolution_12
# x_17 => gt_12
# Graph fragment:
# %convolution_12 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%add_6, %primals_26, %primals_27, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_12 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_12, 0), kwargs = {})
triton_poi_fused_convolution_leaky_relu_16 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_16', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32768],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_16', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_16(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32768
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 16) % 512
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/zq/czqy62awvhrtl5r6fvvk4ufd5wffutbs7uz3a6rvpxyaj5tosmne.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%where_12, %where_9], 1), kwargs = {})
triton_poi_fused_cat_17 = async_compile.triton('triton_poi_fused_cat_17', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_17', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_17(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 16) % 1024
x0 = xindex % 16
x2 = (xindex // 16384)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 512, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (16*x1) + (8192*x2)), tmp4, other=0.0).to(tl.int1)
tmp6 = tl.load(in_ptr1 + (x0 + (16*x1) + (8192*x2)), tmp4, other=0.0)
tmp7 = tl.load(in_ptr2 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = 0.1
tmp10 = tmp8 * tmp9
tmp11 = tl.where(tmp5, tmp8, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tmp15 = tl.full([1], 1024, tl.int64)
tmp16 = tmp0 < tmp15
tmp17 = tl.load(in_ptr3 + (x0 + (16*((-512) + x1)) + (8192*x2)), tmp14, other=0.0)
tmp18 = tl.where(tmp4, tmp13, tmp17)
tl.store(out_ptr0 + (x3), tmp18, None)
''', device_str='cuda')
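
# Eager-mode sketch: this kernel fuses the decoder branch's leaky_relu with the
# skip-connection concatenation, selecting on the channel index, i.e. roughly
#   out = torch.cat([torch.where(mask, z, 0.1 * z), skip], dim=1)
# where channels 0..511 come from the activated decoder feature and channels
# 512..1023 are copied straight from the stored encoder feature (in_ptr3).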
# kernel path: runs/run_shard_0/inductor_cache/6w/c6wr4loz4rxs56x2er2z7yyokvbxpzsmffbuifvku2xqaerh75p3.py
# Topologically Sorted Source Nodes: [x_19], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# x_19 => convert_element_type_5
# Graph fragment:
# %convert_element_type_5 : [num_users=5] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view_2, torch.int64), kwargs = {})
triton_poi_fused__to_copy_18 = async_compile.triton('triton_poi_fused__to_copy_18', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_18', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_18(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/oq/coq7wg2jvtpm4oc4zm4dvbkwpc5jinzscvm4jrouu3hlob5nd7rs.py
# Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.add, aten.clamp]
# Source node to ATen node mapping:
# x_19 => add_8, clamp_max_4
# Graph fragment:
# %add_8 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_5, 1), kwargs = {})
# %clamp_max_4 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_8, 3), kwargs = {})
triton_poi_fused_add_clamp_19 = async_compile.triton('triton_poi_fused_add_clamp_19', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_19', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_19(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 3, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tl.store(out_ptr0 + (x0), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/rh/crhh5ib7z3hkisro2vncldz577bevkgu7k3u5nnclrqmgo3wnuzx.py
# Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
# Source node to ATen node mapping:
# x_19 => add_7, clamp_max_6, clamp_min_4, clamp_min_6, convert_element_type_4, iota_2, mul_19, sub_7, sub_9
# Graph fragment:
# %iota_2 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (8,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type_4 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_2, torch.float32), kwargs = {})
# %add_7 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_4, 0.5), kwargs = {})
# %mul_19 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_7, 0.5), kwargs = {})
# %sub_7 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_19, 0.5), kwargs = {})
# %clamp_min_4 : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_7, 0.0), kwargs = {})
# %sub_9 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_4, %convert_element_type_7), kwargs = {})
# %clamp_min_6 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_9, 0.0), kwargs = {})
# %clamp_max_6 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_6, 1.0), kwargs = {})
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_20 = async_compile.triton('triton_poi_fused__to_copy_add_arange_clamp_mul_sub_20', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[8],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0,), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_clamp_mul_sub_20', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_20(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 - tmp9
tmp11 = triton_helpers.maximum(tmp10, tmp6)
tmp12 = 1.0
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/qk/cqkzmxtnx34qktmz7vfilm2bzsfk2r5cxo3wx6pwurql3crkpejs.py
# Topologically Sorted Source Nodes: [conv2d_13, x_18, x_19], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
# Source node to ATen node mapping:
# conv2d_13 => convolution_13
# x_18 => mul_18, where_13
# x_19 => _unsafe_index_4, _unsafe_index_5, _unsafe_index_6, _unsafe_index_7, add_11, add_12, add_13, mul_21, mul_22, mul_23, sub_10, sub_11, sub_13
# Graph fragment:
# %convolution_13 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%cat, %primals_28, %primals_29, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %mul_18 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_13, 0.1), kwargs = {})
# %where_13 : [num_users=4] = call_function[target=torch.ops.aten.where.self](args = (%gt_13, %convolution_13, %mul_18), kwargs = {})
# %_unsafe_index_4 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_13, [None, None, %convert_element_type_5, %convert_element_type_7]), kwargs = {})
# %_unsafe_index_5 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_13, [None, None, %convert_element_type_5, %clamp_max_5]), kwargs = {})
# %_unsafe_index_6 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_13, [None, None, %clamp_max_4, %convert_element_type_7]), kwargs = {})
# %_unsafe_index_7 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_13, [None, None, %clamp_max_4, %clamp_max_5]), kwargs = {})
# %sub_10 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_5, %_unsafe_index_4), kwargs = {})
# %mul_21 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_10, %clamp_max_6), kwargs = {})
# %add_11 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_4, %mul_21), kwargs = {})
# %sub_11 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_7, %_unsafe_index_6), kwargs = {})
# %mul_22 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_11, %clamp_max_6), kwargs = {})
# %add_12 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_6, %mul_22), kwargs = {})
# %sub_13 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_12, %add_11), kwargs = {})
# %mul_23 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_13, %clamp_max_7), kwargs = {})
# %add_13 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_11, %mul_23), kwargs = {})
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_21 = async_compile.triton('triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_21', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*i1', 4: '*fp32', 5: '*fp32', 6: '*i64', 7: '*i64', 8: '*fp32', 9: '*fp32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_21', 'mutated_arg_names': ['in_out_ptr1'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_21(in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 8) % 8
x0 = xindex % 8
x6 = (xindex // 64)
x2 = (xindex // 64) % 512
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + (x2), None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr5 + (x1), None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr6 + (x0), None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr7 + (x0), None, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr8 + (x1), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + (4*tmp4) + (16*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp10 = tl.load(in_ptr3 + (tmp8 + (4*tmp4) + (16*x6)), None, eviction_policy='evict_last')
tmp12 = tmp10 + tmp11
tmp13 = 0.1
tmp14 = tmp12 * tmp13
tmp15 = tl.where(tmp9, tmp12, tmp14)
tmp17 = tmp16 + tmp1
tmp18 = tmp16 < 0
tmp19 = tl.where(tmp18, tmp17, tmp16)
tmp20 = tl.load(in_ptr2 + (tmp8 + (4*tmp19) + (16*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp21 = tl.load(in_ptr3 + (tmp8 + (4*tmp19) + (16*x6)), None, eviction_policy='evict_last')
tmp22 = tmp21 + tmp11
tmp23 = tmp22 * tmp13
tmp24 = tl.where(tmp20, tmp22, tmp23)
tmp26 = tmp25 + tmp1
tmp27 = tmp25 < 0
tmp28 = tl.where(tmp27, tmp26, tmp25)
tmp29 = tl.load(in_ptr2 + (tmp28 + (4*tmp19) + (16*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp30 = tl.load(in_ptr3 + (tmp28 + (4*tmp19) + (16*x6)), None, eviction_policy='evict_last')
tmp31 = tmp30 + tmp11
tmp32 = tmp31 * tmp13
tmp33 = tl.where(tmp29, tmp31, tmp32)
tmp34 = tmp33 - tmp24
tmp36 = tmp34 * tmp35
tmp37 = tmp24 + tmp36
tmp38 = tl.load(in_ptr2 + (tmp28 + (4*tmp4) + (16*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp39 = tl.load(in_ptr3 + (tmp28 + (4*tmp4) + (16*x6)), None, eviction_policy='evict_last')
tmp40 = tmp39 + tmp11
tmp41 = tmp40 * tmp13
tmp42 = tl.where(tmp38, tmp40, tmp41)
tmp43 = tmp42 - tmp15
tmp44 = tmp43 * tmp35
tmp45 = tmp15 + tmp44
tmp46 = tmp45 - tmp37
tmp48 = tmp46 * tmp47
tmp49 = tmp37 + tmp48
tl.store(in_out_ptr1 + (x4), tmp49, None)
''', device_str='cuda')
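# Reference note (hedged): the fused kernel above is the epilogue of conv2d_13.
# The convolution itself runs through an extern kernel and arrives via in_ptr3;
# this kernel then adds the bias (in_ptr4), applies LeakyReLU(negative_slope=0.1)
# using the precomputed sign mask in in_ptr2, and gathers the four neighbours
# for a 2x bilinear upsample (align_corners=False, 4x4 -> 8x8, 512 channels)
# using the precomputed index/weight tensors in in_ptr0/in_ptr1/in_ptr7/in_ptr8.
# A minimal eager-mode sketch of the same math; the function name is
# illustrative and not part of the generated module:
def _sketch_leaky_relu_bilinear_2x(conv_out, bias):
    import torch.nn.functional as F
    # bias add + LeakyReLU, matching where(gt(x, 0), x, 0.1 * x) above
    y = F.leaky_relu(conv_out + bias.view(1, -1, 1, 1), negative_slope=0.1)
    # half-pixel bilinear upsample, matching the gathered-neighbour lerp above
    return F.interpolate(y, scale_factor=2, mode='bilinear', align_corners=False)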
# kernel path: runs/run_shard_0/inductor_cache/vn/cvntemv5weoouv65lvun2muyb6apyj7dkrnebouaithxvdyd4hl4.py
# Topologically Sorted Source Nodes: [conv2d_14, x_20], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_14 => convolution_14
# x_20 => gt_14
# Graph fragment:
# %convolution_14 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%add_13, %primals_30, %primals_31, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_14 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_14, 0), kwargs = {})
triton_poi_fused_convolution_leaky_relu_22 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_22', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_22', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_22(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 64) % 256
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
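# Reference note (hedged): this kernel only materializes the LeakyReLU sign
# mask, gt(conv2d_14 + bias, 0), as an int1 tensor; later kernels re-load it to
# select between x and 0.1 * x instead of recomputing the comparison. An
# illustrative eager equivalent: mask = (conv_out + bias.view(1, -1, 1, 1)) > 0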
# kernel path: runs/run_shard_0/inductor_cache/3p/c3pazcbmhoubkrcj7s65glics5kpj5vv7x2zlnvymydp46fxyf2m.py
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_1 => cat_1
# Graph fragment:
# %cat_1 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%where_14, %where_7], 1), kwargs = {})
triton_poi_fused_cat_23 = async_compile.triton('triton_poi_fused_cat_23', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_23', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_23(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 64) % 512
x0 = xindex % 64
x2 = (xindex // 32768)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 256, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (64*x1) + (16384*x2)), tmp4, other=0.0).to(tl.int1)
tmp6 = tl.load(in_ptr1 + (x0 + (64*x1) + (16384*x2)), tmp4, other=0.0)
tmp7 = tl.load(in_ptr2 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = 0.1
tmp10 = tmp8 * tmp9
tmp11 = tl.where(tmp5, tmp8, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tmp15 = tl.full([1], 512, tl.int64)
tmp16 = tmp0 < tmp15
tmp17 = tl.load(in_ptr3 + (x0 + (64*((-256) + x1)) + (16384*x2)), tmp14, other=0.0)
tmp18 = tl.where(tmp4, tmp13, tmp17)
tl.store(out_ptr0 + (x3), tmp18, None)
''', device_str='cuda')
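# Reference note (hedged): this cat kernel fuses the LeakyReLU of conv2d_14
# into the channel concatenation: the first 256 channels are where(mask, x,
# 0.1 * x) computed inline from the raw convolution and bias, and the last 256
# channels are copied from the skip connection (where_7). A minimal eager-mode
# sketch, with illustrative names:
def _sketch_fused_leaky_relu_cat(mask, conv_out, bias, skip):
    import torch
    x = conv_out + bias.view(1, -1, 1, 1)
    return torch.cat([torch.where(mask, x, 0.1 * x), skip], dim=1)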
# kernel path: runs/run_shard_0/inductor_cache/mi/cmiwjqieuspwn256jnrugfvht2dt7ofln2psibayqc3twrtpkngi.py
# Topologically Sorted Source Nodes: [x_22], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# x_22 => convert_element_type_9
# Graph fragment:
# %convert_element_type_9 : [num_users=5] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view_4, torch.int64), kwargs = {})
triton_poi_fused__to_copy_24 = async_compile.triton('triton_poi_fused__to_copy_24', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_24', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_24(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/ki/ckith5u474vepvwaijseaqbn665u5jpg5stc4cj42bnzsgj6uexm.py
# Topologically Sorted Source Nodes: [x_22], Original ATen: [aten.add, aten.clamp]
# Source node to ATen node mapping:
# x_22 => add_15, clamp_max_8
# Graph fragment:
# %add_15 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_9, 1), kwargs = {})
# %clamp_max_8 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_15, 7), kwargs = {})
triton_poi_fused_add_clamp_25 = async_compile.triton('triton_poi_fused_add_clamp_25', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_25', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_25(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 7, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tl.store(out_ptr0 + (x0), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/lu/cluyprd6omil4csahwtbdldnx2kt7j7znt35dzjdzj4xcxjsppaa.py
# Topologically Sorted Source Nodes: [x_22], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
# Source node to ATen node mapping:
# x_22 => add_14, clamp_max_10, clamp_min_10, clamp_min_8, convert_element_type_8, iota_4, mul_26, sub_14, sub_16
# Graph fragment:
# %iota_4 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (16,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type_8 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_4, torch.float32), kwargs = {})
# %add_14 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_8, 0.5), kwargs = {})
# %mul_26 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_14, 0.5), kwargs = {})
# %sub_14 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_26, 0.5), kwargs = {})
# %clamp_min_8 : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_14, 0.0), kwargs = {})
# %sub_16 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_8, %convert_element_type_11), kwargs = {})
# %clamp_min_10 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_16, 0.0), kwargs = {})
# %clamp_max_10 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_10, 1.0), kwargs = {})
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_26 = async_compile.triton('triton_poi_fused__to_copy_add_arange_clamp_mul_sub_26', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_clamp_mul_sub_26', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_26(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 - tmp9
tmp11 = triton_helpers.maximum(tmp10, tmp6)
tmp12 = 1.0
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
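# Reference note (hedged): the three small kernels above
# (triton_poi_fused__to_copy_24, triton_poi_fused_add_clamp_25, and
# triton_poi_fused__to_copy_add_arange_clamp_mul_sub_26) precompute, per output
# row/column, the low source index, the clamped high source index, and the
# fractional lerp weight consumed by the fused bilinear-upsample kernel that
# follows. A plain Python sketch of the per-coordinate math for one axis
# (in_size = 8 for this stage):
def _sketch_bilinear_coords(i, in_size):
    # half-pixel (align_corners=False) source coordinate for 2x upsampling
    src = max((i + 0.5) * 0.5 - 0.5, 0.0)
    lo = int(src)                        # triton_poi_fused__to_copy_24
    hi = min(lo + 1, in_size - 1)        # triton_poi_fused_add_clamp_25
    w = min(max(src - lo, 0.0), 1.0)     # fractional weight, kernel _26
    return lo, hi, w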
# kernel path: runs/run_shard_0/inductor_cache/qc/cqcxmomsd2pfozktj753ije5uupmwdotvhhglolxtdikeyegh5yz.py
# Topologically Sorted Source Nodes: [conv2d_15, x_21, x_22], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
# Source node to ATen node mapping:
# conv2d_15 => convolution_15
# x_21 => mul_25, where_15
# x_22 => _unsafe_index_10, _unsafe_index_11, _unsafe_index_8, _unsafe_index_9, add_18, add_19, add_20, mul_28, mul_29, mul_30, sub_17, sub_18, sub_20
# Graph fragment:
# %convolution_15 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_1, %primals_32, %primals_33, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %mul_25 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_15, 0.1), kwargs = {})
# %where_15 : [num_users=4] = call_function[target=torch.ops.aten.where.self](args = (%gt_15, %convolution_15, %mul_25), kwargs = {})
# %_unsafe_index_8 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_15, [None, None, %convert_element_type_9, %convert_element_type_11]), kwargs = {})
# %_unsafe_index_9 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_15, [None, None, %convert_element_type_9, %clamp_max_9]), kwargs = {})
# %_unsafe_index_10 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_15, [None, None, %clamp_max_8, %convert_element_type_11]), kwargs = {})
# %_unsafe_index_11 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_15, [None, None, %clamp_max_8, %clamp_max_9]), kwargs = {})
# %sub_17 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_9, %_unsafe_index_8), kwargs = {})
# %mul_28 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_17, %clamp_max_10), kwargs = {})
# %add_18 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_8, %mul_28), kwargs = {})
# %sub_18 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_11, %_unsafe_index_10), kwargs = {})
# %mul_29 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_18, %clamp_max_10), kwargs = {})
# %add_19 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_10, %mul_29), kwargs = {})
# %sub_20 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_19, %add_18), kwargs = {})
# %mul_30 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_20, %clamp_max_11), kwargs = {})
# %add_20 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_18, %mul_30), kwargs = {})
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_27 = async_compile.triton('triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_27', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*i1', 4: '*fp32', 5: '*fp32', 6: '*i64', 7: '*i64', 8: '*fp32', 9: '*fp32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_27', 'mutated_arg_names': ['in_out_ptr1'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_27(in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 16) % 16
x0 = xindex % 16
x6 = (xindex // 256)
x2 = (xindex // 256) % 256
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + (x2), None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr5 + (x1), None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr6 + (x0), None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr7 + (x0), None, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr8 + (x1), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 8, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + (8*tmp4) + (64*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp10 = tl.load(in_ptr3 + (tmp8 + (8*tmp4) + (64*x6)), None, eviction_policy='evict_last')
tmp12 = tmp10 + tmp11
tmp13 = 0.1
tmp14 = tmp12 * tmp13
tmp15 = tl.where(tmp9, tmp12, tmp14)
tmp17 = tmp16 + tmp1
tmp18 = tmp16 < 0
tmp19 = tl.where(tmp18, tmp17, tmp16)
tmp20 = tl.load(in_ptr2 + (tmp8 + (8*tmp19) + (64*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp21 = tl.load(in_ptr3 + (tmp8 + (8*tmp19) + (64*x6)), None, eviction_policy='evict_last')
tmp22 = tmp21 + tmp11
tmp23 = tmp22 * tmp13
tmp24 = tl.where(tmp20, tmp22, tmp23)
tmp26 = tmp25 + tmp1
tmp27 = tmp25 < 0
tmp28 = tl.where(tmp27, tmp26, tmp25)
tmp29 = tl.load(in_ptr2 + (tmp28 + (8*tmp19) + (64*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp30 = tl.load(in_ptr3 + (tmp28 + (8*tmp19) + (64*x6)), None, eviction_policy='evict_last')
tmp31 = tmp30 + tmp11
tmp32 = tmp31 * tmp13
tmp33 = tl.where(tmp29, tmp31, tmp32)
tmp34 = tmp33 - tmp24
tmp36 = tmp34 * tmp35
tmp37 = tmp24 + tmp36
tmp38 = tl.load(in_ptr2 + (tmp28 + (8*tmp4) + (64*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp39 = tl.load(in_ptr3 + (tmp28 + (8*tmp4) + (64*x6)), None, eviction_policy='evict_last')
tmp40 = tmp39 + tmp11
tmp41 = tmp40 * tmp13
tmp42 = tl.where(tmp38, tmp40, tmp41)
tmp43 = tmp42 - tmp15
tmp44 = tmp43 * tmp35
tmp45 = tmp15 + tmp44
tmp46 = tmp45 - tmp37
tmp48 = tmp46 * tmp47
tmp49 = tmp37 + tmp48
tl.store(in_out_ptr1 + (x4), tmp49, None)
''', device_str='cuda')
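# Note: this kernel repeats the fusion of
# triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_21 one
# decoder level up (8x8 -> 16x16, 256 channels); the remaining kernels in this
# section follow the same upsample/activation/concat pattern, with only the
# spatial sizes and channel counts changing.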
# kernel path: runs/run_shard_0/inductor_cache/2n/c2ncutq26bahxlgkdh4rlnvyr47bwimc4zzutvjxr5n6y6efndwb.py
# Topologically Sorted Source Nodes: [conv2d_16, x_23], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_16 => convolution_16
# x_23 => gt_16
# Graph fragment:
# %convolution_16 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%add_20, %primals_34, %primals_35, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_16 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_16, 0), kwargs = {})
triton_poi_fused_convolution_leaky_relu_28 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_28', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[131072],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_28', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_28(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 131072
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 256) % 128
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/oe/coeffqdqijre65ihx7pvd5fl3wkvhaztzhmg43n5sxftyfhfnesu.py
# Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_2 => cat_2
# Graph fragment:
# %cat_2 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%where_16, %where_5], 1), kwargs = {})
triton_poi_fused_cat_29 = async_compile.triton('triton_poi_fused_cat_29', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_29', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_29(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 256) % 256
x0 = xindex % 256
x2 = (xindex // 65536)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 128, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (256*x1) + (32768*x2)), tmp4, other=0.0).to(tl.int1)
tmp6 = tl.load(in_ptr1 + (x0 + (256*x1) + (32768*x2)), tmp4, other=0.0)
tmp7 = tl.load(in_ptr2 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = 0.1
tmp10 = tmp8 * tmp9
tmp11 = tl.where(tmp5, tmp8, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tmp15 = tl.full([1], 256, tl.int64)
tmp16 = tmp0 < tmp15
tmp17 = tl.load(in_ptr3 + (x0 + (256*((-128) + x1)) + (32768*x2)), tmp14, other=0.0)
tmp18 = tl.where(tmp4, tmp13, tmp17)
tl.store(out_ptr0 + (x3), tmp18, None)
''', device_str='cuda')
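# Note: cat_2 mirrors cat_1 (triton_poi_fused_cat_23), joining 128 + 128
# channels at 16x16 resolution.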
# kernel path: runs/run_shard_0/inductor_cache/kk/ckknliedqrn55tdjnurtw2wmbhy4m7nftlest3rkcxytrug6sjjb.py
# Topologically Sorted Source Nodes: [x_25], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# x_25 => convert_element_type_13
# Graph fragment:
# %convert_element_type_13 : [num_users=5] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view_6, torch.int64), kwargs = {})
triton_poi_fused__to_copy_30 = async_compile.triton('triton_poi_fused__to_copy_30', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_30', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_30(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/yi/cyid3ait3xdgmowp4yeresvz4pwdwiylxhglyhgg7hauo5drkgwr.py
# Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.add, aten.clamp]
# Source node to ATen node mapping:
# x_25 => add_22, clamp_max_12
# Graph fragment:
# %add_22 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_13, 1), kwargs = {})
# %clamp_max_12 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_22, 15), kwargs = {})
triton_poi_fused_add_clamp_31 = async_compile.triton('triton_poi_fused_add_clamp_31', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_31', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_31(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 15, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tl.store(out_ptr0 + (x0), tmp12, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/2b/c2bhdwgkdtsx66umnkggdw7khh2c5wl4ogab34mcyesmsvh7u75z.py
# Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
# Source node to ATen node mapping:
# x_25 => add_21, clamp_max_14, clamp_min_12, clamp_min_14, convert_element_type_12, iota_6, mul_33, sub_21, sub_23
# Graph fragment:
# %iota_6 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (32,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type_12 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_6, torch.float32), kwargs = {})
# %add_21 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_12, 0.5), kwargs = {})
# %mul_33 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_21, 0.5), kwargs = {})
# %sub_21 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_33, 0.5), kwargs = {})
# %clamp_min_12 : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_21, 0.0), kwargs = {})
# %sub_23 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_12, %convert_element_type_15), kwargs = {})
# %clamp_min_14 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_23, 0.0), kwargs = {})
# %clamp_max_14 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_14, 1.0), kwargs = {})
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_32 = async_compile.triton('triton_poi_fused__to_copy_add_arange_clamp_mul_sub_32', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_clamp_mul_sub_32', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_32(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 - tmp9
tmp11 = triton_helpers.maximum(tmp10, tmp6)
tmp12 = 1.0
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/74/c74xhgb6wezra6e6cyzbt6yotgcl7i7n4p3es6nogdaczlcdlrl4.py
# Topologically Sorted Source Nodes: [conv2d_17, x_24, x_25], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
# Source node to ATen node mapping:
# conv2d_17 => convolution_17
# x_24 => mul_32, where_17
# x_25 => _unsafe_index_12, _unsafe_index_13, _unsafe_index_14, _unsafe_index_15, add_25, add_26, add_27, mul_35, mul_36, mul_37, sub_24, sub_25, sub_27
# Graph fragment:
# %convolution_17 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_2, %primals_36, %primals_37, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %mul_32 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_17, 0.1), kwargs = {})
# %where_17 : [num_users=4] = call_function[target=torch.ops.aten.where.self](args = (%gt_17, %convolution_17, %mul_32), kwargs = {})
# %_unsafe_index_12 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_17, [None, None, %convert_element_type_13, %convert_element_type_15]), kwargs = {})
# %_unsafe_index_13 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_17, [None, None, %convert_element_type_13, %clamp_max_13]), kwargs = {})
# %_unsafe_index_14 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_17, [None, None, %clamp_max_12, %convert_element_type_15]), kwargs = {})
# %_unsafe_index_15 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_17, [None, None, %clamp_max_12, %clamp_max_13]), kwargs = {})
# %sub_24 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_13, %_unsafe_index_12), kwargs = {})
# %mul_35 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_24, %clamp_max_14), kwargs = {})
# %add_25 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_12, %mul_35), kwargs = {})
# %sub_25 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_15, %_unsafe_index_14), kwargs = {})
# %mul_36 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_25, %clamp_max_14), kwargs = {})
# %add_26 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_14, %mul_36), kwargs = {})
# %sub_27 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_26, %add_25), kwargs = {})
# %mul_37 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_27, %clamp_max_15), kwargs = {})
# %add_27 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_25, %mul_37), kwargs = {})
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_33 = async_compile.triton('triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_33', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*i1', 4: '*fp32', 5: '*fp32', 6: '*i64', 7: '*i64', 8: '*fp32', 9: '*fp32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_33', 'mutated_arg_names': ['in_out_ptr1'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_33(in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 32) % 32
x0 = xindex % 32
x6 = (xindex // 1024)
x2 = (xindex // 1024) % 128
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + (x2), None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr5 + (x1), None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr6 + (x0), None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr7 + (x0), None, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr8 + (x1), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 16, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + (16*tmp4) + (256*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp10 = tl.load(in_ptr3 + (tmp8 + (16*tmp4) + (256*x6)), None, eviction_policy='evict_last')
tmp12 = tmp10 + tmp11
tmp13 = 0.1
tmp14 = tmp12 * tmp13
tmp15 = tl.where(tmp9, tmp12, tmp14)
tmp17 = tmp16 + tmp1
tmp18 = tmp16 < 0
tmp19 = tl.where(tmp18, tmp17, tmp16)
tmp20 = tl.load(in_ptr2 + (tmp8 + (16*tmp19) + (256*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp21 = tl.load(in_ptr3 + (tmp8 + (16*tmp19) + (256*x6)), None, eviction_policy='evict_last')
tmp22 = tmp21 + tmp11
tmp23 = tmp22 * tmp13
tmp24 = tl.where(tmp20, tmp22, tmp23)
tmp26 = tmp25 + tmp1
tmp27 = tmp25 < 0
tmp28 = tl.where(tmp27, tmp26, tmp25)
tmp29 = tl.load(in_ptr2 + (tmp28 + (16*tmp19) + (256*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp30 = tl.load(in_ptr3 + (tmp28 + (16*tmp19) + (256*x6)), None, eviction_policy='evict_last')
tmp31 = tmp30 + tmp11
tmp32 = tmp31 * tmp13
tmp33 = tl.where(tmp29, tmp31, tmp32)
tmp34 = tmp33 - tmp24
tmp36 = tmp34 * tmp35
tmp37 = tmp24 + tmp36
tmp38 = tl.load(in_ptr2 + (tmp28 + (16*tmp4) + (256*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp39 = tl.load(in_ptr3 + (tmp28 + (16*tmp4) + (256*x6)), None, eviction_policy='evict_last')
tmp40 = tmp39 + tmp11
tmp41 = tmp40 * tmp13
tmp42 = tl.where(tmp38, tmp40, tmp41)
tmp43 = tmp42 - tmp15
tmp44 = tmp43 * tmp35
tmp45 = tmp15 + tmp44
tmp46 = tmp45 - tmp37
tmp48 = tmp46 * tmp47
tmp49 = tmp37 + tmp48
tl.store(in_out_ptr1 + (x4), tmp49, None)
''', device_str='cuda')
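# Note: the same fused bias/LeakyReLU/bilinear epilogue, here for the
# 16x16 -> 32x32 stage (128 channels).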
# kernel path: runs/run_shard_0/inductor_cache/hu/chuh444ehiqujq3pobg3s6kf4jk3jfs66ff4yxzuqyv7z7gvdw4l.py
# Topologically Sorted Source Nodes: [conv2d_18, x_26], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_18 => convolution_18
# x_26 => gt_18
# Graph fragment:
# %convolution_18 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%add_27, %primals_38, %primals_39, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_18 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_18, 0), kwargs = {})
triton_poi_fused_convolution_leaky_relu_34 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_34', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[262144],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_34', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_34(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 262144
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 1024) % 64
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/wo/cwov5xjz2rgypru6odo5shttzkvjzbv2j5h765xadmngxefsg27w.py
# Topologically Sorted Source Nodes: [cat_3], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_3 => cat_3
# Graph fragment:
# %cat_3 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%where_18, %where_3], 1), kwargs = {})
triton_poi_fused_cat_35 = async_compile.triton('triton_poi_fused_cat_35', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_35', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_35(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 1024) % 128
x0 = xindex % 1024
x2 = (xindex // 131072)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (1024*x1) + (65536*x2)), tmp4, other=0.0).to(tl.int1)
tmp6 = tl.load(in_ptr1 + (x0 + (1024*x1) + (65536*x2)), tmp4, other=0.0)
tmp7 = tl.load(in_ptr2 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = 0.1
tmp10 = tmp8 * tmp9
tmp11 = tl.where(tmp5, tmp8, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tmp15 = tl.full([1], 128, tl.int64)
tmp16 = tmp0 < tmp15
tmp17 = tl.load(in_ptr3 + (x0 + (1024*((-64) + x1)) + (65536*x2)), tmp14, other=0.0)
tmp18 = tl.where(tmp4, tmp13, tmp17)
tl.store(out_ptr0 + (x3), tmp18, None)
''', device_str='cuda')
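# Note: cat_3 is another skip concatenation of the same form, joining
# where_18 with where_3 (64 + 64 channels at 32x32 resolution).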
# kernel path: runs/run_shard_0/inductor_cache/dy/cdyi77rlf5lxwibfyd5p2m432vdqftcfdpn6tuqyv7hyajbxkkvo.py
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten._to_copy]
# Source node to ATen node mapping:
# x_28 => convert_element_type_17
# Graph fragment:
# %convert_element_type_17 : [num_users=5] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%view_8, torch.int64), kwargs = {})
triton_poi_fused__to_copy_36 = async_compile.triton('triton_poi_fused__to_copy_36', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_36', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_36(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tl.store(out_ptr0 + (x0), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/l5/cl5xfazaijdiktz5n5gqb2xvmg6f5wpggcjdlnx676ib5wqnt2bo.py
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.add, aten.clamp]
# Source node to ATen node mapping:
# x_28 => add_29, clamp_max_16
# Graph fragment:
# %add_29 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_17, 1), kwargs = {})
# %clamp_max_16 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%add_29, 31), kwargs = {})
triton_poi_fused_add_clamp_37 = async_compile.triton('triton_poi_fused_add_clamp_37', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*i64', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_clamp_37', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_clamp_37(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 31, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tl.store(out_ptr0 + (x0), tmp12, xmask)
''', device_str='cuda')
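# Reference sketch (illustration only): the kernel above produces the matching
# bottom/right neighbor, i.e. the left index plus one, clamped to the last
# valid source row/column (input size 32 -> max index 31).
def _reference_bilinear_src_index_plus1(dst_size=64, src_size=32):
    return (_reference_bilinear_src_index(dst_size) + 1).clamp(max=src_size - 1)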
# kernel path: runs/run_shard_0/inductor_cache/zw/czwrbkjbdq3qzzeebsz3vxkktcfyv5fp5csjdanc2n4yhokgkzxs.py
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
# Source node to ATen node mapping:
# x_28 => add_28, clamp_max_18, clamp_min_16, clamp_min_18, convert_element_type_16, iota_8, mul_40, sub_28, sub_30
# Graph fragment:
# %iota_8 : [num_users=1] = call_function[target=torch.ops.prims.iota.default](args = (64,), kwargs = {start: 0, step: 1, dtype: torch.int64, device: cuda:0, requires_grad: False})
# %convert_element_type_16 : [num_users=1] = call_function[target=torch.ops.prims.convert_element_type.default](args = (%iota_8, torch.float32), kwargs = {})
# %add_28 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convert_element_type_16, 0.5), kwargs = {})
# %mul_40 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_28, 0.5), kwargs = {})
# %sub_28 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_40, 0.5), kwargs = {})
# %clamp_min_16 : [num_users=3] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_28, 0.0), kwargs = {})
# %sub_30 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%clamp_min_16, %convert_element_type_19), kwargs = {})
# %clamp_min_18 : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%sub_30, 0.0), kwargs = {})
# %clamp_max_18 : [num_users=3] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min_18, 1.0), kwargs = {})
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_38 = async_compile.triton('triton_poi_fused__to_copy_add_arange_clamp_mul_sub_38', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__to_copy_add_arange_clamp_mul_sub_38', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 0, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_38(out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 - tmp9
tmp11 = triton_helpers.maximum(tmp10, tmp6)
tmp12 = 1.0
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + (x0), tmp13, xmask)
''', device_str='cuda')
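# Reference sketch (illustration only): the kernel above computes the lerp
# weight as the fractional part of the clamped source coordinate, itself
# clamped to [0, 1], mirroring tmp7..tmp13.
def _reference_bilinear_weight(dst_size=64, scale=0.5):
    coord = ((torch.arange(dst_size, dtype=torch.float32) + 0.5) * scale - 0.5).clamp(min=0.0)
    return (coord - coord.trunc()).clamp(0.0, 1.0)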
# kernel path: runs/run_shard_0/inductor_cache/cj/ccjteqzvlszshm7xogphjn6lezrnhtguz6t2ft3y2qrajrcko2wk.py
# Topologically Sorted Source Nodes: [conv2d_19, x_27, x_28], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
# Source node to ATen node mapping:
# conv2d_19 => convolution_19
# x_27 => mul_39, where_19
# x_28 => _unsafe_index_16, _unsafe_index_17, _unsafe_index_18, _unsafe_index_19, add_32, add_33, add_34, mul_42, mul_43, mul_44, sub_31, sub_32, sub_34
# Graph fragment:
# %convolution_19 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%cat_3, %primals_40, %primals_41, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %mul_39 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_19, 0.1), kwargs = {})
# %where_19 : [num_users=4] = call_function[target=torch.ops.aten.where.self](args = (%gt_19, %convolution_19, %mul_39), kwargs = {})
# %_unsafe_index_16 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_19, [None, None, %convert_element_type_17, %convert_element_type_19]), kwargs = {})
# %_unsafe_index_17 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_19, [None, None, %convert_element_type_17, %clamp_max_17]), kwargs = {})
# %_unsafe_index_18 : [num_users=2] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_19, [None, None, %clamp_max_16, %convert_element_type_19]), kwargs = {})
# %_unsafe_index_19 : [num_users=1] = call_function[target=torch.ops.aten._unsafe_index.Tensor](args = (%where_19, [None, None, %clamp_max_16, %clamp_max_17]), kwargs = {})
# %sub_31 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_17, %_unsafe_index_16), kwargs = {})
# %mul_42 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_31, %clamp_max_18), kwargs = {})
# %add_32 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_16, %mul_42), kwargs = {})
# %sub_32 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%_unsafe_index_19, %_unsafe_index_18), kwargs = {})
# %mul_43 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_32, %clamp_max_18), kwargs = {})
# %add_33 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%_unsafe_index_18, %mul_43), kwargs = {})
# %sub_34 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_33, %add_32), kwargs = {})
# %mul_44 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_34, %clamp_max_19), kwargs = {})
# %add_34 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_32, %mul_44), kwargs = {})
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_39 = async_compile.triton('triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_39', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*i64', 2: '*i64', 3: '*i1', 4: '*fp32', 5: '*fp32', 6: '*i64', 7: '*i64', 8: '*fp32', 9: '*fp32', 10: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_39', 'mutated_arg_names': ['in_out_ptr1'], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_39(in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 64) % 64
x0 = xindex % 64
x6 = (xindex // 4096)
x2 = (xindex // 4096) % 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x1), None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (x0), None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + (x2), None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr5 + (x1), None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr6 + (x0), None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr7 + (x0), None, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr8 + (x1), None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 32, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + (32*tmp4) + (1024*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp10 = tl.load(in_ptr3 + (tmp8 + (32*tmp4) + (1024*x6)), None, eviction_policy='evict_last')
tmp12 = tmp10 + tmp11
tmp13 = 0.1
tmp14 = tmp12 * tmp13
tmp15 = tl.where(tmp9, tmp12, tmp14)
tmp17 = tmp16 + tmp1
tmp18 = tmp16 < 0
tmp19 = tl.where(tmp18, tmp17, tmp16)
tmp20 = tl.load(in_ptr2 + (tmp8 + (32*tmp19) + (1024*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp21 = tl.load(in_ptr3 + (tmp8 + (32*tmp19) + (1024*x6)), None, eviction_policy='evict_last')
tmp22 = tmp21 + tmp11
tmp23 = tmp22 * tmp13
tmp24 = tl.where(tmp20, tmp22, tmp23)
tmp26 = tmp25 + tmp1
tmp27 = tmp25 < 0
tmp28 = tl.where(tmp27, tmp26, tmp25)
tmp29 = tl.load(in_ptr2 + (tmp28 + (32*tmp19) + (1024*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp30 = tl.load(in_ptr3 + (tmp28 + (32*tmp19) + (1024*x6)), None, eviction_policy='evict_last')
tmp31 = tmp30 + tmp11
tmp32 = tmp31 * tmp13
tmp33 = tl.where(tmp29, tmp31, tmp32)
tmp34 = tmp33 - tmp24
tmp36 = tmp34 * tmp35
tmp37 = tmp24 + tmp36
tmp38 = tl.load(in_ptr2 + (tmp28 + (32*tmp4) + (1024*x6)), None, eviction_policy='evict_last').to(tl.int1)
tmp39 = tl.load(in_ptr3 + (tmp28 + (32*tmp4) + (1024*x6)), None, eviction_policy='evict_last')
tmp40 = tmp39 + tmp11
tmp41 = tmp40 * tmp13
tmp42 = tl.where(tmp38, tmp40, tmp41)
tmp43 = tmp42 - tmp15
tmp44 = tmp43 * tmp35
tmp45 = tmp15 + tmp44
tmp46 = tmp45 - tmp37
tmp48 = tmp46 * tmp47
tmp49 = tmp37 + tmp48
tl.store(in_out_ptr1 + (x4), tmp49, None)
''', device_str='cuda')
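# Eager-mode sketch (illustration only; `_reference_x28` is a hypothetical
# helper) of what the fused kernel above computes: add the conv bias, apply
# leaky_relu with slope 0.1, then bilinearly upsample 2x by gathering the four
# corner values with the precomputed indices/weights and lerping between them.
def _reference_x28(conv_out, bias):
    import torch.nn.functional as F
    y = F.leaky_relu(conv_out + bias.view(1, -1, 1, 1), negative_slope=0.1)
    return F.interpolate(y, scale_factor=2, mode='bilinear', align_corners=False)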
# kernel path: runs/run_shard_0/inductor_cache/vz/cvzt4swwgye3zpxep4ligqcdlu5xxf7fecsvlec4qsz3qtn6tkxy.py
# Topologically Sorted Source Nodes: [conv2d_20, x_29], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_20 => convolution_20
# x_29 => gt_20
# Graph fragment:
# %convolution_20 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%add_34, %primals_42, %primals_43, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_20 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_20, 0), kwargs = {})
triton_poi_fused_convolution_leaky_relu_40 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_40', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[524288],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_40', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_40(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 524288
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 32
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + (x3), tmp4, None)
''', device_str='cuda')
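# Reference sketch (illustration only): this kernel only materializes the
# boolean mask (conv_out + bias) > 0 that leaky_relu's backward reuses; the
# activated values themselves are produced downstream by the fused cat kernel.
def _reference_leaky_relu_mask(conv_out, bias):
    return (conv_out + bias.view(1, -1, 1, 1)) > 0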
# kernel path: runs/run_shard_0/inductor_cache/rm/crmdmqyxav4fb4725fm2hhwf6m4yrxuhqmo2dbcjkiu6gomd2akp.py
# Topologically Sorted Source Nodes: [cat_4], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat_4 => cat_4
# Graph fragment:
# %cat_4 : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%where_20, %where_1], 1), kwargs = {})
triton_poi_fused_cat_41 = async_compile.triton('triton_poi_fused_cat_41', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1048576],
filename=__file__,
triton_meta={'signature': {0: '*i1', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_41', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_41(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1048576
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x1 = (xindex // 4096) % 64
x0 = xindex % 4096
x2 = (xindex // 262144)
x3 = xindex
tmp0 = x1
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + (4096*x1) + (131072*x2)), tmp4, other=0.0).to(tl.int1)
tmp6 = tl.load(in_ptr1 + (x0 + (4096*x1) + (131072*x2)), tmp4, other=0.0)
tmp7 = tl.load(in_ptr2 + (x1), tmp4, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = 0.1
tmp10 = tmp8 * tmp9
tmp11 = tl.where(tmp5, tmp8, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tmp15 = tl.full([1], 64, tl.int64)
tmp16 = tmp0 < tmp15
tmp17 = tl.load(in_ptr3 + (x0 + (4096*((-32) + x1)) + (131072*x2)), tmp14, other=0.0)
tmp18 = tl.where(tmp4, tmp13, tmp17)
tl.store(out_ptr0 + (x3), tmp18, None)
''', device_str='cuda')
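# Eager-mode sketch (illustration only; `_reference_cat4` is a hypothetical
# helper): the fused kernel above writes leaky_relu(conv2d_20 out + bias) into
# the first 32 channels and copies the skip tensor (s1) into the last 32.
def _reference_cat4(conv_out, bias, skip):
    import torch.nn.functional as F
    return torch.cat(
        (F.leaky_relu(conv_out + bias.view(1, -1, 1, 1), negative_slope=0.1), skip),
        dim=1)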
# kernel path: runs/run_shard_0/inductor_cache/p7/cp74qdkmdadhqce4dzechhvpihfuica6bbejv65ptme5otg3jhj3.py
# Topologically Sorted Source Nodes: [conv2d_22, x_31], Original ATen: [aten.convolution, aten.leaky_relu]
# Source node to ATen node mapping:
# conv2d_22 => convolution_22
# x_31 => gt_22, mul_47, where_22
# Graph fragment:
# %convolution_22 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%where_21, %primals_46, %primals_47, [1, 1], [1, 1], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt_22 : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_22, 0), kwargs = {})
# %mul_47 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%convolution_22, 0.1), kwargs = {})
# %where_22 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_22, %convolution_22, %mul_47), kwargs = {})
triton_poi_fused_convolution_leaky_relu_42 = async_compile.triton('triton_poi_fused_convolution_leaky_relu_42', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[65536],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_leaky_relu_42', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_42(in_ptr0, in_ptr1, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 65536
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = (xindex // 4096) % 4
tmp0 = tl.load(in_ptr0 + (x3), None)
tmp1 = tl.load(in_ptr1 + (x1), None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + (x3), tmp4, None)
tl.store(out_ptr1 + (x3), tmp7, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47 = args
args.clear()
assert_size_stride(primals_1, (32, 4, 7, 7), (196, 49, 7, 1))
assert_size_stride(primals_2, (32, ), (1, ))
assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1))
assert_size_stride(primals_4, (32, 32, 7, 7), (1568, 49, 7, 1))
assert_size_stride(primals_5, (32, ), (1, ))
assert_size_stride(primals_6, (64, 32, 5, 5), (800, 25, 5, 1))
assert_size_stride(primals_7, (64, ), (1, ))
assert_size_stride(primals_8, (64, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_9, (64, ), (1, ))
assert_size_stride(primals_10, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_11, (128, ), (1, ))
assert_size_stride(primals_12, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_13, (128, ), (1, ))
assert_size_stride(primals_14, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_15, (256, ), (1, ))
assert_size_stride(primals_16, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_17, (256, ), (1, ))
assert_size_stride(primals_18, (512, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_19, (512, ), (1, ))
assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_21, (512, ), (1, ))
assert_size_stride(primals_22, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_23, (512, ), (1, ))
assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_25, (512, ), (1, ))
assert_size_stride(primals_26, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_27, (512, ), (1, ))
assert_size_stride(primals_28, (512, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_29, (512, ), (1, ))
assert_size_stride(primals_30, (256, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_31, (256, ), (1, ))
assert_size_stride(primals_32, (256, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_33, (256, ), (1, ))
assert_size_stride(primals_34, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_35, (128, ), (1, ))
assert_size_stride(primals_36, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_37, (128, ), (1, ))
assert_size_stride(primals_38, (64, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_39, (64, ), (1, ))
assert_size_stride(primals_40, (64, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_41, (64, ), (1, ))
assert_size_stride(primals_42, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_43, (32, ), (1, ))
assert_size_stride(primals_44, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_45, (32, ), (1, ))
assert_size_stride(primals_46, (4, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_47, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf1 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool)
buf2 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, x], Original ATen: [aten.convolution, aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0.run(buf0, primals_2, buf1, buf2, 524288, grid=grid(524288), stream=stream0)
del primals_2
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1), padding=(3, 3), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf4 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool)
buf5 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [conv2d_1, s1], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_0.run(buf3, primals_5, buf4, buf5, 524288, grid=grid(524288), stream=stream0)
del primals_5
buf6 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.avg_pool2d]
triton_poi_fused_avg_pool2d_1.run(buf5, buf6, 131072, grid=grid(131072), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_2], Original ATen: [aten.convolution]
buf7 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf8 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool)
buf9 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_2, x_2], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_2.run(buf7, primals_7, buf8, buf9, 262144, grid=grid(262144), stream=stream0)
del primals_7
# Topologically Sorted Source Nodes: [conv2d_3], Original ATen: [aten.convolution]
buf10 = extern_kernels.convolution(buf9, primals_8, stride=(1, 1), padding=(2, 2), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf11 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool)
buf12 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [conv2d_3, x_3], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_2.run(buf10, primals_9, buf11, buf12, 262144, grid=grid(262144), stream=stream0)
del primals_9
buf13 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_4], Original ATen: [aten.avg_pool2d]
triton_poi_fused_avg_pool2d_3.run(buf12, buf13, 65536, grid=grid(65536), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_4], Original ATen: [aten.convolution]
buf14 = extern_kernels.convolution(buf13, primals_10, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 128, 16, 16), (32768, 256, 16, 1))
buf15 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool)
buf16 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_4, x_5], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_4.run(buf14, primals_11, buf15, buf16, 131072, grid=grid(131072), stream=stream0)
del primals_11
# Topologically Sorted Source Nodes: [conv2d_5], Original ATen: [aten.convolution]
buf17 = extern_kernels.convolution(buf16, primals_12, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 128, 16, 16), (32768, 256, 16, 1))
buf18 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool)
buf19 = buf14; del buf14 # reuse
# Topologically Sorted Source Nodes: [conv2d_5, x_6], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_4.run(buf17, primals_13, buf18, buf19, 131072, grid=grid(131072), stream=stream0)
del primals_13
buf20 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_7], Original ATen: [aten.avg_pool2d]
triton_poi_fused_avg_pool2d_5.run(buf19, buf20, 32768, grid=grid(32768), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_6], Original ATen: [aten.convolution]
buf21 = extern_kernels.convolution(buf20, primals_14, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf21, (4, 256, 8, 8), (16384, 64, 8, 1))
buf22 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.bool)
buf23 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_6, x_8], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_6.run(buf21, primals_15, buf22, buf23, 65536, grid=grid(65536), stream=stream0)
del primals_15
# Topologically Sorted Source Nodes: [conv2d_7], Original ATen: [aten.convolution]
buf24 = extern_kernels.convolution(buf23, primals_16, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 256, 8, 8), (16384, 64, 8, 1))
buf25 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.bool)
buf26 = buf21; del buf21 # reuse
# Topologically Sorted Source Nodes: [conv2d_7, x_9], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_6.run(buf24, primals_17, buf25, buf26, 65536, grid=grid(65536), stream=stream0)
del primals_17
buf27 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_10], Original ATen: [aten.avg_pool2d]
triton_poi_fused_avg_pool2d_7.run(buf26, buf27, 16384, grid=grid(16384), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_8], Original ATen: [aten.convolution]
buf28 = extern_kernels.convolution(buf27, primals_18, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 512, 4, 4), (8192, 16, 4, 1))
buf29 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.bool)
buf30 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_8, x_11], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_8.run(buf28, primals_19, buf29, buf30, 32768, grid=grid(32768), stream=stream0)
del primals_19
# Topologically Sorted Source Nodes: [conv2d_9], Original ATen: [aten.convolution]
buf31 = extern_kernels.convolution(buf30, primals_20, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf31, (4, 512, 4, 4), (8192, 16, 4, 1))
buf32 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.bool)
buf33 = buf28; del buf28 # reuse
# Topologically Sorted Source Nodes: [conv2d_9, x_12], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_8.run(buf31, primals_21, buf32, buf33, 32768, grid=grid(32768), stream=stream0)
del primals_21
buf34 = empty_strided_cuda((4, 512, 2, 2), (2048, 4, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_13], Original ATen: [aten.avg_pool2d]
triton_poi_fused_avg_pool2d_9.run(buf33, buf34, 8192, grid=grid(8192), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d_10], Original ATen: [aten.convolution]
buf35 = extern_kernels.convolution(buf34, primals_22, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf35, (4, 512, 2, 2), (2048, 4, 2, 1))
buf36 = empty_strided_cuda((4, 512, 2, 2), (2048, 4, 2, 1), torch.bool)
buf37 = empty_strided_cuda((4, 512, 2, 2), (2048, 4, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d_10, x_14], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_10.run(buf35, primals_23, buf36, buf37, 8192, grid=grid(8192), stream=stream0)
del buf35
del primals_23
# Topologically Sorted Source Nodes: [conv2d_11], Original ATen: [aten.convolution]
buf38 = extern_kernels.convolution(buf37, primals_24, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf38, (4, 512, 2, 2), (2048, 4, 2, 1))
buf39 = empty_strided_cuda((4, 512, 2, 2), (2048, 4, 2, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_11, x_15], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_11.run(buf38, primals_25, buf39, 8192, grid=grid(8192), stream=stream0)
buf40 = empty_strided_cuda((4, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten._to_copy]
triton_poi_fused__to_copy_12.run(buf40, 4, grid=grid(4), stream=stream0)
buf41 = empty_strided_cuda((4, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_13.run(buf41, 4, grid=grid(4), stream=stream0)
buf42 = empty_strided_cuda((4, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_12.run(buf42, 4, grid=grid(4), stream=stream0)
buf43 = empty_strided_cuda((4, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_13.run(buf43, 4, grid=grid(4), stream=stream0)
buf46 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_14.run(buf46, 4, grid=grid(4), stream=stream0)
buf48 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_16], Original ATen: [aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_14.run(buf48, 4, grid=grid(4), stream=stream0)
buf45 = buf31; del buf31 # reuse
buf49 = buf45; del buf45 # reuse
buf50 = buf49; del buf49 # reuse
# Topologically Sorted Source Nodes: [conv2d_11, x_15, x_16], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_15.run(buf50, buf41, buf42, buf39, buf38, primals_25, buf40, buf43, buf46, buf48, 32768, grid=grid(32768), stream=stream0)
del buf38
del primals_25
# Topologically Sorted Source Nodes: [conv2d_12], Original ATen: [aten.convolution]
buf51 = extern_kernels.convolution(buf50, primals_26, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf51, (4, 512, 4, 4), (8192, 16, 4, 1))
buf52 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_12, x_17], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_16.run(buf51, primals_27, buf52, 32768, grid=grid(32768), stream=stream0)
buf53 = reinterpret_tensor(buf24, (4, 1024, 4, 4), (16384, 16, 4, 1), 0); del buf24 # reuse
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
triton_poi_fused_cat_17.run(buf52, buf51, primals_27, buf33, buf53, 65536, grid=grid(65536), stream=stream0)
del buf51
del primals_27
# Topologically Sorted Source Nodes: [conv2d_13], Original ATen: [aten.convolution]
buf54 = extern_kernels.convolution(buf53, primals_28, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf54, (4, 512, 4, 4), (8192, 16, 4, 1))
buf55 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_13, x_18], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_16.run(buf54, primals_29, buf55, 32768, grid=grid(32768), stream=stream0)
buf56 = empty_strided_cuda((8, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x_19], Original ATen: [aten._to_copy]
triton_poi_fused__to_copy_18.run(buf56, 8, grid=grid(8), stream=stream0)
buf57 = empty_strided_cuda((8, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_19.run(buf57, 8, grid=grid(8), stream=stream0)
buf58 = empty_strided_cuda((8, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_18.run(buf58, 8, grid=grid(8), stream=stream0)
buf59 = empty_strided_cuda((8, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_19.run(buf59, 8, grid=grid(8), stream=stream0)
buf62 = empty_strided_cuda((8, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_20.run(buf62, 8, grid=grid(8), stream=stream0)
buf64 = empty_strided_cuda((8, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_19], Original ATen: [aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_20.run(buf64, 8, grid=grid(8), stream=stream0)
buf61 = reinterpret_tensor(buf17, (4, 512, 8, 8), (32768, 64, 8, 1), 0); del buf17 # reuse
buf65 = buf61; del buf61 # reuse
buf66 = buf65; del buf65 # reuse
# Topologically Sorted Source Nodes: [conv2d_13, x_18, x_19], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_21.run(buf66, buf57, buf58, buf55, buf54, primals_29, buf56, buf59, buf62, buf64, 131072, grid=grid(131072), stream=stream0)
del buf54
del primals_29
# Topologically Sorted Source Nodes: [conv2d_14], Original ATen: [aten.convolution]
buf67 = extern_kernels.convolution(buf66, primals_30, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf67, (4, 256, 8, 8), (16384, 64, 8, 1))
buf68 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_14, x_20], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_22.run(buf67, primals_31, buf68, 65536, grid=grid(65536), stream=stream0)
buf69 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_1], Original ATen: [aten.cat]
triton_poi_fused_cat_23.run(buf68, buf67, primals_31, buf26, buf69, 131072, grid=grid(131072), stream=stream0)
del buf67
del primals_31
# Topologically Sorted Source Nodes: [conv2d_15], Original ATen: [aten.convolution]
buf70 = extern_kernels.convolution(buf69, primals_32, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf70, (4, 256, 8, 8), (16384, 64, 8, 1))
buf71 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_15, x_21], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_22.run(buf70, primals_33, buf71, 65536, grid=grid(65536), stream=stream0)
buf72 = empty_strided_cuda((16, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x_22], Original ATen: [aten._to_copy]
triton_poi_fused__to_copy_24.run(buf72, 16, grid=grid(16), stream=stream0)
buf73 = empty_strided_cuda((16, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x_22], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_25.run(buf73, 16, grid=grid(16), stream=stream0)
buf74 = empty_strided_cuda((16, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_22], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_24.run(buf74, 16, grid=grid(16), stream=stream0)
buf75 = empty_strided_cuda((16, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_22], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_25.run(buf75, 16, grid=grid(16), stream=stream0)
buf78 = empty_strided_cuda((16, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x_22], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_26.run(buf78, 16, grid=grid(16), stream=stream0)
buf80 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_22], Original ATen: [aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_26.run(buf80, 16, grid=grid(16), stream=stream0)
buf77 = reinterpret_tensor(buf10, (4, 256, 16, 16), (65536, 256, 16, 1), 0); del buf10 # reuse
buf81 = buf77; del buf77 # reuse
buf82 = buf81; del buf81 # reuse
# Topologically Sorted Source Nodes: [conv2d_15, x_21, x_22], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_27.run(buf82, buf73, buf74, buf71, buf70, primals_33, buf72, buf75, buf78, buf80, 262144, grid=grid(262144), stream=stream0)
del primals_33
# Topologically Sorted Source Nodes: [conv2d_16], Original ATen: [aten.convolution]
buf83 = extern_kernels.convolution(buf82, primals_34, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf83, (4, 128, 16, 16), (32768, 256, 16, 1))
buf84 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_16, x_23], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_28.run(buf83, primals_35, buf84, 131072, grid=grid(131072), stream=stream0)
buf85 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_2], Original ATen: [aten.cat]
triton_poi_fused_cat_29.run(buf84, buf83, primals_35, buf19, buf85, 262144, grid=grid(262144), stream=stream0)
del buf83
del primals_35
# Topologically Sorted Source Nodes: [conv2d_17], Original ATen: [aten.convolution]
buf86 = extern_kernels.convolution(buf85, primals_36, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf86, (4, 128, 16, 16), (32768, 256, 16, 1))
buf87 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_17, x_24], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_28.run(buf86, primals_37, buf87, 131072, grid=grid(131072), stream=stream0)
buf88 = empty_strided_cuda((32, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x_25], Original ATen: [aten._to_copy]
triton_poi_fused__to_copy_30.run(buf88, 32, grid=grid(32), stream=stream0)
buf89 = empty_strided_cuda((32, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_31.run(buf89, 32, grid=grid(32), stream=stream0)
buf90 = empty_strided_cuda((32, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_30.run(buf90, 32, grid=grid(32), stream=stream0)
buf91 = empty_strided_cuda((32, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_31.run(buf91, 32, grid=grid(32), stream=stream0)
buf94 = empty_strided_cuda((32, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_32.run(buf94, 32, grid=grid(32), stream=stream0)
buf96 = empty_strided_cuda((32, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_25], Original ATen: [aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_32.run(buf96, 32, grid=grid(32), stream=stream0)
buf93 = reinterpret_tensor(buf3, (4, 128, 32, 32), (131072, 1024, 32, 1), 0); del buf3 # reuse
buf97 = buf93; del buf93 # reuse
buf98 = buf97; del buf97 # reuse
# Topologically Sorted Source Nodes: [conv2d_17, x_24, x_25], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_33.run(buf98, buf89, buf90, buf87, buf86, primals_37, buf88, buf91, buf94, buf96, 524288, grid=grid(524288), stream=stream0)
del buf86
del primals_37
# Topologically Sorted Source Nodes: [conv2d_18], Original ATen: [aten.convolution]
buf99 = extern_kernels.convolution(buf98, primals_38, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf99, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf100 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_18, x_26], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_34.run(buf99, primals_39, buf100, 262144, grid=grid(262144), stream=stream0)
buf101 = empty_strided_cuda((4, 128, 32, 32), (131072, 1024, 32, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_3], Original ATen: [aten.cat]
triton_poi_fused_cat_35.run(buf100, buf99, primals_39, buf12, buf101, 524288, grid=grid(524288), stream=stream0)
del buf99
del primals_39
# Topologically Sorted Source Nodes: [conv2d_19], Original ATen: [aten.convolution]
buf102 = extern_kernels.convolution(buf101, primals_40, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf102, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf103 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_19, x_27], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_34.run(buf102, primals_41, buf103, 262144, grid=grid(262144), stream=stream0)
buf104 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten._to_copy]
triton_poi_fused__to_copy_36.run(buf104, 64, grid=grid(64), stream=stream0)
buf105 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_37.run(buf105, 64, grid=grid(64), stream=stream0)
buf106 = empty_strided_cuda((64, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_36.run(buf106, 64, grid=grid(64), stream=stream0)
buf107 = empty_strided_cuda((64, ), (1, ), torch.int64)
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.add, aten.clamp]
triton_poi_fused_add_clamp_37.run(buf107, 64, grid=grid(64), stream=stream0)
buf110 = empty_strided_cuda((64, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.arange, aten._to_copy, aten.add, aten.mul, aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_38.run(buf110, 64, grid=grid(64), stream=stream0)
buf112 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_28], Original ATen: [aten.sub, aten.clamp]
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_38.run(buf112, 64, grid=grid(64), stream=stream0)
buf109 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.float32)
buf113 = buf109; del buf109 # reuse
buf114 = buf113; del buf113 # reuse
# Topologically Sorted Source Nodes: [conv2d_19, x_27, x_28], Original ATen: [aten.convolution, aten.leaky_relu, aten._unsafe_index, aten.sub, aten.mul, aten.add]
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_39.run(buf114, buf105, buf106, buf103, buf102, primals_41, buf104, buf107, buf110, buf112, 1048576, grid=grid(1048576), stream=stream0)
del buf102
del primals_41
# Topologically Sorted Source Nodes: [conv2d_20], Original ATen: [aten.convolution]
buf115 = extern_kernels.convolution(buf114, primals_42, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf115, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf116 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool)
# Topologically Sorted Source Nodes: [conv2d_20, x_29], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_40.run(buf115, primals_43, buf116, 524288, grid=grid(524288), stream=stream0)
buf117 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat_4], Original ATen: [aten.cat]
triton_poi_fused_cat_41.run(buf116, buf115, primals_43, buf5, buf117, 1048576, grid=grid(1048576), stream=stream0)
del primals_43
# Topologically Sorted Source Nodes: [conv2d_21], Original ATen: [aten.convolution]
buf118 = extern_kernels.convolution(buf117, primals_44, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf118, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf119 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1), torch.bool)
buf120 = buf115; del buf115 # reuse
# Topologically Sorted Source Nodes: [conv2d_21, x_30], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_0.run(buf118, primals_45, buf119, buf120, 524288, grid=grid(524288), stream=stream0)
del buf118
del primals_45
# Topologically Sorted Source Nodes: [conv2d_22], Original ATen: [aten.convolution]
buf121 = extern_kernels.convolution(buf120, primals_46, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf121, (4, 4, 64, 64), (16384, 4096, 64, 1))
buf122 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1), torch.bool)
buf123 = reinterpret_tensor(buf70, (4, 4, 64, 64), (16384, 4096, 64, 1), 0); del buf70 # reuse
# Topologically Sorted Source Nodes: [conv2d_22, x_31], Original ATen: [aten.convolution, aten.leaky_relu]
triton_poi_fused_convolution_leaky_relu_42.run(buf121, primals_47, buf122, buf123, 65536, grid=grid(65536), stream=stream0)
del buf121
del primals_47
return (buf123, primals_1, primals_3, primals_4, primals_6, primals_8, primals_10, primals_12, primals_14, primals_16, primals_18, primals_20, primals_22, primals_24, primals_26, primals_28, primals_30, primals_32, primals_34, primals_36, primals_38, primals_40, primals_42, primals_44, primals_46, buf1, buf2, buf4, buf5, buf6, buf8, buf9, buf11, buf12, buf13, buf15, buf16, buf18, buf19, buf20, buf22, buf23, buf25, buf26, buf27, buf29, buf30, buf32, buf33, buf34, buf36, buf37, buf39, buf40, buf41, buf42, buf43, buf46, buf48, buf50, buf52, buf53, buf55, buf56, buf57, buf58, buf59, buf62, buf64, buf66, buf68, buf69, buf71, buf72, buf73, buf74, buf75, buf78, buf80, buf82, buf84, buf85, buf87, buf88, buf89, buf90, buf91, buf94, buf96, buf98, buf100, buf101, buf103, buf104, buf105, buf106, buf107, buf110, buf112, buf114, buf116, buf117, buf119, buf120, buf122, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((32, 4, 7, 7), (196, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 64, 64), (16384, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((32, 32, 7, 7), (1568, 49, 7, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((64, 32, 5, 5), (800, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((64, 64, 5, 5), (1600, 25, 5, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((128, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((128, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_13 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_14 = rand_strided((256, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_15 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_16 = rand_strided((256, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_17 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_18 = rand_strided((512, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_19 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_20 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_21 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_22 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_23 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_24 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_25 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_26 = rand_strided((512, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_27 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_28 = rand_strided((512, 1024, 3, 3), (9216, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_29 = rand_strided((512, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_30 = rand_strided((256, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_31 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_32 = rand_strided((256, 512, 3, 3), (4608, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_33 = rand_strided((256, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_34 = rand_strided((128, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_35 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_36 = rand_strided((128, 256, 3, 3), (2304, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_37 = rand_strided((128, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_38 = rand_strided((64, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_39 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_40 = rand_strided((64, 128, 3, 3), (1152, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_41 = rand_strided((64, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_42 = rand_strided((32, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_43 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_44 = rand_strided((32, 64, 3, 3), (576, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_45 = rand_strided((32, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_46 = rand_strided((4, 32, 3, 3), (288, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_47 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, primals_13, primals_14, primals_15, primals_16, primals_17, primals_18, primals_19, primals_20, primals_21, primals_22, primals_23, primals_24, primals_25, primals_26, primals_27, primals_28, primals_29, primals_30, primals_31, primals_32, primals_33, primals_34, primals_35, primals_36, primals_37, primals_38, primals_39, primals_40, primals_41, primals_42, primals_43, primals_44, primals_45, primals_46, primals_47])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class down(nn.Module):
"""
A class for creating neural network blocks containing layers:
Average Pooling --> Convolution + Leaky ReLU --> Convolution + Leaky ReLU
This is used in the UNet class to create a UNet-like architecture.
...
Methods
-------
forward(x)
Returns output tensor after passing input `x` to the neural network
block.
"""
def __init__(self, inChannels, outChannels, filterSize):
"""
Parameters
----------
inChannels : int
number of input channels for the first convolutional layer.
outChannels : int
number of output channels for the first convolutional layer.
This is also used as input and output channels for the
second convolutional layer.
filterSize : int
filter size for the convolution filter. Input N creates
an N x N filter.
"""
super(down, self).__init__()
self.conv1 = nn.Conv2d(inChannels, outChannels, filterSize,
    stride=1, padding=int((filterSize - 1) / 2))
self.conv2 = nn.Conv2d(outChannels, outChannels, filterSize,
    stride=1, padding=int((filterSize - 1) / 2))
def forward(self, x):
"""
Returns output tensor after passing input `x` to the neural network
block.
Parameters
----------
x : tensor
input to the NN block.
Returns
-------
tensor
output of the NN block.
"""
x = F.avg_pool2d(x, 2)
x = F.leaky_relu(self.conv1(x), negative_slope=0.1)
x = F.leaky_relu(self.conv2(x), negative_slope=0.1)
return x
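# Illustrative only (not part of the original module): a `down` block halves
# the spatial size via the avg-pool and remaps channels, e.g.
#   down(32, 64, 5)(torch.rand(1, 32, 64, 64)).shape == (1, 64, 32, 32)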
class up(nn.Module):
"""
A class for creating neural network blocks containing layers:
Bilinear interpolation --> Convolution + Leaky ReLU --> Convolution + Leaky ReLU
This is used in the UNet class to create a UNet-like architecture.
...
Methods
-------
forward(x, skpCn)
Returns output tensor after passing input `x` to the neural network
block.
"""
def __init__(self, inChannels, outChannels):
"""
Parameters
----------
inChannels : int
number of input channels for the first convolutional layer.
outChannels : int
number of output channels for the first convolutional layer.
The second convolutional layer takes 2 * outChannels input
channels (the upsampled features concatenated with the skip
connection) and produces outChannels output channels.
"""
super(up, self).__init__()
self.conv1 = nn.Conv2d(inChannels, outChannels, 3, stride=1, padding=1)
self.conv2 = nn.Conv2d(2 * outChannels, outChannels, 3, stride=1,
padding=1)
def forward(self, x, skpCn):
"""
Returns output tensor after passing input `x` to the neural network
block.
Parameters
----------
x : tensor
input to the NN block.
skpCn : tensor
skip connection input to the NN block.
Returns
-------
tensor
output of the NN block.
"""
x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=False)
x = F.leaky_relu(self.conv1(x), negative_slope=0.1)
x = F.leaky_relu(self.conv2(torch.cat((x, skpCn), 1)),
negative_slope=0.1)
return x
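# Illustrative only: an `up` block doubles the spatial size before conv1, and
# conv2 consumes the result concatenated with the skip tensor, e.g.
#   up(512, 256)(torch.rand(1, 512, 4, 4), torch.rand(1, 256, 8, 8)).shape
#   == (1, 256, 8, 8)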
class UNet(nn.Module):
"""
A class for creating a UNet-like architecture as specified by the
Super SloMo paper.
...
Methods
-------
forward(x)
Returns output tensor after passing input `x` to the neural network
block.
"""
def __init__(self, inChannels, outChannels):
"""
Parameters
----------
inChannels : int
number of input channels for the UNet.
outChannels : int
number of output channels for the UNet.
"""
super(UNet, self).__init__()
self.conv1 = nn.Conv2d(inChannels, 32, 7, stride=1, padding=3)
self.conv2 = nn.Conv2d(32, 32, 7, stride=1, padding=3)
self.down1 = down(32, 64, 5)
self.down2 = down(64, 128, 3)
self.down3 = down(128, 256, 3)
self.down4 = down(256, 512, 3)
self.down5 = down(512, 512, 3)
self.up1 = up(512, 512)
self.up2 = up(512, 256)
self.up3 = up(256, 128)
self.up4 = up(128, 64)
self.up5 = up(64, 32)
self.conv3 = nn.Conv2d(32, outChannels, 3, stride=1, padding=1)
def forward(self, x):
"""
Returns output tensor after passing input `x` to the neural network.
Parameters
----------
x : tensor
input to the UNet.
Returns
-------
tensor
output of the UNet.
"""
x = F.leaky_relu(self.conv1(x), negative_slope=0.1)
s1 = F.leaky_relu(self.conv2(x), negative_slope=0.1)
s2 = self.down1(s1)
s3 = self.down2(s2)
s4 = self.down3(s3)
s5 = self.down4(s4)
x = self.down5(s5)
x = self.up1(x, s5)
x = self.up2(x, s4)
x = self.up3(x, s3)
x = self.up4(x, s2)
x = self.up5(x, s1)
x = F.leaky_relu(self.conv3(x), negative_slope=0.1)
return x
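# Illustrative only: for the 4-channel 64x64 inputs used below, the encoder
# bottoms out at 512 channels on a 2x2 grid and the decoder restores the
# input resolution, so UNet(4, 4)(torch.rand(4, 4, 64, 64)) returns a
# (4, 4, 64, 64) tensor.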
def get_inputs():
return [torch.rand([4, 4, 64, 64])]
def get_init_inputs():
return [[], {'inChannels': 4, 'outChannels': 4}]
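# A minimal sketch (assumed usage, mirroring the harness conventions) of how
# these helpers combine:
#   init_args, init_kwargs = get_init_inputs()
#   model = UNet(*init_args, **init_kwargs)
#   out = model(*get_inputs())  # -> tensor of shape (4, 4, 64, 64)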
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
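# Fused bias-add + LeakyReLU(negative_slope=0.1) epilogue for the (4, 32, 64,
# 64) conv outputs: equivalent to `F.leaky_relu(conv_out + bias.view(1, -1,
# 1, 1), 0.1)`, except the kernel also stores the `x > 0` mask for later
# reuse.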
@triton.jit
def triton_poi_fused_convolution_leaky_relu_0(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 32
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
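# 2x2 / stride-2 average pooling of the 64x64 activations: each output pixel
# averages a 2x2 input window (offsets 0, 1, 64, 65 within the row-major map)
# and scales by 0.25 -- the Triton counterpart of `F.avg_pool2d(x, 2)` in
# `down.forward`.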
@triton.jit
def triton_poi_fused_avg_pool2d_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 32
x1 = xindex // 32
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 128 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 128 * x1), None, eviction_policy
='evict_last')
tmp3 = tl.load(in_ptr0 + (64 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (65 + 2 * x0 + 128 * x1), None,
eviction_policy='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_2(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_avg_pool2d_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 64 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (32 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (33 + 2 * x0 + 64 * x1), None, eviction_policy
='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_4(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 128
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_avg_pool2d_5(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 32 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 32 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (16 + 2 * x0 + 32 * x1), None, eviction_policy
='evict_last')
tmp5 = tl.load(in_ptr0 + (17 + 2 * x0 + 32 * x1), None, eviction_policy
='evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_6(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 256
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_avg_pool2d_7(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 16 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 16 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (8 + 2 * x0 + 16 * x1), None, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (9 + 2 * x0 + 16 * x1), None, eviction_policy=
'evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_8(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 512
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_avg_pool2d_9(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = tl.load(in_ptr0 + (2 * x0 + 8 * x1), None, eviction_policy=
'evict_last')
tmp1 = tl.load(in_ptr0 + (1 + 2 * x0 + 8 * x1), None, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr0 + (4 + 2 * x0 + 8 * x1), None, eviction_policy=
'evict_last')
tmp5 = tl.load(in_ptr0 + (5 + 2 * x0 + 8 * x1), None, eviction_policy=
'evict_last')
tmp2 = tmp1 + tmp0
tmp4 = tmp3 + tmp2
tmp6 = tmp5 + tmp4
tmp7 = 0.25
tmp8 = tmp6 * tmp7
tl.store(out_ptr0 + x2, tmp8, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_10(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4 % 512
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_11(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4 % 512
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + x3, tmp4, None)
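# Kernels 12-14 build the lookup tables for bilinear upsampling with
# align_corners=False at output size 4 (from a 2x2 map): the floor source
# index max((i + 0.5) * 0.5 - 0.5, 0), its right neighbour clamped to the
# last row/column, and the fractional blend weight clamped to [0, 1].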
@triton.jit
def triton_poi_fused__to_copy_12(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_clamp_13(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = triton_helpers.minimum(tmp10, tmp9)
tl.store(out_ptr0 + x0, tmp11, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_14(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 - tmp9
tmp11 = triton_helpers.maximum(tmp10, tmp6)
tmp12 = 1.0
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + x0, tmp13, xmask)
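# Fused 2x2 -> 4x4 bilinear upsample for the first decoder stage: gathers the
# four neighbours through the tables above, re-applies the conv bias +
# LeakyReLU(0.1) via the stored sign mask, then lerps horizontally and
# vertically in a single pass (the `F.interpolate(..., mode='bilinear')` step
# of `up.forward`).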
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_15(
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5,
in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4 % 4
x0 = xindex % 4
x6 = xindex // 16
x2 = xindex // 16 % 512
x4 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr5 + x1, None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr6 + x0, None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr7 + x0, None, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr8 + x1, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 2, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + 2 * tmp4 + 4 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp10 = tl.load(in_ptr3 + (tmp8 + 2 * tmp4 + 4 * x6), None,
eviction_policy='evict_last')
tmp12 = tmp10 + tmp11
tmp13 = 0.1
tmp14 = tmp12 * tmp13
tmp15 = tl.where(tmp9, tmp12, tmp14)
tmp17 = tmp16 + tmp1
tmp18 = tmp16 < 0
tmp19 = tl.where(tmp18, tmp17, tmp16)
tmp20 = tl.load(in_ptr2 + (tmp8 + 2 * tmp19 + 4 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp21 = tl.load(in_ptr3 + (tmp8 + 2 * tmp19 + 4 * x6), None,
eviction_policy='evict_last')
tmp22 = tmp21 + tmp11
tmp23 = tmp22 * tmp13
tmp24 = tl.where(tmp20, tmp22, tmp23)
tmp26 = tmp25 + tmp1
tmp27 = tmp25 < 0
tmp28 = tl.where(tmp27, tmp26, tmp25)
tmp29 = tl.load(in_ptr2 + (tmp28 + 2 * tmp19 + 4 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp30 = tl.load(in_ptr3 + (tmp28 + 2 * tmp19 + 4 * x6), None,
eviction_policy='evict_last')
tmp31 = tmp30 + tmp11
tmp32 = tmp31 * tmp13
tmp33 = tl.where(tmp29, tmp31, tmp32)
tmp34 = tmp33 - tmp24
tmp36 = tmp34 * tmp35
tmp37 = tmp24 + tmp36
tmp38 = tl.load(in_ptr2 + (tmp28 + 2 * tmp4 + 4 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp39 = tl.load(in_ptr3 + (tmp28 + 2 * tmp4 + 4 * x6), None,
eviction_policy='evict_last')
tmp40 = tmp39 + tmp11
tmp41 = tmp40 * tmp13
tmp42 = tl.where(tmp38, tmp40, tmp41)
tmp43 = tmp42 - tmp15
tmp44 = tmp43 * tmp35
tmp45 = tmp15 + tmp44
tmp46 = tmp45 - tmp37
tmp48 = tmp46 * tmp47
tmp49 = tmp37 + tmp48
tl.store(in_out_ptr1 + x4, tmp49, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_16(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 16 % 512
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + x3, tmp4, None)
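# Skip-connection concatenation for the first decoder stage: channels
# [0, 512) take the activated decoder features (bias + LeakyReLU applied via
# the stored mask), channels [512, 1024) copy the matching encoder
# activations -- the Triton counterpart of `torch.cat((x, skpCn), 1)` in
# `up.forward`.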
@triton.jit
def triton_poi_fused_cat_17(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 16 % 1024
x0 = xindex % 16
x2 = xindex // 16384
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 512, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 16 * x1 + 8192 * x2), tmp4, other=0.0).to(tl
.int1)
tmp6 = tl.load(in_ptr1 + (x0 + 16 * x1 + 8192 * x2), tmp4, other=0.0)
tmp7 = tl.load(in_ptr2 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = 0.1
tmp10 = tmp8 * tmp9
tmp11 = tl.where(tmp5, tmp8, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tl.full([1], 1024, tl.int64)
tmp17 = tl.load(in_ptr3 + (x0 + 16 * (-512 + x1) + 8192 * x2), tmp14,
other=0.0)
tmp18 = tl.where(tmp4, tmp13, tmp17)
tl.store(out_ptr0 + x3, tmp18, None)
@triton.jit
def triton_poi_fused__to_copy_18(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_clamp_19(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 3, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tl.store(out_ptr0 + x0, tmp12, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_20(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 8
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 - tmp9
tmp11 = triton_helpers.maximum(tmp10, tmp6)
tmp12 = 1.0
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + x0, tmp13, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_21(
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5,
in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 8 % 8
x0 = xindex % 8
x6 = xindex // 64
x2 = xindex // 64 % 512
x4 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr5 + x1, None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr6 + x0, None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr7 + x0, None, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr8 + x1, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 4, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + 4 * tmp4 + 16 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp10 = tl.load(in_ptr3 + (tmp8 + 4 * tmp4 + 16 * x6), None,
eviction_policy='evict_last')
tmp12 = tmp10 + tmp11
tmp13 = 0.1
tmp14 = tmp12 * tmp13
tmp15 = tl.where(tmp9, tmp12, tmp14)
tmp17 = tmp16 + tmp1
tmp18 = tmp16 < 0
tmp19 = tl.where(tmp18, tmp17, tmp16)
tmp20 = tl.load(in_ptr2 + (tmp8 + 4 * tmp19 + 16 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp21 = tl.load(in_ptr3 + (tmp8 + 4 * tmp19 + 16 * x6), None,
eviction_policy='evict_last')
tmp22 = tmp21 + tmp11
tmp23 = tmp22 * tmp13
tmp24 = tl.where(tmp20, tmp22, tmp23)
tmp26 = tmp25 + tmp1
tmp27 = tmp25 < 0
tmp28 = tl.where(tmp27, tmp26, tmp25)
tmp29 = tl.load(in_ptr2 + (tmp28 + 4 * tmp19 + 16 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp30 = tl.load(in_ptr3 + (tmp28 + 4 * tmp19 + 16 * x6), None,
eviction_policy='evict_last')
tmp31 = tmp30 + tmp11
tmp32 = tmp31 * tmp13
tmp33 = tl.where(tmp29, tmp31, tmp32)
tmp34 = tmp33 - tmp24
tmp36 = tmp34 * tmp35
tmp37 = tmp24 + tmp36
tmp38 = tl.load(in_ptr2 + (tmp28 + 4 * tmp4 + 16 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp39 = tl.load(in_ptr3 + (tmp28 + 4 * tmp4 + 16 * x6), None,
eviction_policy='evict_last')
tmp40 = tmp39 + tmp11
tmp41 = tmp40 * tmp13
tmp42 = tl.where(tmp38, tmp40, tmp41)
tmp43 = tmp42 - tmp15
tmp44 = tmp43 * tmp35
tmp45 = tmp15 + tmp44
tmp46 = tmp45 - tmp37
tmp48 = tmp46 * tmp47
tmp49 = tmp37 + tmp48
tl.store(in_out_ptr1 + x4, tmp49, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_22(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 64 % 256
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_cat_23(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 64 % 512
x0 = xindex % 64
x2 = xindex // 32768
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 256, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 64 * x1 + 16384 * x2), tmp4, other=0.0).to(
tl.int1)
tmp6 = tl.load(in_ptr1 + (x0 + 64 * x1 + 16384 * x2), tmp4, other=0.0)
tmp7 = tl.load(in_ptr2 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = 0.1
tmp10 = tmp8 * tmp9
tmp11 = tl.where(tmp5, tmp8, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tl.full([1], 512, tl.int64)
tmp17 = tl.load(in_ptr3 + (x0 + 64 * (-256 + x1) + 16384 * x2), tmp14,
other=0.0)
tmp18 = tl.where(tmp4, tmp13, tmp17)
tl.store(out_ptr0 + x3, tmp18, None)
@triton.jit
def triton_poi_fused__to_copy_24(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_clamp_25(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 7, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tl.store(out_ptr0 + x0, tmp12, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_26(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 - tmp9
tmp11 = triton_helpers.maximum(tmp10, tmp6)
tmp12 = 1.0
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + x0, tmp13, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_27(
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5,
in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 16 % 16
x0 = xindex % 16
x6 = xindex // 256
x2 = xindex // 256 % 256
x4 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr5 + x1, None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr6 + x0, None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr7 + x0, None, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr8 + x1, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 8, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + 8 * tmp4 + 64 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp10 = tl.load(in_ptr3 + (tmp8 + 8 * tmp4 + 64 * x6), None,
eviction_policy='evict_last')
tmp12 = tmp10 + tmp11
tmp13 = 0.1
tmp14 = tmp12 * tmp13
tmp15 = tl.where(tmp9, tmp12, tmp14)
tmp17 = tmp16 + tmp1
tmp18 = tmp16 < 0
tmp19 = tl.where(tmp18, tmp17, tmp16)
tmp20 = tl.load(in_ptr2 + (tmp8 + 8 * tmp19 + 64 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp21 = tl.load(in_ptr3 + (tmp8 + 8 * tmp19 + 64 * x6), None,
eviction_policy='evict_last')
tmp22 = tmp21 + tmp11
tmp23 = tmp22 * tmp13
tmp24 = tl.where(tmp20, tmp22, tmp23)
tmp26 = tmp25 + tmp1
tmp27 = tmp25 < 0
tmp28 = tl.where(tmp27, tmp26, tmp25)
tmp29 = tl.load(in_ptr2 + (tmp28 + 8 * tmp19 + 64 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp30 = tl.load(in_ptr3 + (tmp28 + 8 * tmp19 + 64 * x6), None,
eviction_policy='evict_last')
tmp31 = tmp30 + tmp11
tmp32 = tmp31 * tmp13
tmp33 = tl.where(tmp29, tmp31, tmp32)
tmp34 = tmp33 - tmp24
tmp36 = tmp34 * tmp35
tmp37 = tmp24 + tmp36
tmp38 = tl.load(in_ptr2 + (tmp28 + 8 * tmp4 + 64 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp39 = tl.load(in_ptr3 + (tmp28 + 8 * tmp4 + 64 * x6), None,
eviction_policy='evict_last')
tmp40 = tmp39 + tmp11
tmp41 = tmp40 * tmp13
tmp42 = tl.where(tmp38, tmp40, tmp41)
tmp43 = tmp42 - tmp15
tmp44 = tmp43 * tmp35
tmp45 = tmp15 + tmp44
tmp46 = tmp45 - tmp37
tmp48 = tmp46 * tmp47
tmp49 = tmp37 + tmp48
tl.store(in_out_ptr1 + x4, tmp49, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_28(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 256 % 128
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_cat_29(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 256 % 256
x0 = xindex % 256
x2 = xindex // 65536
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 128, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 256 * x1 + 32768 * x2), tmp4, other=0.0).to(
tl.int1)
tmp6 = tl.load(in_ptr1 + (x0 + 256 * x1 + 32768 * x2), tmp4, other=0.0)
tmp7 = tl.load(in_ptr2 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = 0.1
tmp10 = tmp8 * tmp9
tmp11 = tl.where(tmp5, tmp8, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tl.full([1], 256, tl.int64)
tmp17 = tl.load(in_ptr3 + (x0 + 256 * (-128 + x1) + 32768 * x2), tmp14,
other=0.0)
tmp18 = tl.where(tmp4, tmp13, tmp17)
tl.store(out_ptr0 + x3, tmp18, None)
@triton.jit
def triton_poi_fused__to_copy_30(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_clamp_31(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 15, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tl.store(out_ptr0 + x0, tmp12, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_32(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 - tmp9
tmp11 = triton_helpers.maximum(tmp10, tmp6)
tmp12 = 1.0
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + x0, tmp13, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_33(
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5,
in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 32 % 32
x0 = xindex % 32
x6 = xindex // 1024
x2 = xindex // 1024 % 128
x4 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr5 + x1, None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr6 + x0, None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr7 + x0, None, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr8 + x1, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 16, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + 16 * tmp4 + 256 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp10 = tl.load(in_ptr3 + (tmp8 + 16 * tmp4 + 256 * x6), None,
eviction_policy='evict_last')
tmp12 = tmp10 + tmp11
tmp13 = 0.1
tmp14 = tmp12 * tmp13
tmp15 = tl.where(tmp9, tmp12, tmp14)
tmp17 = tmp16 + tmp1
tmp18 = tmp16 < 0
tmp19 = tl.where(tmp18, tmp17, tmp16)
tmp20 = tl.load(in_ptr2 + (tmp8 + 16 * tmp19 + 256 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp21 = tl.load(in_ptr3 + (tmp8 + 16 * tmp19 + 256 * x6), None,
eviction_policy='evict_last')
tmp22 = tmp21 + tmp11
tmp23 = tmp22 * tmp13
tmp24 = tl.where(tmp20, tmp22, tmp23)
tmp26 = tmp25 + tmp1
tmp27 = tmp25 < 0
tmp28 = tl.where(tmp27, tmp26, tmp25)
tmp29 = tl.load(in_ptr2 + (tmp28 + 16 * tmp19 + 256 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp30 = tl.load(in_ptr3 + (tmp28 + 16 * tmp19 + 256 * x6), None,
eviction_policy='evict_last')
tmp31 = tmp30 + tmp11
tmp32 = tmp31 * tmp13
tmp33 = tl.where(tmp29, tmp31, tmp32)
tmp34 = tmp33 - tmp24
tmp36 = tmp34 * tmp35
tmp37 = tmp24 + tmp36
tmp38 = tl.load(in_ptr2 + (tmp28 + 16 * tmp4 + 256 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp39 = tl.load(in_ptr3 + (tmp28 + 16 * tmp4 + 256 * x6), None,
eviction_policy='evict_last')
tmp40 = tmp39 + tmp11
tmp41 = tmp40 * tmp13
tmp42 = tl.where(tmp38, tmp40, tmp41)
tmp43 = tmp42 - tmp15
tmp44 = tmp43 * tmp35
tmp45 = tmp15 + tmp44
tmp46 = tmp45 - tmp37
tmp48 = tmp46 * tmp47
tmp49 = tmp37 + tmp48
tl.store(in_out_ptr1 + x4, tmp49, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_34(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 1024 % 64
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_cat_35(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 1024 % 128
x0 = xindex % 1024
x2 = xindex // 131072
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 64, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 1024 * x1 + 65536 * x2), tmp4, other=0.0
).to(tl.int1)
tmp6 = tl.load(in_ptr1 + (x0 + 1024 * x1 + 65536 * x2), tmp4, other=0.0)
tmp7 = tl.load(in_ptr2 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = 0.1
tmp10 = tmp8 * tmp9
tmp11 = tl.where(tmp5, tmp8, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tl.full([1], 128, tl.int64)
tmp17 = tl.load(in_ptr3 + (x0 + 1024 * (-64 + x1) + 65536 * x2), tmp14,
other=0.0)
tmp18 = tl.where(tmp4, tmp13, tmp17)
tl.store(out_ptr0 + x3, tmp18, None)
@triton.jit
def triton_poi_fused__to_copy_36(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tl.store(out_ptr0 + x0, tmp8, xmask)
@triton.jit
def triton_poi_fused_add_clamp_37(out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tl.full([1], 1, tl.int64)
tmp10 = tmp8 + tmp9
tmp11 = tl.full([1], 31, tl.int64)
tmp12 = triton_helpers.minimum(tmp10, tmp11)
tl.store(out_ptr0 + x0, tmp12, xmask)
@triton.jit
def triton_poi_fused__to_copy_add_arange_clamp_mul_sub_38(out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = x0
tmp1 = tmp0.to(tl.float32)
tmp2 = 0.5
tmp3 = tmp1 + tmp2
tmp4 = tmp3 * tmp2
tmp5 = tmp4 - tmp2
tmp6 = 0.0
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp7.to(tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 - tmp9
tmp11 = triton_helpers.maximum(tmp10, tmp6)
tmp12 = 1.0
tmp13 = triton_helpers.minimum(tmp11, tmp12)
tl.store(out_ptr0 + x0, tmp13, xmask)
@triton.jit
def triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_39(
in_out_ptr1, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5,
in_ptr6, in_ptr7, in_ptr8, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 64 % 64
x0 = xindex % 64
x6 = xindex // 4096
x2 = xindex // 4096 % 64
x4 = xindex
tmp0 = tl.load(in_ptr0 + x1, None, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + x0, None, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr4 + x2, None, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr5 + x1, None, eviction_policy='evict_last')
tmp25 = tl.load(in_ptr6 + x0, None, eviction_policy='evict_last')
tmp35 = tl.load(in_ptr7 + x0, None, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr8 + x1, None, eviction_policy='evict_last')
tmp1 = tl.full([XBLOCK], 32, tl.int32)
tmp2 = tmp0 + tmp1
tmp3 = tmp0 < 0
tmp4 = tl.where(tmp3, tmp2, tmp0)
tmp6 = tmp5 + tmp1
tmp7 = tmp5 < 0
tmp8 = tl.where(tmp7, tmp6, tmp5)
tmp9 = tl.load(in_ptr2 + (tmp8 + 32 * tmp4 + 1024 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp10 = tl.load(in_ptr3 + (tmp8 + 32 * tmp4 + 1024 * x6), None,
eviction_policy='evict_last')
tmp12 = tmp10 + tmp11
tmp13 = 0.1
tmp14 = tmp12 * tmp13
tmp15 = tl.where(tmp9, tmp12, tmp14)
tmp17 = tmp16 + tmp1
tmp18 = tmp16 < 0
tmp19 = tl.where(tmp18, tmp17, tmp16)
tmp20 = tl.load(in_ptr2 + (tmp8 + 32 * tmp19 + 1024 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp21 = tl.load(in_ptr3 + (tmp8 + 32 * tmp19 + 1024 * x6), None,
eviction_policy='evict_last')
tmp22 = tmp21 + tmp11
tmp23 = tmp22 * tmp13
tmp24 = tl.where(tmp20, tmp22, tmp23)
tmp26 = tmp25 + tmp1
tmp27 = tmp25 < 0
tmp28 = tl.where(tmp27, tmp26, tmp25)
tmp29 = tl.load(in_ptr2 + (tmp28 + 32 * tmp19 + 1024 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp30 = tl.load(in_ptr3 + (tmp28 + 32 * tmp19 + 1024 * x6), None,
eviction_policy='evict_last')
tmp31 = tmp30 + tmp11
tmp32 = tmp31 * tmp13
tmp33 = tl.where(tmp29, tmp31, tmp32)
tmp34 = tmp33 - tmp24
tmp36 = tmp34 * tmp35
tmp37 = tmp24 + tmp36
tmp38 = tl.load(in_ptr2 + (tmp28 + 32 * tmp4 + 1024 * x6), None,
eviction_policy='evict_last').to(tl.int1)
tmp39 = tl.load(in_ptr3 + (tmp28 + 32 * tmp4 + 1024 * x6), None,
eviction_policy='evict_last')
tmp40 = tmp39 + tmp11
tmp41 = tmp40 * tmp13
tmp42 = tl.where(tmp38, tmp40, tmp41)
tmp43 = tmp42 - tmp15
tmp44 = tmp43 * tmp35
tmp45 = tmp15 + tmp44
tmp46 = tmp45 - tmp37
tmp48 = tmp46 * tmp47
tmp49 = tmp37 + tmp48
tl.store(in_out_ptr1 + x4, tmp49, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_40(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 32
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tl.store(out_ptr0 + x3, tmp4, None)
@triton.jit
def triton_poi_fused_cat_41(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x1 = xindex // 4096 % 64
x0 = xindex % 4096
x2 = xindex // 262144
x3 = xindex
tmp0 = x1
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 32, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x0 + 4096 * x1 + 131072 * x2), tmp4, other=0.0
).to(tl.int1)
tmp6 = tl.load(in_ptr1 + (x0 + 4096 * x1 + 131072 * x2), tmp4, other=0.0)
tmp7 = tl.load(in_ptr2 + x1, tmp4, eviction_policy='evict_last', other=0.0)
tmp8 = tmp6 + tmp7
tmp9 = 0.1
tmp10 = tmp8 * tmp9
tmp11 = tl.where(tmp5, tmp8, tmp10)
tmp12 = tl.full(tmp11.shape, 0.0, tmp11.dtype)
tmp13 = tl.where(tmp4, tmp11, tmp12)
tmp14 = tmp0 >= tmp3
tl.full([1], 64, tl.int64)
tmp17 = tl.load(in_ptr3 + (x0 + 4096 * (-32 + x1) + 131072 * x2), tmp14,
other=0.0)
tmp18 = tl.where(tmp4, tmp13, tmp17)
tl.store(out_ptr0 + x3, tmp18, None)
@triton.jit
def triton_poi_fused_convolution_leaky_relu_42(in_ptr0, in_ptr1, out_ptr0,
out_ptr1, xnumel, XBLOCK: tl.constexpr):
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
tl.full([XBLOCK], True, tl.int1)
x3 = xindex
x1 = xindex // 4096 % 4
tmp0 = tl.load(in_ptr0 + x3, None)
tmp1 = tl.load(in_ptr1 + x1, None, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = 0.0
tmp4 = tmp2 > tmp3
tmp5 = 0.1
tmp6 = tmp2 * tmp5
tmp7 = tl.where(tmp4, tmp2, tmp6)
tl.store(out_ptr0 + x3, tmp4, None)
tl.store(out_ptr1 + x3, tmp7, None)
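# call() replays UNet.forward on the compiled graph: convolutions go through
# extern_kernels.convolution (cuDNN/aten), while the bias + LeakyReLU
# epilogues, average pooling, bilinear upsamples and skip concatenations run
# in the fused Triton kernels defined above.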
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12,
primals_13, primals_14, primals_15, primals_16, primals_17,
primals_18, primals_19, primals_20, primals_21, primals_22,
primals_23, primals_24, primals_25, primals_26, primals_27,
primals_28, primals_29, primals_30, primals_31, primals_32,
primals_33, primals_34, primals_35, primals_36, primals_37,
primals_38, primals_39, primals_40, primals_41, primals_42,
primals_43, primals_44, primals_45, primals_46, primals_47) = args
args.clear()
assert_size_stride(primals_1, (32, 4, 7, 7), (196, 49, 7, 1))
assert_size_stride(primals_2, (32,), (1,))
assert_size_stride(primals_3, (4, 4, 64, 64), (16384, 4096, 64, 1))
assert_size_stride(primals_4, (32, 32, 7, 7), (1568, 49, 7, 1))
assert_size_stride(primals_5, (32,), (1,))
assert_size_stride(primals_6, (64, 32, 5, 5), (800, 25, 5, 1))
assert_size_stride(primals_7, (64,), (1,))
assert_size_stride(primals_8, (64, 64, 5, 5), (1600, 25, 5, 1))
assert_size_stride(primals_9, (64,), (1,))
assert_size_stride(primals_10, (128, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_11, (128,), (1,))
assert_size_stride(primals_12, (128, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_13, (128,), (1,))
assert_size_stride(primals_14, (256, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_15, (256,), (1,))
assert_size_stride(primals_16, (256, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_17, (256,), (1,))
assert_size_stride(primals_18, (512, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_19, (512,), (1,))
assert_size_stride(primals_20, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_21, (512,), (1,))
assert_size_stride(primals_22, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_23, (512,), (1,))
assert_size_stride(primals_24, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_25, (512,), (1,))
assert_size_stride(primals_26, (512, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_27, (512,), (1,))
assert_size_stride(primals_28, (512, 1024, 3, 3), (9216, 9, 3, 1))
assert_size_stride(primals_29, (512,), (1,))
assert_size_stride(primals_30, (256, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_31, (256,), (1,))
assert_size_stride(primals_32, (256, 512, 3, 3), (4608, 9, 3, 1))
assert_size_stride(primals_33, (256,), (1,))
assert_size_stride(primals_34, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_35, (128,), (1,))
assert_size_stride(primals_36, (128, 256, 3, 3), (2304, 9, 3, 1))
assert_size_stride(primals_37, (128,), (1,))
assert_size_stride(primals_38, (64, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_39, (64,), (1,))
assert_size_stride(primals_40, (64, 128, 3, 3), (1152, 9, 3, 1))
assert_size_stride(primals_41, (64,), (1,))
assert_size_stride(primals_42, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_43, (32,), (1,))
assert_size_stride(primals_44, (32, 64, 3, 3), (576, 9, 3, 1))
assert_size_stride(primals_45, (32,), (1,))
assert_size_stride(primals_46, (4, 32, 3, 3), (288, 9, 3, 1))
assert_size_stride(primals_47, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf1 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
buf2 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_leaky_relu_0[grid(524288)](buf0,
primals_2, buf1, buf2, 524288, XBLOCK=1024, num_warps=4,
num_stages=1)
del primals_2
buf3 = extern_kernels.convolution(buf2, primals_4, stride=(1, 1),
padding=(3, 3), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf4 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
buf5 = buf0
del buf0
triton_poi_fused_convolution_leaky_relu_0[grid(524288)](buf3,
primals_5, buf4, buf5, 524288, XBLOCK=1024, num_warps=4,
num_stages=1)
del primals_5
buf6 = empty_strided_cuda((4, 32, 32, 32), (32768, 1024, 32, 1),
torch.float32)
triton_poi_fused_avg_pool2d_1[grid(131072)](buf5, buf6, 131072,
XBLOCK=512, num_warps=8, num_stages=1)
buf7 = extern_kernels.convolution(buf6, primals_6, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf7, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf8 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.bool)
buf9 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.float32)
triton_poi_fused_convolution_leaky_relu_2[grid(262144)](buf7,
primals_7, buf8, buf9, 262144, XBLOCK=512, num_warps=8,
num_stages=1)
del primals_7
buf10 = extern_kernels.convolution(buf9, primals_8, stride=(1, 1),
padding=(2, 2), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf10, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf11 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.bool)
buf12 = buf7
del buf7
triton_poi_fused_convolution_leaky_relu_2[grid(262144)](buf10,
primals_9, buf11, buf12, 262144, XBLOCK=512, num_warps=8,
num_stages=1)
del primals_9
buf13 = empty_strided_cuda((4, 64, 16, 16), (16384, 256, 16, 1),
torch.float32)
triton_poi_fused_avg_pool2d_3[grid(65536)](buf12, buf13, 65536,
XBLOCK=256, num_warps=4, num_stages=1)
buf14 = extern_kernels.convolution(buf13, primals_10, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf14, (4, 128, 16, 16), (32768, 256, 16, 1))
buf15 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1),
torch.bool)
buf16 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1),
torch.float32)
triton_poi_fused_convolution_leaky_relu_4[grid(131072)](buf14,
primals_11, buf15, buf16, 131072, XBLOCK=512, num_warps=8,
num_stages=1)
del primals_11
buf17 = extern_kernels.convolution(buf16, primals_12, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf17, (4, 128, 16, 16), (32768, 256, 16, 1))
buf18 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1),
torch.bool)
buf19 = buf14
del buf14
triton_poi_fused_convolution_leaky_relu_4[grid(131072)](buf17,
primals_13, buf18, buf19, 131072, XBLOCK=512, num_warps=8,
num_stages=1)
del primals_13
buf20 = empty_strided_cuda((4, 128, 8, 8), (8192, 64, 8, 1), torch.
float32)
triton_poi_fused_avg_pool2d_5[grid(32768)](buf19, buf20, 32768,
XBLOCK=128, num_warps=4, num_stages=1)
buf21 = extern_kernels.convolution(buf20, primals_14, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf21, (4, 256, 8, 8), (16384, 64, 8, 1))
buf22 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.bool)
buf23 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.float32)
triton_poi_fused_convolution_leaky_relu_6[grid(65536)](buf21,
primals_15, buf22, buf23, 65536, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_15
buf24 = extern_kernels.convolution(buf23, primals_16, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf24, (4, 256, 8, 8), (16384, 64, 8, 1))
buf25 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.bool)
buf26 = buf21
del buf21
triton_poi_fused_convolution_leaky_relu_6[grid(65536)](buf24,
primals_17, buf25, buf26, 65536, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_17
buf27 = empty_strided_cuda((4, 256, 4, 4), (4096, 16, 4, 1), torch.
float32)
triton_poi_fused_avg_pool2d_7[grid(16384)](buf26, buf27, 16384,
XBLOCK=128, num_warps=4, num_stages=1)
buf28 = extern_kernels.convolution(buf27, primals_18, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf28, (4, 512, 4, 4), (8192, 16, 4, 1))
buf29 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.bool
)
buf30 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.
float32)
triton_poi_fused_convolution_leaky_relu_8[grid(32768)](buf28,
primals_19, buf29, buf30, 32768, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_19
buf31 = extern_kernels.convolution(buf30, primals_20, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf31, (4, 512, 4, 4), (8192, 16, 4, 1))
buf32 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.bool
)
buf33 = buf28
del buf28
triton_poi_fused_convolution_leaky_relu_8[grid(32768)](buf31,
primals_21, buf32, buf33, 32768, XBLOCK=128, num_warps=4,
num_stages=1)
del primals_21
buf34 = empty_strided_cuda((4, 512, 2, 2), (2048, 4, 2, 1), torch.
float32)
triton_poi_fused_avg_pool2d_9[grid(8192)](buf33, buf34, 8192,
XBLOCK=128, num_warps=4, num_stages=1)
buf35 = extern_kernels.convolution(buf34, primals_22, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf35, (4, 512, 2, 2), (2048, 4, 2, 1))
buf36 = empty_strided_cuda((4, 512, 2, 2), (2048, 4, 2, 1), torch.bool)
buf37 = empty_strided_cuda((4, 512, 2, 2), (2048, 4, 2, 1), torch.
float32)
triton_poi_fused_convolution_leaky_relu_10[grid(8192)](buf35,
primals_23, buf36, buf37, 8192, XBLOCK=256, num_warps=4,
num_stages=1)
del buf35
del primals_23
buf38 = extern_kernels.convolution(buf37, primals_24, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf38, (4, 512, 2, 2), (2048, 4, 2, 1))
buf39 = empty_strided_cuda((4, 512, 2, 2), (2048, 4, 2, 1), torch.bool)
triton_poi_fused_convolution_leaky_relu_11[grid(8192)](buf38,
primals_25, buf39, 8192, XBLOCK=128, num_warps=4, num_stages=1)
buf40 = empty_strided_cuda((4, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_12[grid(4)](buf40, 4, XBLOCK=4, num_warps
=1, num_stages=1)
buf41 = empty_strided_cuda((4, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_13[grid(4)](buf41, 4, XBLOCK=4,
num_warps=1, num_stages=1)
buf42 = empty_strided_cuda((4,), (1,), torch.int64)
triton_poi_fused__to_copy_12[grid(4)](buf42, 4, XBLOCK=4, num_warps
=1, num_stages=1)
buf43 = empty_strided_cuda((4,), (1,), torch.int64)
triton_poi_fused_add_clamp_13[grid(4)](buf43, 4, XBLOCK=4,
num_warps=1, num_stages=1)
buf46 = empty_strided_cuda((4,), (1,), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_14[grid(4)](buf46,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf48 = empty_strided_cuda((4, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_14[grid(4)](buf48,
4, XBLOCK=4, num_warps=1, num_stages=1)
buf45 = buf31
del buf31
buf49 = buf45
del buf45
buf50 = buf49
del buf49
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_15[
grid(32768)](buf50, buf41, buf42, buf39, buf38, primals_25,
buf40, buf43, buf46, buf48, 32768, XBLOCK=128, num_warps=4,
num_stages=1)
del buf38
del primals_25
buf51 = extern_kernels.convolution(buf50, primals_26, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf51, (4, 512, 4, 4), (8192, 16, 4, 1))
buf52 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.bool
)
triton_poi_fused_convolution_leaky_relu_16[grid(32768)](buf51,
primals_27, buf52, 32768, XBLOCK=128, num_warps=4, num_stages=1)
buf53 = reinterpret_tensor(buf24, (4, 1024, 4, 4), (16384, 16, 4, 1), 0
)
del buf24
triton_poi_fused_cat_17[grid(65536)](buf52, buf51, primals_27,
buf33, buf53, 65536, XBLOCK=256, num_warps=4, num_stages=1)
del buf51
del primals_27
buf54 = extern_kernels.convolution(buf53, primals_28, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf54, (4, 512, 4, 4), (8192, 16, 4, 1))
buf55 = empty_strided_cuda((4, 512, 4, 4), (8192, 16, 4, 1), torch.bool
)
triton_poi_fused_convolution_leaky_relu_16[grid(32768)](buf54,
primals_29, buf55, 32768, XBLOCK=128, num_warps=4, num_stages=1)
buf56 = empty_strided_cuda((8, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_18[grid(8)](buf56, 8, XBLOCK=8, num_warps
=1, num_stages=1)
buf57 = empty_strided_cuda((8, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_19[grid(8)](buf57, 8, XBLOCK=8,
num_warps=1, num_stages=1)
buf58 = empty_strided_cuda((8,), (1,), torch.int64)
triton_poi_fused__to_copy_18[grid(8)](buf58, 8, XBLOCK=8, num_warps
=1, num_stages=1)
buf59 = empty_strided_cuda((8,), (1,), torch.int64)
triton_poi_fused_add_clamp_19[grid(8)](buf59, 8, XBLOCK=8,
num_warps=1, num_stages=1)
buf62 = empty_strided_cuda((8,), (1,), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_20[grid(8)](buf62,
8, XBLOCK=8, num_warps=1, num_stages=1)
buf64 = empty_strided_cuda((8, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_20[grid(8)](buf64,
8, XBLOCK=8, num_warps=1, num_stages=1)
buf61 = reinterpret_tensor(buf17, (4, 512, 8, 8), (32768, 64, 8, 1), 0)
del buf17
buf65 = buf61
del buf61
buf66 = buf65
del buf65
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_21[
grid(131072)](buf66, buf57, buf58, buf55, buf54, primals_29,
buf56, buf59, buf62, buf64, 131072, XBLOCK=512, num_warps=8,
num_stages=1)
del buf54
del primals_29
buf67 = extern_kernels.convolution(buf66, primals_30, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf67, (4, 256, 8, 8), (16384, 64, 8, 1))
buf68 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.bool)
triton_poi_fused_convolution_leaky_relu_22[grid(65536)](buf67,
primals_31, buf68, 65536, XBLOCK=512, num_warps=4, num_stages=1)
buf69 = empty_strided_cuda((4, 512, 8, 8), (32768, 64, 8, 1), torch
.float32)
triton_poi_fused_cat_23[grid(131072)](buf68, buf67, primals_31,
buf26, buf69, 131072, XBLOCK=512, num_warps=8, num_stages=1)
del buf67
del primals_31
buf70 = extern_kernels.convolution(buf69, primals_32, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf70, (4, 256, 8, 8), (16384, 64, 8, 1))
buf71 = empty_strided_cuda((4, 256, 8, 8), (16384, 64, 8, 1), torch
.bool)
triton_poi_fused_convolution_leaky_relu_22[grid(65536)](buf70,
primals_33, buf71, 65536, XBLOCK=512, num_warps=4, num_stages=1)
buf72 = empty_strided_cuda((16, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_24[grid(16)](buf72, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf73 = empty_strided_cuda((16, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_25[grid(16)](buf73, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf74 = empty_strided_cuda((16,), (1,), torch.int64)
triton_poi_fused__to_copy_24[grid(16)](buf74, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf75 = empty_strided_cuda((16,), (1,), torch.int64)
triton_poi_fused_add_clamp_25[grid(16)](buf75, 16, XBLOCK=16,
num_warps=1, num_stages=1)
buf78 = empty_strided_cuda((16,), (1,), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_26[grid(16)](buf78,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf80 = empty_strided_cuda((16, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_26[grid(16)](buf80,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf77 = reinterpret_tensor(buf10, (4, 256, 16, 16), (65536, 256, 16,
1), 0)
del buf10
buf81 = buf77
del buf77
buf82 = buf81
del buf81
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_27[
grid(262144)](buf82, buf73, buf74, buf71, buf70, primals_33,
buf72, buf75, buf78, buf80, 262144, XBLOCK=512, num_warps=8,
num_stages=1)
del primals_33
buf83 = extern_kernels.convolution(buf82, primals_34, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf83, (4, 128, 16, 16), (32768, 256, 16, 1))
buf84 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_28[grid(131072)](buf83,
primals_35, buf84, 131072, XBLOCK=1024, num_warps=4, num_stages=1)
buf85 = empty_strided_cuda((4, 256, 16, 16), (65536, 256, 16, 1),
torch.float32)
triton_poi_fused_cat_29[grid(262144)](buf84, buf83, primals_35,
buf19, buf85, 262144, XBLOCK=512, num_warps=8, num_stages=1)
del buf83
del primals_35
buf86 = extern_kernels.convolution(buf85, primals_36, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf86, (4, 128, 16, 16), (32768, 256, 16, 1))
buf87 = empty_strided_cuda((4, 128, 16, 16), (32768, 256, 16, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_28[grid(131072)](buf86,
primals_37, buf87, 131072, XBLOCK=1024, num_warps=4, num_stages=1)
buf88 = empty_strided_cuda((32, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_30[grid(32)](buf88, 32, XBLOCK=32,
num_warps=1, num_stages=1)
buf89 = empty_strided_cuda((32, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_31[grid(32)](buf89, 32, XBLOCK=32,
num_warps=1, num_stages=1)
buf90 = empty_strided_cuda((32,), (1,), torch.int64)
triton_poi_fused__to_copy_30[grid(32)](buf90, 32, XBLOCK=32,
num_warps=1, num_stages=1)
buf91 = empty_strided_cuda((32,), (1,), torch.int64)
triton_poi_fused_add_clamp_31[grid(32)](buf91, 32, XBLOCK=32,
num_warps=1, num_stages=1)
buf94 = empty_strided_cuda((32,), (1,), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_32[grid(32)](buf94,
32, XBLOCK=32, num_warps=1, num_stages=1)
buf96 = empty_strided_cuda((32, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_32[grid(32)](buf96,
32, XBLOCK=32, num_warps=1, num_stages=1)
buf93 = reinterpret_tensor(buf3, (4, 128, 32, 32), (131072, 1024,
32, 1), 0)
del buf3
buf97 = buf93
del buf93
buf98 = buf97
del buf97
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_33[
grid(524288)](buf98, buf89, buf90, buf87, buf86, primals_37,
buf88, buf91, buf94, buf96, 524288, XBLOCK=512, num_warps=8,
num_stages=1)
del buf86
del primals_37
buf99 = extern_kernels.convolution(buf98, primals_38, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf99, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf100 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_34[grid(262144)](buf99,
primals_39, buf100, 262144, XBLOCK=1024, num_warps=4, num_stages=1)
buf101 = empty_strided_cuda((4, 128, 32, 32), (131072, 1024, 32, 1),
torch.float32)
triton_poi_fused_cat_35[grid(524288)](buf100, buf99, primals_39,
buf12, buf101, 524288, XBLOCK=512, num_warps=8, num_stages=1)
del buf99
del primals_39
buf102 = extern_kernels.convolution(buf101, primals_40, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf102, (4, 64, 32, 32), (65536, 1024, 32, 1))
buf103 = empty_strided_cuda((4, 64, 32, 32), (65536, 1024, 32, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_34[grid(262144)](buf102,
primals_41, buf103, 262144, XBLOCK=1024, num_warps=4, num_stages=1)
buf104 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
triton_poi_fused__to_copy_36[grid(64)](buf104, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf105 = empty_strided_cuda((64, 1), (1, 1), torch.int64)
triton_poi_fused_add_clamp_37[grid(64)](buf105, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf106 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused__to_copy_36[grid(64)](buf106, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf107 = empty_strided_cuda((64,), (1,), torch.int64)
triton_poi_fused_add_clamp_37[grid(64)](buf107, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf110 = empty_strided_cuda((64,), (1,), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_38[grid(64)](buf110,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf112 = empty_strided_cuda((64, 1), (1, 1), torch.float32)
triton_poi_fused__to_copy_add_arange_clamp_mul_sub_38[grid(64)](buf112,
64, XBLOCK=64, num_warps=1, num_stages=1)
buf109 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
torch.float32)
buf113 = buf109
del buf109
buf114 = buf113
del buf113
triton_poi_fused__unsafe_index_add_convolution_leaky_relu_mul_sub_39[
grid(1048576)](buf114, buf105, buf106, buf103, buf102,
primals_41, buf104, buf107, buf110, buf112, 1048576, XBLOCK=
1024, num_warps=4, num_stages=1)
del buf102
del primals_41
buf115 = extern_kernels.convolution(buf114, primals_42, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf115, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf116 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
triton_poi_fused_convolution_leaky_relu_40[grid(524288)](buf115,
primals_43, buf116, 524288, XBLOCK=1024, num_warps=4, num_stages=1)
buf117 = empty_strided_cuda((4, 64, 64, 64), (262144, 4096, 64, 1),
torch.float32)
triton_poi_fused_cat_41[grid(1048576)](buf116, buf115, primals_43,
buf5, buf117, 1048576, XBLOCK=512, num_warps=8, num_stages=1)
del primals_43
buf118 = extern_kernels.convolution(buf117, primals_44, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf118, (4, 32, 64, 64), (131072, 4096, 64, 1))
buf119 = empty_strided_cuda((4, 32, 64, 64), (131072, 4096, 64, 1),
torch.bool)
buf120 = buf115
del buf115
triton_poi_fused_convolution_leaky_relu_0[grid(524288)](buf118,
primals_45, buf119, buf120, 524288, XBLOCK=1024, num_warps=4,
num_stages=1)
del buf118
del primals_45
buf121 = extern_kernels.convolution(buf120, primals_46, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf121, (4, 4, 64, 64), (16384, 4096, 64, 1))
buf122 = empty_strided_cuda((4, 4, 64, 64), (16384, 4096, 64, 1),
torch.bool)
buf123 = reinterpret_tensor(buf70, (4, 4, 64, 64), (16384, 4096, 64,
1), 0)
del buf70
triton_poi_fused_convolution_leaky_relu_42[grid(65536)](buf121,
primals_47, buf122, buf123, 65536, XBLOCK=512, num_warps=4,
num_stages=1)
del buf121
del primals_47
return (buf123, primals_1, primals_3, primals_4, primals_6, primals_8,
primals_10, primals_12, primals_14, primals_16, primals_18,
primals_20, primals_22, primals_24, primals_26, primals_28,
primals_30, primals_32, primals_34, primals_36, primals_38,
primals_40, primals_42, primals_44, primals_46, buf1, buf2, buf4,
buf5, buf6, buf8, buf9, buf11, buf12, buf13, buf15, buf16, buf18,
buf19, buf20, buf22, buf23, buf25, buf26, buf27, buf29, buf30,
buf32, buf33, buf34, buf36, buf37, buf39, buf40, buf41, buf42,
buf43, buf46, buf48, buf50, buf52, buf53, buf55, buf56, buf57,
buf58, buf59, buf62, buf64, buf66, buf68, buf69, buf71, buf72,
buf73, buf74, buf75, buf78, buf80, buf82, buf84, buf85, buf87,
buf88, buf89, buf90, buf91, buf94, buf96, buf98, buf100, buf101,
buf103, buf104, buf105, buf106, buf107, buf110, buf112, buf114,
buf116, buf117, buf119, buf120, buf122)
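# Hedged reference sketch (illustrative, not part of the generated module):
# the integer index buffers (buf56/buf57, buf72/buf73, ...) and fractional
# weight buffers (buf62/buf64, ...) built above drive the 2x bilinear
# upsampling steps. Assuming PyTorch's align_corners=False convention, they
# can be reproduced as:
def _bilinear_2x_indices_and_weights(out_size, in_size):
    # source coordinate for each destination index: (dst + 0.5) * scale - 0.5
    dst = torch.arange(out_size, dtype=torch.float32)
    src = ((dst + 0.5) * (in_size / out_size) - 0.5).clamp(min=0.0)
    idx0 = src.floor().to(torch.int64)            # *_to_copy_* kernels
    idx1 = (idx0 + 1).clamp(max=in_size - 1)      # *_add_clamp_* kernels
    frac = (src - idx0.float()).clamp(0.0, 1.0)   # *_arange_clamp_mul_sub_*
    return idx0, idx1, frac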
class down(nn.Module):
"""
A class for creating neural network blocks containing layers:
    Average Pooling --> Convolution + Leaky ReLU --> Convolution + Leaky ReLU
This is used in the UNet Class to create a UNet like NN architecture.
...
Methods
-------
forward(x)
Returns output tensor after passing input `x` to the neural network
block.
"""
def __init__(self, inChannels, outChannels, filterSize):
"""
Parameters
----------
inChannels : int
number of input channels for the first convolutional layer.
outChannels : int
number of output channels for the first convolutional layer.
            This is also used as the number of input and output channels
            for the second convolutional layer.
        filterSize : int
            filter size for the convolution filter. Input N would create
            an N x N filter.
"""
super(down, self).__init__()
self.conv1 = nn.Conv2d(inChannels, outChannels, filterSize, stride=
1, padding=int((filterSize - 1) / 2))
self.conv2 = nn.Conv2d(outChannels, outChannels, filterSize, stride
=1, padding=int((filterSize - 1) / 2))
def forward(self, x):
"""
Returns output tensor after passing input `x` to the neural network
block.
Parameters
----------
x : tensor
input to the NN block.
Returns
-------
tensor
output of the NN block.
"""
x = F.avg_pool2d(x, 2)
x = F.leaky_relu(self.conv1(x), negative_slope=0.1)
x = F.leaky_relu(self.conv2(x), negative_slope=0.1)
return x
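# Hedged usage sketch (illustrative, not part of the original model): with
# the sizes UNetNew passes to its first down block, avg_pool2d halves the
# spatial resolution before the two convolutions change the channel count.
def _down_shape_example():
    blk = down(32, 64, 5)
    y = blk(torch.rand(1, 32, 64, 64))
    assert y.shape == (1, 64, 32, 32)
    return y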
class up(nn.Module):
"""
A class for creating neural network blocks containing layers:
    Bilinear interpolation --> Convolution + Leaky ReLU --> Convolution + Leaky ReLU
This is used in the UNet Class to create a UNet like NN architecture.
...
Methods
-------
    forward(x, skpCn)
        Returns output tensor after passing input `x` and the skip
        connection `skpCn` to the neural network block.
"""
def __init__(self, inChannels, outChannels):
"""
Parameters
----------
inChannels : int
number of input channels for the first convolutional layer.
outChannels : int
number of output channels for the first convolutional layer.
            The second convolutional layer takes 2 * outChannels input
            channels (the first layer's output concatenated with the skip
            connection `skpCn`) and produces outChannels output channels.
"""
super(up, self).__init__()
self.conv1 = nn.Conv2d(inChannels, outChannels, 3, stride=1, padding=1)
self.conv2 = nn.Conv2d(2 * outChannels, outChannels, 3, stride=1,
padding=1)
def forward(self, x, skpCn):
"""
        Returns output tensor after passing inputs `x` and `skpCn` to the
        neural network block.
Parameters
----------
x : tensor
input to the NN block.
skpCn : tensor
skip connection input to the NN block.
Returns
-------
tensor
output of the NN block.
"""
x = F.interpolate(x, scale_factor=2, mode='bilinear')
x = F.leaky_relu(self.conv1(x), negative_slope=0.1)
x = F.leaky_relu(self.conv2(torch.cat((x, skpCn), 1)),
negative_slope=0.1)
return x
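# Hedged usage sketch (illustrative, not part of the original model): the up
# block doubles the spatial resolution, and after concatenation conv2 sees
# 2 * outChannels input channels.
def _up_shape_example():
    blk = up(512, 256)
    x = torch.rand(1, 512, 4, 4)
    skip = torch.rand(1, 256, 8, 8)
    y = blk(x, skip)
    assert y.shape == (1, 256, 8, 8)
    return y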
class UNetNew(nn.Module):
"""
A class for creating UNet like architecture as specified by the
Super SloMo paper.
...
Methods
-------
forward(x)
Returns output tensor after passing input `x` to the neural network
block.
"""
def __init__(self, inChannels, outChannels):
"""
Parameters
----------
inChannels : int
number of input channels for the UNet.
outChannels : int
number of output channels for the UNet.
"""
super(UNetNew, self).__init__()
self.conv1 = nn.Conv2d(inChannels, 32, 7, stride=1, padding=3)
self.conv2 = nn.Conv2d(32, 32, 7, stride=1, padding=3)
self.down1 = down(32, 64, 5)
self.down2 = down(64, 128, 3)
self.down3 = down(128, 256, 3)
self.down4 = down(256, 512, 3)
self.down5 = down(512, 512, 3)
self.up1 = up(512, 512)
self.up2 = up(512, 256)
self.up3 = up(256, 128)
self.up4 = up(128, 64)
self.up5 = up(64, 32)
self.conv3 = nn.Conv2d(32, outChannels, 3, stride=1, padding=1)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_2 = self.conv1.bias
primals_4 = self.conv2.weight
primals_5 = self.conv2.bias
primals_6 = self.down1.conv1.weight
primals_7 = self.down1.conv1.bias
primals_8 = self.down1.conv2.weight
primals_9 = self.down1.conv2.bias
primals_10 = self.down2.conv1.weight
primals_11 = self.down2.conv1.bias
primals_12 = self.down2.conv2.weight
primals_13 = self.down2.conv2.bias
primals_14 = self.down3.conv1.weight
primals_15 = self.down3.conv1.bias
primals_16 = self.down3.conv2.weight
primals_17 = self.down3.conv2.bias
primals_18 = self.down4.conv1.weight
primals_19 = self.down4.conv1.bias
primals_20 = self.down4.conv2.weight
primals_21 = self.down4.conv2.bias
primals_22 = self.down5.conv1.weight
primals_23 = self.down5.conv1.bias
primals_24 = self.down5.conv2.weight
primals_25 = self.down5.conv2.bias
primals_26 = self.up1.conv1.weight
primals_27 = self.up1.conv1.bias
primals_28 = self.up1.conv2.weight
primals_29 = self.up1.conv2.bias
primals_30 = self.up2.conv1.weight
primals_31 = self.up2.conv1.bias
primals_32 = self.up2.conv2.weight
primals_33 = self.up2.conv2.bias
primals_34 = self.up3.conv1.weight
primals_35 = self.up3.conv1.bias
primals_36 = self.up3.conv2.weight
primals_37 = self.up3.conv2.bias
primals_38 = self.up4.conv1.weight
primals_39 = self.up4.conv1.bias
primals_40 = self.up4.conv2.weight
primals_41 = self.up4.conv2.bias
primals_42 = self.up5.conv1.weight
primals_43 = self.up5.conv1.bias
primals_44 = self.up5.conv2.weight
primals_45 = self.up5.conv2.bias
primals_46 = self.conv3.weight
primals_47 = self.conv3.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12, primals_13, primals_14,
primals_15, primals_16, primals_17, primals_18, primals_19,
primals_20, primals_21, primals_22, primals_23, primals_24,
primals_25, primals_26, primals_27, primals_28, primals_29,
primals_30, primals_31, primals_32, primals_33, primals_34,
primals_35, primals_36, primals_37, primals_38, primals_39,
primals_40, primals_41, primals_42, primals_43, primals_44,
primals_45, primals_46, primals_47])
return output[0]
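# Hedged usage sketch: the compiled trace above was captured for a batch of
# four 64x64 inputs and a 4-channel output; the input channel count of 4 is
# an assumption here. CUDA is required because `call` allocates device
# buffers.
def _unet_example():
    model = UNetNew(4, 4).cuda()
    out = model(torch.rand(4, 4, 64, 64, device='cuda'))
    assert out.shape == (4, 4, 64, 64)
    return out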
|
CM-BF/FeatureFlow
|
UNet
| false | 13,711 |
[
"MIT"
] | 161 |
06642697922f17211e5faa353e24b1a0946885b1
|
https://github.com/CM-BF/FeatureFlow/tree/06642697922f17211e5faa353e24b1a0946885b1
|
PA
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/cf/ccffwnd4sq3sztv4dcw45c3j2dsqwq3jy7vc3mqe4l5j4dxdabmr.py
# Topologically Sorted Source Nodes: [conv2d, sigmoid, mul], Original ATen: [aten.convolution, aten.sigmoid, aten.mul]
# Source node to ATen node mapping:
# conv2d => convolution
# mul => mul
# sigmoid => sigmoid
# Graph fragment:
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%primals_3, %primals_1, %primals_2, [1, 1], [1, 1], [1, 1], False, [0, 0], 4), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%convolution,), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_3, %sigmoid), kwargs = {})
triton_poi_fused_convolution_mul_sigmoid_0 = async_compile.triton('triton_poi_fused_convolution_mul_sigmoid_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_mul_sigmoid_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_mul_sigmoid_0(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x3), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tl.sigmoid(tmp2)
tmp5 = tmp3 * tmp4
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
tl.store(out_ptr0 + (x3), tmp5, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0; del buf0 # reuse
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d, sigmoid, mul], Original ATen: [aten.convolution, aten.sigmoid, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_mul_sigmoid_0.run(buf1, primals_2, primals_3, buf2, 256, grid=grid(256), stream=stream0)
del primals_2
return (buf2, primals_1, primals_3, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 1, 3, 3), (9, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch import nn
class PA(nn.Module):
def __init__(self, dim):
super().__init__()
self.pa_conv = nn.Conv2d(dim, dim, 3, 1, 1, groups=dim)
def forward(self, x):
return x * self.pa_conv(x).sigmoid()
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
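# Hedged usage sketch (illustrative, not part of the original file): PA is
# pixel attention, y = x * sigmoid(depthwise_conv(x)), so the output shape
# always matches the input shape.
def _pa_example():
    m = PA(dim=4)
    x = get_inputs()[0]
    y = m(x)
    assert y.shape == x.shape
    return y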
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_mul_sigmoid_0(in_out_ptr0, in_ptr0,
in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp4 = tl.sigmoid(tmp2)
tmp5 = tmp3 * tmp4
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp5, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 1, 3, 3), (9, 9, 3, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_3, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=4, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = buf0
del buf0
buf2 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_mul_sigmoid_0[grid(256)](buf1,
primals_2, primals_3, buf2, 256, XBLOCK=256, num_warps=4,
num_stages=1)
del primals_2
return buf2, primals_1, primals_3, buf1
class PANew(nn.Module):
def __init__(self, dim):
super().__init__()
self.pa_conv = nn.Conv2d(dim, dim, 3, 1, 1, groups=dim)
def forward(self, input_0):
primals_1 = self.pa_conv.weight
primals_2 = self.pa_conv.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
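# Hedged parity sketch (assumes a CUDA device, since `call` pins buffers to
# cuda:0): with shared weights, the compiled path should match the eager
# computation x * sigmoid(pa_conv(x)).
def _pa_parity_check():
    m = PANew(4).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    eager = x * m.pa_conv(x).sigmoid()
    compiled = m(x)
    return torch.allclose(eager, compiled, atol=1e-6)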
|
Genevievekim/semantic-segmentation-1
|
PA
| false | 13,712 |
[
"BSD-3-Clause"
] | 196 |
f28b026e44cff80fe3ca4cac94cea27e4073821b
|
https://github.com/Genevievekim/semantic-segmentation-1/tree/f28b026e44cff80fe3ca4cac94cea27e4073821b
|
BasicBlock_AP
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/3j/c3jk4fd45xsskb354bmqh5ayvalm334wxs72twddoal7gsrew3wi.py
# Topologically Sorted Source Nodes: [group_norm, out], Original ATen: [aten.native_group_norm, aten.relu]
# Source node to ATen node mapping:
# group_norm => add, add_1, mul_1, rsqrt, var_mean
# out => relu
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze_5), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_2), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_1,), kwargs = {})
triton_per_fused_native_group_norm_relu_0 = async_compile.triton('triton_per_fused_native_group_norm_relu_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: 'i32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_relu_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_native_group_norm_relu_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 16.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = tl.full([1, 1], 0, tl.int32)
tmp29 = triton_helpers.maximum(tmp28, tmp27)
tl.store(out_ptr2 + (r1 + (16*x0)), tmp29, xmask)
tl.store(out_ptr3 + (x0), tmp22, xmask)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/m4/cm4pnz5tuthpgeaoduefuxxbcl6vfg4dshuxv6f2jkjmtbmnax4p.py
# Topologically Sorted Source Nodes: [out_1, out_2, out_3], Original ATen: [aten.native_group_norm, aten.add, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_1 => add_2, add_3, mul_3, rsqrt_1, var_mean_1
# out_2 => add_4
# out_3 => relu_1
# Graph fragment:
# %var_mean_1 : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_2, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add_2 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem_2, 1e-05), kwargs = {})
# %rsqrt_1 : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add_2,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_3, %unsqueeze_11), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_3, %unsqueeze_8), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_3, %primals_2), kwargs = {})
# %relu_1 : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_4,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu_1, 0), kwargs = {})
triton_per_fused_add_native_group_norm_relu_threshold_backward_1 = async_compile.triton('triton_per_fused_add_native_group_norm_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*i1', 7: '*fp32', 8: 'i32', 9: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_add_native_group_norm_relu_threshold_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_add_native_group_norm_relu_threshold_backward_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + (x2), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + (x2), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr3 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 16.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp29 = tmp27 + tmp28
tmp30 = tl.full([1, 1], 0, tl.int32)
tmp31 = triton_helpers.maximum(tmp30, tmp29)
tmp32 = 0.0
tmp33 = tmp31 <= tmp32
tl.store(out_ptr2 + (r1 + (16*x0)), tmp31, xmask)
tl.store(out_ptr3 + (r1 + (16*x0)), tmp33, xmask)
tl.store(out_ptr4 + (x0), tmp22, xmask)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [group_norm, out], Original ATen: [aten.native_group_norm, aten.relu]
stream0 = get_raw_stream(0)
triton_per_fused_native_group_norm_relu_0.run(buf0, primals_3, primals_4, buf1, buf5, buf4, 16, 16, grid=grid(16), stream=stream0)
del primals_4
# Topologically Sorted Source Nodes: [conv2d_1], Original ATen: [aten.convolution]
buf6 = extern_kernels.convolution(buf5, primals_5, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1))
buf7 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf10 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
# Topologically Sorted Source Nodes: [out_1, out_2, out_3], Original ATen: [aten.native_group_norm, aten.add, aten.relu, aten.threshold_backward]
triton_per_fused_add_native_group_norm_relu_threshold_backward_1.run(buf6, primals_6, primals_7, primals_2, buf7, buf11, buf12, buf10, 16, 16, grid=grid(16), stream=stream0)
del primals_7
return (buf11, primals_1, primals_2, primals_3, primals_5, primals_6, buf0, reinterpret_tensor(buf1, (4, 4), (4, 1), 0), reinterpret_tensor(buf4, (4, 4), (4, 1), 0), buf5, buf6, reinterpret_tensor(buf7, (4, 4), (4, 1), 0), reinterpret_tensor(buf10, (4, 4), (4, 1), 0), buf12, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock_AP(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1, norm='instancenorm'):
super(BasicBlock_AP, self).__init__()
self.norm = norm
self.stride = stride
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=1,
padding=1, bias=False)
self.bn1 = nn.GroupNorm(planes, planes, affine=True
) if self.norm == 'instancenorm' else nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,
padding=1, bias=False)
self.bn2 = nn.GroupNorm(planes, planes, affine=True
) if self.norm == 'instancenorm' else nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(nn.Conv2d(in_planes, self.
expansion * planes, kernel_size=1, stride=1, bias=False),
nn.AvgPool2d(kernel_size=2, stride=2), nn.GroupNorm(self.
expansion * planes, self.expansion * planes, affine=True) if
self.norm == 'instancenorm' else nn.BatchNorm2d(self.
expansion * planes))
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
if self.stride != 1:
out = F.avg_pool2d(out, kernel_size=2, stride=2)
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_planes': 4, 'planes': 4}]
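# Hedged usage sketch (illustrative, not part of the original file): with
# stride=1 and in_planes == planes, the shortcut is the identity and the
# block preserves the input shape.
def _basic_block_example():
    blk = BasicBlock_AP(4, 4)
    x = torch.rand(4, 4, 4, 4)
    y = blk(x)
    assert y.shape == x.shape
    return y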
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_native_group_norm_relu_0(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, out_ptr2, out_ptr3, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 16.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = tl.full([1, 1], 0, tl.int32)
tmp29 = triton_helpers.maximum(tmp28, tmp27)
tl.store(out_ptr2 + (r1 + 16 * x0), tmp29, xmask)
tl.store(out_ptr3 + x0, tmp22, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
@triton.jit
def triton_per_fused_add_native_group_norm_relu_threshold_backward_1(in_ptr0,
in_ptr1, in_ptr2, in_ptr3, out_ptr0, out_ptr2, out_ptr3, out_ptr4,
xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
x2 = xindex % 4
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + x2, xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + x2, xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr3 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 16.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp29 = tmp27 + tmp28
tmp30 = tl.full([1, 1], 0, tl.int32)
tmp31 = triton_helpers.maximum(tmp30, tmp29)
tmp32 = 0.0
tmp33 = tmp31 <= tmp32
tl.store(out_ptr2 + (r1 + 16 * x0), tmp31, xmask)
tl.store(out_ptr3 + (r1 + 16 * x0), tmp33, xmask)
tl.store(out_ptr4 + x0, tmp22, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf4 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
get_raw_stream(0)
triton_per_fused_native_group_norm_relu_0[grid(16)](buf0, primals_3,
primals_4, buf1, buf5, buf4, 16, 16, XBLOCK=8, num_warps=2,
num_stages=1)
del primals_4
buf6 = extern_kernels.convolution(buf5, primals_5, stride=(1, 1),
padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf6, (4, 4, 4, 4), (64, 16, 4, 1))
buf7 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf11 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf12 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf10 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
triton_per_fused_add_native_group_norm_relu_threshold_backward_1[grid
(16)](buf6, primals_6, primals_7, primals_2, buf7, buf11, buf12,
buf10, 16, 16, XBLOCK=8, num_warps=2, num_stages=1)
del primals_7
return (buf11, primals_1, primals_2, primals_3, primals_5, primals_6,
buf0, reinterpret_tensor(buf1, (4, 4), (4, 1), 0),
reinterpret_tensor(buf4, (4, 4), (4, 1), 0), buf5, buf6,
reinterpret_tensor(buf7, (4, 4), (4, 1), 0), reinterpret_tensor(
buf10, (4, 4), (4, 1), 0), buf12)
class BasicBlock_APNew(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1, norm='instancenorm'):
super(BasicBlock_APNew, self).__init__()
self.norm = norm
self.stride = stride
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=1,
padding=1, bias=False)
self.bn1 = nn.GroupNorm(planes, planes, affine=True
) if self.norm == 'instancenorm' else nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,
padding=1, bias=False)
self.bn2 = nn.GroupNorm(planes, planes, affine=True
) if self.norm == 'instancenorm' else nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion * planes:
self.shortcut = nn.Sequential(nn.Conv2d(in_planes, self.
expansion * planes, kernel_size=1, stride=1, bias=False),
nn.AvgPool2d(kernel_size=2, stride=2), nn.GroupNorm(self.
expansion * planes, self.expansion * planes, affine=True) if
self.norm == 'instancenorm' else nn.BatchNorm2d(self.
expansion * planes))
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_3 = self.bn1.weight
primals_4 = self.bn1.bias
primals_5 = self.conv2.weight
primals_6 = self.bn2.weight
primals_7 = self.bn2.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7])
return output[0]
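# Hedged reference sketch (illustrative, not part of the generated module):
# GroupNorm(planes, planes) puts one channel per group, so the fused kernels
# above normalise each (sample, channel) pair over its 16 spatial positions,
# i.e. instance normalisation with affine parameters and eps=1e-05.
def _instance_norm_reference(x, weight, bias, eps=1e-05):
    mean = x.mean(dim=(2, 3), keepdim=True)
    var = x.var(dim=(2, 3), unbiased=False, keepdim=True)
    xhat = (x - mean) * torch.rsqrt(var + eps)
    return xhat * weight.view(1, -1, 1, 1) + bias.view(1, -1, 1, 1)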
|
GeorgeCazenavette/mtt-distillation
|
BasicBlock_AP
| false | 13,713 |
[
"MIT"
] | 105 |
e13a65980183fbc33238ca6cbb6cfec819018e2d
|
https://github.com/GeorgeCazenavette/mtt-distillation/tree/e13a65980183fbc33238ca6cbb6cfec819018e2d
|
SqueezeExcitation
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/l3/cl35tzbhrd24dhunkbb6gjs54aklpyr46oikqhoylcgmkcmhujil.py
# Topologically Sorted Source Nodes: [scale], Original ATen: [aten.mean]
# Source node to ATen node mapping:
# scale => mean
# Graph fragment:
# %mean : [num_users=2] = call_function[target=torch.ops.aten.mean.dim](args = (%primals_1, [-1, -2], True), kwargs = {})
triton_per_fused_mean_0 = async_compile.triton('triton_per_fused_mean_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[16, 16],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_mean_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 1, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 16
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (16*x0)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + (x0), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/sh/cshbgrlhlsuuebcz7jbje66sr2nkeng6kilqpqluwrr5ru2afxle.py
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
# Source node to ATen node mapping:
# conv2d => convolution
# relu => relu
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%mean, %primals_2, %primals_3, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%convolution,), kwargs = {})
triton_poi_fused_convolution_relu_1 = async_compile.triton('triton_poi_fused_convolution_relu_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[32],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_relu_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/22/c22ve3r4y5fft3g2xdkwnomz747tvayj57gf6zszmac5hjy65wgt.py
# Topologically Sorted Source Nodes: [scale_1, scale_2, mul], Original ATen: [aten.convolution, aten.hardsigmoid, aten.mul]
# Source node to ATen node mapping:
# mul => mul
# scale_1 => convolution_1
# scale_2 => add, clamp_max, clamp_min, div
# Graph fragment:
# %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%convolution_1, 3), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%add, 0), kwargs = {})
# %clamp_max : [num_users=1] = call_function[target=torch.ops.aten.clamp_max.default](args = (%clamp_min, 6), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%clamp_max, 6), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%div, %primals_1), kwargs = {})
triton_poi_fused_convolution_hardsigmoid_mul_2 = async_compile.triton('triton_poi_fused_convolution_hardsigmoid_mul_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_hardsigmoid_mul_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_hardsigmoid_mul_2(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = (xindex // 16)
x1 = (xindex // 16) % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x3), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr2 + (x4), xmask)
tmp2 = tmp0 + tmp1
tmp3 = 3.0
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = 6.0
tmp8 = triton_helpers.minimum(tmp6, tmp7)
tmp9 = 0.16666666666666666
tmp10 = tmp8 * tmp9
tmp12 = tmp10 * tmp11
tl.store(out_ptr0 + (x4), tmp12, xmask)
''', device_str='cuda')
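# Hedged reference sketch (illustrative, not generated code): the kernel
# above adds the conv bias, applies PyTorch's hardsigmoid,
# clamp(x + 3, 0, 6) / 6 (note tmp9 == 1/6), and multiplies the result back
# onto the squeeze-excitation input.
def _hardsigmoid_scale_reference(conv_out_with_bias, x):
    scale = torch.clamp(conv_out_with_bias + 3.0, 0.0, 6.0) / 6.0
    return scale * x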
# kernel path: runs/run_shard_0/inductor_cache/hv/chvrs33e2wyvaktcs4kouk236tzwn7sho4sgx2yjf7sjc72okw6d.py
# Topologically Sorted Source Nodes: [scale_1], Original ATen: [aten.convolution, aten.hardsigmoid_backward]
# Source node to ATen node mapping:
# scale_1 => convolution_1
# Graph fragment:
# %convolution_1 : [num_users=3] = call_function[target=torch.ops.aten.convolution.default](args = (%relu, %primals_4, %primals_5, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%convolution_1, -3.0), kwargs = {})
# %lt : [num_users=1] = call_function[target=torch.ops.aten.lt.Scalar](args = (%convolution_1, 3.0), kwargs = {})
# %bitwise_and : [num_users=1] = call_function[target=torch.ops.aten.bitwise_and.Tensor](args = (%gt, %lt), kwargs = {})
triton_poi_fused_convolution_hardsigmoid_backward_3 = async_compile.triton('triton_poi_fused_convolution_hardsigmoid_backward_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_hardsigmoid_backward_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_hardsigmoid_backward_3(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = -3.0
tmp4 = tmp2 > tmp3
tmp5 = 3.0
tmp6 = tmp2 < tmp5
tmp7 = tmp4 & tmp6
tl.store(out_ptr0 + (x2), tmp7, xmask)
''', device_str='cuda')
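# Reference sketch (assumption): the boolean buffer computed above is the
# support of hardsigmoid's derivative -- gradients flow (scaled by 1/6) only
# where -3 < pre-activation < 3, and are zeroed elsewhere.
def _hardsigmoid_grad_mask(pre_activation):
    return (pre_activation > -3.0) & (pre_activation < 3.0)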
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (8, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (8, ), (1, ))
assert_size_stride(primals_4, (4, 8, 1, 1), (8, 1, 1, 1))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [scale], Original ATen: [aten.mean]
stream0 = get_raw_stream(0)
triton_per_fused_mean_0.run(buf1, primals_1, 16, 16, grid=grid(16), stream=stream0)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 8, 1, 1), (8, 1, 1, 1))
buf3 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [conv2d, relu], Original ATen: [aten.convolution, aten.relu]
triton_poi_fused_convolution_relu_1.run(buf3, primals_3, 32, grid=grid(32), stream=stream0)
del primals_3
# Topologically Sorted Source Nodes: [scale_1], Original ATen: [aten.convolution]
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 1, 1), (4, 1, 1, 1))
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [scale_1, scale_2, mul], Original ATen: [aten.convolution, aten.hardsigmoid, aten.mul]
triton_poi_fused_convolution_hardsigmoid_mul_2.run(buf4, primals_5, primals_1, buf5, 256, grid=grid(256), stream=stream0)
buf6 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool)
# Topologically Sorted Source Nodes: [scale_1], Original ATen: [aten.convolution, aten.hardsigmoid_backward]
triton_poi_fused_convolution_hardsigmoid_backward_3.run(buf4, primals_5, buf6, 16, grid=grid(16), stream=stream0)
del buf4
del primals_5
return (buf5, primals_1, primals_2, primals_4, buf1, buf3, buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((8, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((8, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 8, 1, 1), (8, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch import Tensor
from typing import Optional
from torch import nn
from torch.nn import functional as F
def _make_divisible(v: 'float', divisor: 'int', min_value: 'Optional[int]'=None
) ->int:
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
if new_v < 0.9 * v:
new_v += divisor
return new_v
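# Worked example (derived from the rule above): with ch=4 and squeeze_factor=4,
# _make_divisible(4 // 4, 8) = max(8, int(1 + 4) // 8 * 8) = max(8, 0) = 8,
# so the squeeze branch is widened to 8 channels -- hence fc1's weight shape
# of (8, 4, 1, 1) in the compiled graph.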
class SqueezeExcitation(nn.Module):
def __init__(self, ch, squeeze_factor=4):
super().__init__()
squeeze_ch = _make_divisible(ch // squeeze_factor, 8)
self.fc1 = nn.Conv2d(ch, squeeze_ch, 1)
self.relu = nn.ReLU(True)
self.fc2 = nn.Conv2d(squeeze_ch, ch, 1)
def _scale(self, x: 'Tensor') ->Tensor:
scale = F.adaptive_avg_pool2d(x, 1)
scale = self.fc2(self.relu(self.fc1(scale)))
        return F.hardsigmoid(scale, inplace=True)
def forward(self, x: 'Tensor') ->Tensor:
scale = self._scale(x)
return scale * x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'ch': 4}]
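# Minimal usage sketch (an illustrative addition; shapes follow get_inputs):
if __name__ == "__main__":
    se = SqueezeExcitation(**get_init_inputs()[1])
    x, = get_inputs()
    y = se(x)
    assert y.shape == x.shape  # SE reweights channels but preserves shape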
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import Tensor
from typing import Optional
from torch import nn
from torch.nn import functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused_mean_0(in_out_ptr0, in_ptr0, xnumel, rnumel, XBLOCK:
tl.constexpr):
xnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 16 * x0), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.sum(tmp3, 1)[:, None]
tmp5 = 16.0
tmp6 = tmp4 / tmp5
tl.debug_barrier()
tl.store(in_out_ptr0 + x0, tmp6, xmask)
@triton.jit
def triton_poi_fused_convolution_relu_1(in_out_ptr0, in_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 32
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 8
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_convolution_hardsigmoid_mul_2(in_ptr0, in_ptr1,
in_ptr2, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex // 16
x1 = xindex // 16 % 4
x4 = xindex
tmp0 = tl.load(in_ptr0 + x3, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr2 + x4, xmask)
tmp2 = tmp0 + tmp1
tmp3 = 3.0
tmp4 = tmp2 + tmp3
tmp5 = 0.0
tmp6 = triton_helpers.maximum(tmp4, tmp5)
tmp7 = 6.0
tmp8 = triton_helpers.minimum(tmp6, tmp7)
tmp9 = 0.16666666666666666
tmp10 = tmp8 * tmp9
tmp12 = tmp10 * tmp11
tl.store(out_ptr0 + x4, tmp12, xmask)
@triton.jit
def triton_poi_fused_convolution_hardsigmoid_backward_3(in_ptr0, in_ptr1,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = -3.0
tmp4 = tmp2 > tmp3
tmp5 = 3.0
tmp6 = tmp2 < tmp5
tmp7 = tmp4 & tmp6
tl.store(out_ptr0 + x2, tmp7, xmask)
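# Orchestration below: global average pool (mean kernel), 1x1 conv + ReLU,
# then a 1x1 conv whose output feeds both the fused hardsigmoid-multiply
# kernel and the hardsigmoid backward mask buffer returned for autograd.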
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (8, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_3, (8,), (1,))
assert_size_stride(primals_4, (4, 8, 1, 1), (8, 1, 1, 1))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 16, 16), torch.float32)
buf1 = reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0)
del buf0
get_raw_stream(0)
triton_per_fused_mean_0[grid(16)](buf1, primals_1, 16, 16, XBLOCK=1,
num_warps=2, num_stages=1)
buf2 = extern_kernels.convolution(buf1, primals_2, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 8, 1, 1), (8, 1, 1, 1))
buf3 = buf2
del buf2
triton_poi_fused_convolution_relu_1[grid(32)](buf3, primals_3, 32,
XBLOCK=32, num_warps=1, num_stages=1)
del primals_3
buf4 = extern_kernels.convolution(buf3, primals_4, stride=(1, 1),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf4, (4, 4, 1, 1), (4, 1, 1, 1))
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_convolution_hardsigmoid_mul_2[grid(256)](buf4,
primals_5, primals_1, buf5, 256, XBLOCK=128, num_warps=4,
num_stages=1)
buf6 = empty_strided_cuda((4, 4, 1, 1), (4, 1, 1, 1), torch.bool)
triton_poi_fused_convolution_hardsigmoid_backward_3[grid(16)](buf4,
primals_5, buf6, 16, XBLOCK=16, num_warps=1, num_stages=1)
del buf4
del primals_5
return buf5, primals_1, primals_2, primals_4, buf1, buf3, buf6
def _make_divisible(v: 'float', divisor: 'int', min_value: 'Optional[int]'=None
) ->int:
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
if new_v < 0.9 * v:
new_v += divisor
return new_v
class SqueezeExcitationNew(nn.Module):
def __init__(self, ch, squeeze_factor=4):
super().__init__()
squeeze_ch = _make_divisible(ch // squeeze_factor, 8)
self.fc1 = nn.Conv2d(ch, squeeze_ch, 1)
self.relu = nn.ReLU(True)
self.fc2 = nn.Conv2d(squeeze_ch, ch, 1)
def _scale(self, x: 'Tensor') ->Tensor:
scale = F.adaptive_avg_pool2d(x, 1)
scale = self.fc2(self.relu(self.fc1(scale)))
        return F.hardsigmoid(scale, inplace=True)
def forward(self, input_0):
primals_2 = self.fc1.weight
primals_3 = self.fc1.bias
primals_4 = self.fc2.weight
primals_5 = self.fc2.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
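# Optional smoke test (an assumption: requires a CUDA device, since `call`
# allocates CUDA buffers; not part of the generated module's contract):
if __name__ == "__main__" and torch.cuda.is_available():
    mod = SqueezeExcitationNew(4).cuda()
    x = torch.rand(4, 4, 4, 4, device="cuda")
    assert mod(x).shape == x.shape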
|
Genevievekim/semantic-segmentation-1
|
SqueezeExcitation
| false | 13,714 |
[
"BSD-3-Clause"
] | 196 |
f28b026e44cff80fe3ca4cac94cea27e4073821b
|
https://github.com/Genevievekim/semantic-segmentation-1/tree/f28b026e44cff80fe3ca4cac94cea27e4073821b
|
GlobalAttention
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/r6/cr6neze6yovkog6kjrk5k2db63h47ozkojywfys6karxe7dlumrz.py
# Topologically Sorted Source Nodes: [align_vectors], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# align_vectors => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%view, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x2), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/kj/ckjtlefzavjukjsytvkak6ek26zmzexpcbnlwelx4k5kascjxlf3.py
# Topologically Sorted Source Nodes: [align_vectors], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# align_vectors => div, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
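# Reference sketch (an assumption added for readability, not emitted by
# Inductor): kernels _softmax_0 and _softmax_1 together implement a
# numerically stable row softmax over groups of four contiguous elements --
# subtract the row max, exponentiate, then normalize by the row sum.
def _softmax_reference(x):
    shifted = x - x.max(dim=-1, keepdim=True).values  # guards against overflow
    e = shifted.exp()
    return e / e.sum(dim=-1, keepdim=True)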
# kernel path: runs/run_shard_0/inductor_cache/ip/cip3p4ibqio6uu76ccsemd7wjusq5ptlow3dt2zxzouyuz2sqywf.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=1] = call_function[target=torch.ops.aten.cat.default](args = ([%bmm_1, %primals_1], 2), kwargs = {})
triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = (xindex // 8)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + ((4*x1) + x0), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 8, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tl.load(in_ptr1 + ((4*x1) + ((-4) + x0)), tmp6 & xmask, eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
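# Reference sketch (assumption): the cat kernel is the flat-index form of
# torch.cat([c, query], dim=2) for two (4, 4, 4) tensors -- the fastest index
# selects in_ptr0 for positions 0..3 and in_ptr1 for positions 4..7.
def _cat_reference(c, query):
    return torch.cat([c, query], dim=2)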
# kernel path: runs/run_shard_0/inductor_cache/f5/cf5pnuv5il7avsmzck3quom7r6zvcfuulsdwpzlv2epzfmcgqgwb.py
# Topologically Sorted Source Nodes: [attn_h_2], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# attn_h_2 => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_2,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_3 = async_compile.triton('triton_poi_fused_clone_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_3', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1)), xmask)
tmp1 = libdevice.tanh(tmp0)
tl.store(out_ptr0 + (x3), tmp1, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/u4/cu4fypgfipklcxtitafatnyqdaatx5tws6qfndqotcy4qivcph6d.py
# Topologically Sorted Source Nodes: [align_vectors_2], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# align_vectors_2 => clone_1
# Graph fragment:
# %clone_1 : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute_3,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_4 = async_compile.triton('triton_poi_fused_clone_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4) % 4
x2 = (xindex // 16)
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x2) + (16*x1)), xmask)
tl.store(out_ptr0 + (x3), tmp0, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [align], Original ATen: [aten.bmm]
extern_kernels.bmm(primals_1, reinterpret_tensor(primals_2, (4, 4, 4), (16, 1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [align_vectors], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(buf0, buf1, 64, grid=grid(64), stream=stream0)
buf2 = reinterpret_tensor(buf0, (16, 4), (4, 1), 0); del buf0 # reuse
# Topologically Sorted Source Nodes: [align_vectors], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf1, buf2, 64, grid=grid(64), stream=stream0)
buf3 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [c], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1), 0), primals_2, out=buf3)
del primals_2
buf4 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(buf3, primals_1, buf4, 128, grid=grid(128), stream=stream0)
del primals_1
buf5 = reinterpret_tensor(buf3, (16, 4), (4, 1), 0); del buf3 # reuse
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf4, (16, 8), (8, 1), 0), reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf5)
del primals_3
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [attn_h_2], Original ATen: [aten.clone]
triton_poi_fused_clone_3.run(buf5, buf6, 64, grid=grid(64), stream=stream0)
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [align_vectors_2], Original ATen: [aten.clone]
triton_poi_fused_clone_4.run(buf2, buf7, 64, grid=grid(64), stream=stream0)
del buf2
return (buf6, buf7, reinterpret_tensor(buf4, (16, 8), (8, 1), 0), buf5, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 8), (8, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.utils.data
import torch.cuda
import torch.optim
def aeq(*args):
"""
Assert all arguments have the same value
"""
arguments = (arg for arg in args)
first = next(arguments)
assert all(arg == first for arg in arguments
), 'Not all arguments have the same value: ' + str(args)
class Bottle(nn.Module):
def forward(self, input):
if len(input.size()) <= 2:
return super(Bottle, self).forward(input)
size = input.size()[:2]
out = super(Bottle, self).forward(input.view(size[0] * size[1], -1))
return out.contiguous().view(size[0], size[1], -1)
class BottleLinear(Bottle, nn.Linear):
pass
class GlobalAttention(nn.Module):
"""
Luong Attention.
Global attention takes a matrix and a query vector. It
then computes a parameterized convex combination of the matrix
based on the input query.
H_1 H_2 H_3 ... H_n
q q q q
| | | |
\\ | | /
.....
\\ | /
a
Constructs a unit mapping.
$$(H_1 + H_n, q) => (a)$$
Where H is of `batch x n x dim` and q is of `batch x dim`.
    Luong Attention (dot, general):
    The full function is
    $$\\tanh(W_2 [(softmax((W_1 q + b_1) H) H), q] + b_2)$$.
    * dot: $$score(h_t,{\\overline{h}}_s) = h_t^T{\\overline{h}}_s$$
    * general: $$score(h_t,{\\overline{h}}_s) = h_t^T W_a {\\overline{h}}_s$$
    Bahdanau Attention (mlp):
    $$c = \\sum_{j=1}^{SeqLength}a_jh_j$$.
    The Alignment-function $$a$$ computes an alignment as:
    $$a_j = softmax(v_a^T \\tanh(W_a q + U_a h_j) )$$.
"""
def __init__(self, dim, coverage=False, attn_type='dot'):
super(GlobalAttention, self).__init__()
self.dim = dim
self.attn_type = attn_type
assert self.attn_type in ['dot', 'general', 'mlp'
], 'Please select a valid attention type.'
if self.attn_type == 'general':
self.linear_in = nn.Linear(dim, dim, bias=False)
elif self.attn_type == 'mlp':
self.linear_context = BottleLinear(dim, dim, bias=False)
self.linear_query = nn.Linear(dim, dim, bias=True)
self.v = BottleLinear(dim, 1, bias=False)
out_bias = self.attn_type == 'mlp'
self.linear_out = nn.Linear(dim * 2, dim, bias=out_bias)
        self.sm = nn.Softmax(dim=-1)
self.tanh = nn.Tanh()
self.mask = None
if coverage:
self.linear_cover = nn.Linear(1, dim, bias=False)
def applyMask(self, mask):
self.mask = mask
def score(self, h_t, h_s):
"""
h_t (FloatTensor): batch x tgt_len x dim
h_s (FloatTensor): batch x src_len x dim
returns scores (FloatTensor): batch x tgt_len x src_len:
raw attention scores for each src index
"""
src_batch, src_len, src_dim = h_s.size()
tgt_batch, tgt_len, tgt_dim = h_t.size()
aeq(src_batch, tgt_batch)
aeq(src_dim, tgt_dim)
aeq(self.dim, src_dim)
if self.attn_type in ['general', 'dot']:
if self.attn_type == 'general':
h_t_ = h_t.view(tgt_batch * tgt_len, tgt_dim)
h_t_ = self.linear_in(h_t_)
h_t = h_t_.view(tgt_batch, tgt_len, tgt_dim)
h_s_ = h_s.transpose(1, 2)
return torch.bmm(h_t, h_s_)
else:
dim = self.dim
wq = self.linear_query(h_t.view(-1, dim))
wq = wq.view(tgt_batch, tgt_len, 1, dim)
wq = wq.expand(tgt_batch, tgt_len, src_len, dim)
uh = self.linear_context(h_s.contiguous().view(-1, dim))
uh = uh.view(src_batch, 1, src_len, dim)
uh = uh.expand(src_batch, tgt_len, src_len, dim)
wquh = self.tanh(wq + uh)
return self.v(wquh.view(-1, dim)).view(tgt_batch, tgt_len, src_len)
def forward(self, input, context, coverage=None):
"""
input (FloatTensor): batch x tgt_len x dim: decoder's rnn's output.
context (FloatTensor): batch x src_len x dim: src hidden states
coverage (FloatTensor): None (not supported yet)
"""
if input.dim() == 2:
one_step = True
input = input.unsqueeze(1)
else:
one_step = False
batch, sourceL, dim = context.size()
batch_, targetL, dim_ = input.size()
aeq(batch, batch_)
aeq(dim, dim_)
aeq(self.dim, dim)
if coverage is not None:
batch_, sourceL_ = coverage.size()
aeq(batch, batch_)
aeq(sourceL, sourceL_)
if self.mask is not None:
beam_, batch_, sourceL_ = self.mask.size()
aeq(batch, batch_ * beam_)
aeq(sourceL, sourceL_)
if coverage is not None:
cover = coverage.view(-1).unsqueeze(1)
context += self.linear_cover(cover).view_as(context)
context = self.tanh(context)
align = self.score(input, context)
if self.mask is not None:
mask_ = self.mask.view(batch, 1, sourceL)
align.data.masked_fill_(mask_, -float('inf'))
align_vectors = self.sm(align.view(batch * targetL, sourceL))
align_vectors = align_vectors.view(batch, targetL, sourceL)
c = torch.bmm(align_vectors, context)
concat_c = torch.cat([c, input], 2).view(batch * targetL, dim * 2)
attn_h = self.linear_out(concat_c).view(batch, targetL, dim)
if self.attn_type in ['general', 'dot']:
attn_h = self.tanh(attn_h)
if one_step:
attn_h = attn_h.squeeze(1)
align_vectors = align_vectors.squeeze(1)
batch_, dim_ = attn_h.size()
aeq(batch, batch_)
aeq(dim, dim_)
batch_, sourceL_ = align_vectors.size()
aeq(batch, batch_)
aeq(sourceL, sourceL_)
else:
attn_h = attn_h.transpose(0, 1).contiguous()
align_vectors = align_vectors.transpose(0, 1).contiguous()
targetL_, batch_, dim_ = attn_h.size()
aeq(targetL, targetL_)
aeq(batch, batch_)
aeq(dim, dim_)
targetL_, batch_, sourceL_ = align_vectors.size()
aeq(targetL, targetL_)
aeq(batch, batch_)
aeq(sourceL, sourceL_)
return attn_h, align_vectors
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
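# Minimal usage sketch (an illustrative addition; shapes follow get_inputs):
if __name__ == "__main__":
    attn = GlobalAttention(**get_init_inputs()[1])
    query, context = get_inputs()
    attn_h, align_vectors = attn(query, context)
    # the multi-step path returns tgt_len x batch x dim and
    # tgt_len x batch x src_len respectively
    assert attn_h.shape == (4, 4, 4) and align_vectors.shape == (4, 4, 4)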
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.utils.data
import torch.cuda
import torch.optim
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x2, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.
constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 8
x1 = xindex // 8
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 4, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (4 * x1 + x0), tmp4 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tl.full([1], 8, tl.int64)
tmp9 = tl.load(in_ptr1 + (4 * x1 + (-4 + x0)), tmp6 & xmask,
eviction_policy='evict_last', other=0.0)
tmp10 = tl.where(tmp4, tmp5, tmp9)
tl.store(out_ptr0 + x2, tmp10, xmask)
@triton.jit
def triton_poi_fused_clone_3(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask)
tmp1 = libdevice.tanh(tmp0)
tl.store(out_ptr0 + x3, tmp1, xmask)
@triton.jit
def triton_poi_fused_clone_4(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4 % 4
x2 = xindex // 16
x3 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x2 + 16 * x1), xmask)
tl.store(out_ptr0 + x3, tmp0, xmask)
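# Orchestration below: bmm raw scores, two-pass stable softmax, bmm against
# the context, concat with the query, linear_out projection, then tanh and
# transpose clones to produce the multi-step output layout.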
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_3, (4, 8), (8, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(primals_1, reinterpret_tensor(primals_2, (4, 4,
4), (16, 1, 4), 0), out=buf0)
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf0, buf1, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf2 = reinterpret_tensor(buf0, (16, 4), (4, 1), 0)
del buf0
triton_poi_fused__softmax_1[grid(64)](buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf3 = reinterpret_tensor(buf1, (4, 4, 4), (16, 4, 1), 0)
del buf1
extern_kernels.bmm(reinterpret_tensor(buf2, (4, 4, 4), (16, 4, 1),
0), primals_2, out=buf3)
del primals_2
buf4 = empty_strided_cuda((4, 4, 8), (32, 8, 1), torch.float32)
triton_poi_fused_cat_2[grid(128)](buf3, primals_1, buf4, 128,
XBLOCK=128, num_warps=4, num_stages=1)
del primals_1
buf5 = reinterpret_tensor(buf3, (16, 4), (4, 1), 0)
del buf3
extern_kernels.mm(reinterpret_tensor(buf4, (16, 8), (8, 1), 0),
reinterpret_tensor(primals_3, (8, 4), (1, 8), 0), out=buf5)
del primals_3
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clone_3[grid(64)](buf5, buf6, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf7 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_clone_4[grid(64)](buf2, buf7, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf2
return buf6, buf7, reinterpret_tensor(buf4, (16, 8), (8, 1), 0), buf5
def aeq(*args):
"""
Assert all arguments have the same value
"""
arguments = (arg for arg in args)
first = next(arguments)
assert all(arg == first for arg in arguments
), 'Not all arguments have the same value: ' + str(args)
class Bottle(nn.Module):
def forward(self, input):
if len(input.size()) <= 2:
return super(Bottle, self).forward(input)
size = input.size()[:2]
out = super(Bottle, self).forward(input.view(size[0] * size[1], -1))
return out.contiguous().view(size[0], size[1], -1)
class BottleLinear(Bottle, nn.Linear):
pass
class GlobalAttentionNew(nn.Module):
"""
Luong Attention.
Global attention takes a matrix and a query vector. It
then computes a parameterized convex combination of the matrix
based on the input query.
H_1 H_2 H_3 ... H_n
q q q q
| | | |
\\ | | /
.....
\\ | /
a
Constructs a unit mapping.
$$(H_1 + H_n, q) => (a)$$
Where H is of `batch x n x dim` and q is of `batch x dim`.
    Luong Attention (dot, general):
    The full function is
    $$\\tanh(W_2 [(softmax((W_1 q + b_1) H) H), q] + b_2)$$.
    * dot: $$score(h_t,{\\overline{h}}_s) = h_t^T{\\overline{h}}_s$$
    * general: $$score(h_t,{\\overline{h}}_s) = h_t^T W_a {\\overline{h}}_s$$
    Bahdanau Attention (mlp):
    $$c = \\sum_{j=1}^{SeqLength}a_jh_j$$.
    The Alignment-function $$a$$ computes an alignment as:
    $$a_j = softmax(v_a^T \\tanh(W_a q + U_a h_j) )$$.
"""
def __init__(self, dim, coverage=False, attn_type='dot'):
super(GlobalAttentionNew, self).__init__()
self.dim = dim
self.attn_type = attn_type
assert self.attn_type in ['dot', 'general', 'mlp'
], 'Please select a valid attention type.'
if self.attn_type == 'general':
self.linear_in = nn.Linear(dim, dim, bias=False)
elif self.attn_type == 'mlp':
self.linear_context = BottleLinear(dim, dim, bias=False)
self.linear_query = nn.Linear(dim, dim, bias=True)
self.v = BottleLinear(dim, 1, bias=False)
out_bias = self.attn_type == 'mlp'
self.linear_out = nn.Linear(dim * 2, dim, bias=out_bias)
        self.sm = nn.Softmax(dim=-1)
self.tanh = nn.Tanh()
self.mask = None
if coverage:
self.linear_cover = nn.Linear(1, dim, bias=False)
def applyMask(self, mask):
self.mask = mask
def score(self, h_t, h_s):
"""
h_t (FloatTensor): batch x tgt_len x dim
h_s (FloatTensor): batch x src_len x dim
returns scores (FloatTensor): batch x tgt_len x src_len:
raw attention scores for each src index
"""
src_batch, src_len, src_dim = h_s.size()
tgt_batch, tgt_len, tgt_dim = h_t.size()
aeq(src_batch, tgt_batch)
aeq(src_dim, tgt_dim)
aeq(self.dim, src_dim)
if self.attn_type in ['general', 'dot']:
if self.attn_type == 'general':
h_t_ = h_t.view(tgt_batch * tgt_len, tgt_dim)
h_t_ = self.linear_in(h_t_)
h_t = h_t_.view(tgt_batch, tgt_len, tgt_dim)
h_s_ = h_s.transpose(1, 2)
return torch.bmm(h_t, h_s_)
else:
dim = self.dim
wq = self.linear_query(h_t.view(-1, dim))
wq = wq.view(tgt_batch, tgt_len, 1, dim)
wq = wq.expand(tgt_batch, tgt_len, src_len, dim)
uh = self.linear_context(h_s.contiguous().view(-1, dim))
uh = uh.view(src_batch, 1, src_len, dim)
uh = uh.expand(src_batch, tgt_len, src_len, dim)
wquh = self.tanh(wq + uh)
return self.v(wquh.view(-1, dim)).view(tgt_batch, tgt_len, src_len)
def forward(self, input_0, input_1):
primals_3 = self.linear_out.weight
primals_1 = input_0
primals_2 = input_1
output = call([primals_1, primals_2, primals_3])
return output[0], output[1]
|
Flamexmt/LMA
|
GlobalAttention
| false | 13,715 |
[
"MIT"
] | 321 |
f6fdec2d17a2d7a7733dd5a5745312bad392cdf3
|
https://github.com/Flamexmt/LMA/tree/f6fdec2d17a2d7a7733dd5a5745312bad392cdf3
|
IdfCombination
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/qz/cqza6p5fjiie2hfiu5dfjqqugrnzziwuwxzlhzy2aa7khopxjbym.py
# Topologically Sorted Source Nodes: [idf], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# idf => amax, exp, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [1], True), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg0_1, %amax), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + (x3), tmp9, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/6l/c6l2hpr47rl224l4prc22qgakmghfqm3ejbskltdi2gryeriryle.py
# Topologically Sorted Source Nodes: [idf, mul, sum_1], Original ATen: [aten._softmax, aten.mul, aten.sum]
# Source node to ATen node mapping:
# idf => div, sum_1
# mul => mul
# sum_1 => sum_2
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%arg1_1, %div), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
triton_poi_fused__softmax_mul_sum_1 = async_compile.triton('triton_poi_fused__softmax_mul_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_mul_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + (64*x1)), xmask)
tmp2 = tl.load(in_ptr1 + (16 + x0 + (64*x1)), xmask)
tmp4 = tl.load(in_ptr1 + (32 + x0 + (64*x1)), xmask)
tmp6 = tl.load(in_ptr1 + (48 + x0 + (64*x1)), xmask)
tmp10 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
tmp14 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tmp18 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask)
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp1 / tmp7
tmp9 = tmp0 * tmp8
tmp11 = tmp2 / tmp7
tmp12 = tmp10 * tmp11
tmp13 = tmp9 + tmp12
tmp15 = tmp4 / tmp7
tmp16 = tmp14 * tmp15
tmp17 = tmp13 + tmp16
tmp19 = tmp6 / tmp7
tmp20 = tmp18 * tmp19
tmp21 = tmp17 + tmp20
tl.store(out_ptr0 + (x2), tmp21, xmask)
''', device_str='cuda')
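# Reference sketch (an assumption added for readability, not emitted by
# Inductor): the fused kernel normalizes idf along dim=1 and reduces the
# weighted scores in a single pass, never materializing the softmax output.
def _softmax_mul_sum_reference(scores, idf):
    return (scores * idf.softmax(dim=1)).sum(dim=1)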
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [idf], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(arg0_1, buf0, 256, grid=grid(256), stream=stream0)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [idf, mul, sum_1], Original ATen: [aten._softmax, aten.mul, aten.sum]
triton_poi_fused__softmax_mul_sum_1.run(arg1_1, buf0, buf1, 64, grid=grid(64), stream=stream0)
del arg1_1
del buf0
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch import nn
class IdfCombination(nn.Module):
def forward(self, scores, idf):
idf = idf.softmax(dim=1)
return (scores * idf).sum(dim=1)
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
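# Minimal usage sketch (an illustrative addition; shapes follow get_inputs):
if __name__ == "__main__":
    mod = IdfCombination()
    scores, idf = get_inputs()
    out = mod(scores, idf)
    # softmax makes the idf weights sum to 1 along dim=1, so `out` is a
    # convex combination of the per-document scores
    assert out.shape == (4, 4, 4)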
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tmp9 = tl_math.exp(tmp8)
tl.store(out_ptr0 + x3, tmp9, xmask)
@triton.jit
def triton_poi_fused__softmax_mul_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask)
tmp2 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
tmp4 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask)
tmp6 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask)
tmp10 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp14 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp18 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp1 / tmp7
tmp9 = tmp0 * tmp8
tmp11 = tmp2 / tmp7
tmp12 = tmp10 * tmp11
tmp13 = tmp9 + tmp12
tmp15 = tmp4 / tmp7
tmp16 = tmp14 * tmp15
tmp17 = tmp13 + tmp16
tmp19 = tmp6 / tmp7
tmp20 = tmp18 * tmp19
tmp21 = tmp17 + tmp20
tl.store(out_ptr0 + x2, tmp21, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(256)](arg0_1, buf0, 256, XBLOCK=
256, num_warps=4, num_stages=1)
del arg0_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_mul_sum_1[grid(64)](arg1_1, buf0, buf1,
64, XBLOCK=64, num_warps=1, num_stages=1)
del arg1_1
del buf0
return buf1,
class IdfCombinationNew(nn.Module):
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
Georgetown-IR-Lab/OpenNIR
|
IdfCombination
| false | 13,716 |
[
"MIT"
] | 140 |
7d93e8643fe311e3e9c7a0678efe9775fd80485e
|
https://github.com/Georgetown-IR-Lab/OpenNIR/tree/7d93e8643fe311e3e9c7a0678efe9775fd80485e
|
MLP
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/3u/c3ub52l73zdv4klgqzgxmtzrzxvztuyczv2jksnvrjr7erq7guxd.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone]
# Source node to ATen node mapping:
# x_1 => clone
# Graph fragment:
# %clone : [num_users=1] = call_function[target=torch.ops.aten.clone.default](args = (%permute,), kwargs = {memory_format: torch.contiguous_format})
triton_poi_fused_clone_0 = async_compile.triton('triton_poi_fused_clone_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clone_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = (yindex // 16)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (16*x2) + (64*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/t6/ct6f57cdvyh3ahq6iwyawuy7577bar2ftumjxqllolmn4c4lh7ph.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.add]
# Source node to ATen node mapping:
# x_1 => add
# Graph fragment:
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%view_2, %primals_3), kwargs = {})
triton_poi_fused_add_1 = async_compile.triton('triton_poi_fused_add_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_1', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + (x2), tmp2, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.clone]
stream0 = get_raw_stream(0)
triton_poi_fused_clone_0.run(primals_1, buf0, 64, 4, grid=grid(64, 4), stream=stream0)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 16, 4), (64, 4, 1), 0); del buf1 # reuse
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten.add]
triton_poi_fused_add_1.run(buf2, primals_3, 256, grid=grid(256), stream=stream0)
del primals_3
return (buf2, reinterpret_tensor(buf0, (64, 4), (4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch import Tensor
from torch import nn
class MLP(nn.Module):
def __init__(self, dim, embed_dim):
super().__init__()
self.proj = nn.Linear(dim, embed_dim)
def forward(self, x: 'Tensor') ->Tensor:
x = x.flatten(2).transpose(1, 2)
x = self.proj(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4, 'embed_dim': 4}]
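# Hedged shape sketch (added for clarity, not part of the original module): for
# an input of shape (B, C, H, W), flatten(2) yields (B, C, H*W) and
# transpose(1, 2) yields (B, H*W, C), so the Linear projects channels at each
# spatial location. The sizes below mirror get_inputs()/get_init_inputs().
def _mlp_shape_check():
    m = MLP(dim=4, embed_dim=4)
    y = m(torch.rand([4, 4, 4, 4]))
    assert y.shape == (4, 16, 4)  # (B, H*W, embed_dim)
    return y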
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_clone_0(in_ptr0, out_ptr0, ynumel, xnumel,
    YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 64
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 16
y1 = yindex // 16
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 16 * x2 + 64 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_add_1(in_out_ptr0, in_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tl.store(in_out_ptr0 + x2, tmp2, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 16, 4), (64, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clone_0[grid(64, 4)](primals_1, buf0, 64, 4,
XBLOCK=4, YBLOCK=32, num_warps=4, num_stages=1)
del primals_1
buf1 = empty_strided_cuda((64, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf0, (64, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf1)
del primals_2
buf2 = reinterpret_tensor(buf1, (4, 16, 4), (64, 4, 1), 0)
del buf1
triton_poi_fused_add_1[grid(256)](buf2, primals_3, 256, XBLOCK=128,
num_warps=4, num_stages=1)
del primals_3
return buf2, reinterpret_tensor(buf0, (64, 4), (4, 1), 0)
class MLPNew(nn.Module):
def __init__(self, dim, embed_dim):
super().__init__()
self.proj = nn.Linear(dim, embed_dim)
def forward(self, input_0):
primals_2 = self.proj.weight
primals_3 = self.proj.bias
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
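# Hedged equivalence sketch (an addition): on CUDA, the fused clone + mm + add
# path should match the eager nn.Linear applied to the flattened, transposed
# input, up to small floating-point differences. The tolerance is an assumption.
def _mlp_new_check():
    m = MLPNew(dim=4, embed_dim=4).cuda()
    x = torch.rand([4, 4, 4, 4], device='cuda')
    ref = m.proj(x.flatten(2).transpose(1, 2))
    out = m(x)
    assert torch.allclose(out, ref, atol=1e-5)
    return out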
|
Genevievekim/semantic-segmentation-1
|
MLP
| false | 13,717 |
[
"BSD-3-Clause"
] | 196 |
f28b026e44cff80fe3ca4cac94cea27e4073821b
|
https://github.com/Genevievekim/semantic-segmentation-1/tree/f28b026e44cff80fe3ca4cac94cea27e4073821b
|
GEGLU
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/g6/cg6py454hsmh5ek5ufygwdjqk6x5t4pxnpc52hre5drrru6gkirg.py
# Topologically Sorted Source Nodes: [gelu, mul], Original ATen: [aten.gelu, aten.mul]
# Source node to ATen node mapping:
# gelu => add, erf, mul, mul_1, mul_2
# mul => mul_3
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_1, 0.5), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem_1, 0.7071067811865476), kwargs = {})
# %erf : [num_users=1] = call_function[target=torch.ops.aten.erf.default](args = (%mul_1,), kwargs = {})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%erf, 1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %add), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%getitem, %mul_2), kwargs = {})
triton_poi_fused_gelu_mul_0 = async_compile.triton('triton_poi_fused_gelu_mul_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[128],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_gelu_mul_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_gelu_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = (xindex // 2)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (4*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (2 + x0 + (4*x1)), xmask)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = 0.7071067811865476
tmp5 = tmp1 * tmp4
tmp6 = libdevice.erf(tmp5)
tmp7 = 1.0
tmp8 = tmp6 + tmp7
tmp9 = tmp3 * tmp8
tmp10 = tmp0 * tmp9
tl.store(out_ptr0 + (x2), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
# Topologically Sorted Source Nodes: [gelu, mul], Original ATen: [aten.gelu, aten.mul]
stream0 = get_raw_stream(0)
triton_poi_fused_gelu_mul_0.run(arg0_1, buf0, 128, grid=grid(128), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch import nn
import torch.nn.functional as F
class GEGLU(nn.Module):
def forward(self, x):
x, gates = x.chunk(2, dim=-1)
return x * F.gelu(gates)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
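# Hedged usage sketch (added): GEGLU halves the last dimension, gating the
# first half with GELU of the second half, so (4, 4, 4, 4) maps to
# (4, 4, 4, 2). The check below recomputes the gating by hand.
def _geglu_check():
    g = GEGLU()
    x = torch.rand([4, 4, 4, 4])
    y = g(x)
    assert y.shape == (4, 4, 4, 2)
    x_half, gates = x.chunk(2, dim=-1)
    assert torch.allclose(y, x_half * F.gelu(gates))
    return y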
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_gelu_mul_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 128
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 2
x1 = xindex // 2
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 4 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (2 + x0 + 4 * x1), xmask)
tmp2 = 0.5
tmp3 = tmp1 * tmp2
tmp4 = 0.7071067811865476
tmp5 = tmp1 * tmp4
tmp6 = libdevice.erf(tmp5)
tmp7 = 1.0
tmp8 = tmp6 + tmp7
tmp9 = tmp3 * tmp8
tmp10 = tmp0 * tmp9
tl.store(out_ptr0 + x2, tmp10, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 2), (32, 8, 2, 1), torch.float32)
get_raw_stream(0)
        triton_poi_fused_gelu_mul_0[grid(128)](arg0_1, buf0, 128,
            XBLOCK=128, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class GEGLUNew(nn.Module):
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Gitsamshi/DALLE-pytorch
|
GEGLU
| false | 13,718 |
[
"MIT"
] | 4,025 |
6cfc43158a4615865e97c839133290afcf289824
|
https://github.com/Gitsamshi/DALLE-pytorch/tree/6cfc43158a4615865e97c839133290afcf289824
|
DivideMax
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/aj/cajsvoa7fstjjubnvzw5a3oolwiymmoabi4yem6uzbjjug2hou5t.py
# Topologically Sorted Source Nodes: [amax, truediv], Original ATen: [aten.amax, aten.div]
# Source node to ATen node mapping:
# amax => amax
# truediv => div
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg0_1, [4], True), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%arg0_1, %amax), kwargs = {})
triton_poi_fused_amax_div_0 = async_compile.triton('triton_poi_fused_amax_div_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[1024],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_amax_div_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_amax_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [amax, truediv], Original ATen: [aten.amax, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_amax_div_0.run(arg0_1, buf0, 1024, grid=grid(1024), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4, 4), (256, 64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch import nn
class DivideMax(nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, x):
maxes = x.amax(dim=self.dim, keepdim=True).detach()
return x / maxes
def get_inputs():
return [torch.rand([4, 4, 4, 4, 4])]
def get_init_inputs():
return [[], {'dim': 4}]
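# Hedged usage sketch (added): dividing by the detached amax rescales the
# tensor so the maximum along `dim` becomes exactly 1 for positive inputs,
# while blocking gradients through the normalizer itself.
def _divide_max_check():
    m = DivideMax(dim=4)
    y = m(torch.rand([4, 4, 4, 4, 4]))
    assert torch.allclose(y.amax(dim=4), torch.ones(4, 4, 4, 4))
    return y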
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_amax_div_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 1024
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4, 4), (256, 64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4, 4), (256, 64, 16, 4, 1),
torch.float32)
get_raw_stream(0)
        triton_poi_fused_amax_div_0[grid(1024)](arg0_1, buf0, 1024,
            XBLOCK=256, num_warps=4, num_stages=1)
del arg0_1
return buf0,
class DivideMaxNew(nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
|
Gitsamshi/DALLE-pytorch
|
DivideMax
| false | 13,719 |
[
"MIT"
] | 4,025 |
6cfc43158a4615865e97c839133290afcf289824
|
https://github.com/Gitsamshi/DALLE-pytorch/tree/6cfc43158a4615865e97c839133290afcf289824
|
SumCombination
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/iu/ciuxern2omgit5ovksuiwlddxkww6e3pkid4q2h3sauzn5rbd35z.py
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv1d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%permute, %primals_2, %primals_3, [1], [0], [1], False, [0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (16*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (4*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/cw/ccwba44b2bjycghp6gegqifslfi2kozagdgqzncl44lbdkrzord5.py
# Topologically Sorted Source Nodes: [sum_1, scores_1], Original ATen: [aten.sum, aten.div]
# Source node to ATen node mapping:
# scores_1 => div
# sum_1 => sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%select, [1]), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %primals_4), kwargs = {})
triton_poi_fused_div_sum_1 = async_compile.triton('triton_poi_fused_div_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_div_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_div_sum_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (0))
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + (x2), xmask)
tmp3 = tmp0 + tmp2
tmp5 = tmp3 / tmp4
tl.store(out_ptr0 + (x2), tmp5, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (1, ), (1, ))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(primals_1, buf0, 16, 4, grid=grid(16, 4), stream=stream0)
# Topologically Sorted Source Nodes: [conv1d], Original ATen: [aten.convolution]
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,), padding=(0,), dilation=(1,), transposed=False, output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 4), (4, 4, 1))
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [sum_1, scores_1], Original ATen: [aten.sum, aten.div]
triton_poi_fused_div_sum_1.run(buf1, primals_3, primals_4, buf2, 64, grid=grid(64), stream=stream0)
del buf1
del primals_3
return (buf2, primals_2, primals_4, reinterpret_tensor(primals_1, (4, 4, 4), (16, 1, 4), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((1, 4, 1), (4, 1, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from torch import nn
class SumCombination(nn.Module):
def __init__(self, dim_in, normalize=True):
super(SumCombination, self).__init__()
self.conv = nn.Conv1d(dim_in, 1, 1)
self.normalize = normalize
def forward(self, x, qlen):
scores = self.conv(x.permute(0, 2, 1))[:, :, 0]
if self.normalize:
scores = scores.sum(dim=1) / qlen.type_as(scores)
return scores
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'dim_in': 4}]
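# Hedged usage sketch (added): the 1x1 Conv1d scores each position, [:, :, 0]
# selects sequence position 0 (leaving a (batch, 1) tensor whose singleton
# channel dimension is summed out), and the result is divided by the query
# length. The scalar-per-item qlen below is an illustrative assumption;
# get_inputs() instead passes a (4, 4, 4) qlen, which broadcasts.
def _sum_combination_check():
    m = SumCombination(dim_in=4)
    x = torch.rand([4, 4, 4])
    qlen = torch.full((4,), 4.0)
    scores = m(x, qlen)
    assert scores.shape == (4,)
    return scores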
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch import nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 4
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 16 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 4 * y3), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_div_sum_1(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 0)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK])
tmp4 = tl.load(in_ptr2 + x2, xmask)
tmp3 = tmp0 + tmp2
tmp5 = tmp3 / tmp4
tl.store(out_ptr0 + x2, tmp5, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (1, 4, 1), (4, 1, 1))
assert_size_stride(primals_3, (1,), (1,))
assert_size_stride(primals_4, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16, 4)](primals_1, buf0, 16, 4,
XBLOCK=4, YBLOCK=16, num_warps=1, num_stages=1)
buf1 = extern_kernels.convolution(buf0, primals_2, stride=(1,),
        padding=(0,), dilation=(1,), transposed=False,
        output_padding=(0,), groups=1, bias=None)
assert_size_stride(buf1, (4, 1, 4), (4, 4, 1))
buf2 = buf0
del buf0
triton_poi_fused_div_sum_1[grid(64)](buf1, primals_3, primals_4,
buf2, 64, XBLOCK=64, num_warps=1, num_stages=1)
del buf1
del primals_3
    return buf2, primals_2, primals_4, reinterpret_tensor(primals_1,
        (4, 4, 4), (16, 1, 4), 0)
class SumCombinationNew(nn.Module):
def __init__(self, dim_in, normalize=True):
super(SumCombinationNew, self).__init__()
self.conv = nn.Conv1d(dim_in, 1, 1)
self.normalize = normalize
def forward(self, input_0, input_1):
primals_2 = self.conv.weight
primals_3 = self.conv.bias
primals_1 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
|
Georgetown-IR-Lab/OpenNIR
|
SumCombination
| false | 13,720 |
[
"MIT"
] | 140 |
7d93e8643fe311e3e9c7a0678efe9775fd80485e
|
https://github.com/Georgetown-IR-Lab/OpenNIR/tree/7d93e8643fe311e3e9c7a0678efe9775fd80485e
|
MaxPooling
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ye/cyer3e3q24cnknj35g4jhiqyzqlzuynppjhp52ioic4qspesjovr.py
# Topologically Sorted Source Nodes: [masked_fill_, max_1, eq_1, masked_fill__1], Original ATen: [aten.masked_fill, aten.max, aten.eq]
# Source node to ATen node mapping:
# eq_1 => eq_1
# masked_fill_ => full_default, where
# masked_fill__1 => full_default_1, where_1
# max_1 => max_1
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], -1000000.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%expand, %full_default, %arg1_1), kwargs = {})
# %max_1 : [num_users=1] = call_function[target=torch.ops.aten.max.dim](args = (%where, 1), kwargs = {})
# %eq_1 : [num_users=1] = call_function[target=torch.ops.aten.eq.Scalar](args = (%getitem, -1000000.0), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%eq_1, %full_default_1, %getitem), kwargs = {})
triton_poi_fused_eq_masked_fill_max_0 = async_compile.triton('triton_poi_fused_eq_masked_fill_max_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_eq_masked_fill_max_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_eq_masked_fill_max_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = (xindex // 4)
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x0 + (16*x1)), xmask)
tmp6 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (4 + x0 + (16*x1)), xmask)
tmp11 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (8 + x0 + (16*x1)), xmask)
tmp16 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr1 + (12 + x0 + (16*x1)), xmask)
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tmp4 = -1000000.0
tmp5 = tl.where(tmp2, tmp4, tmp3)
tmp7 = tmp6 == tmp1
tmp9 = tl.where(tmp7, tmp4, tmp8)
tmp10 = triton_helpers.maximum(tmp5, tmp9)
tmp12 = tmp11 == tmp1
tmp14 = tl.where(tmp12, tmp4, tmp13)
tmp15 = triton_helpers.maximum(tmp10, tmp14)
tmp17 = tmp16 == tmp1
tmp19 = tl.where(tmp17, tmp4, tmp18)
tmp20 = triton_helpers.maximum(tmp15, tmp19)
tmp21 = tmp20 == tmp4
tmp22 = tl.where(tmp21, tmp1, tmp20)
tl.store(in_out_ptr0 + (x2), tmp22, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [masked_fill_, max_1, eq_1, masked_fill__1], Original ATen: [aten.masked_fill, aten.max, aten.eq]
stream0 = get_raw_stream(0)
triton_poi_fused_eq_masked_fill_max_0.run(buf1, arg0_1, arg1_1, 16, grid=grid(16), stream=stream0)
del arg0_1
del arg1_1
return (buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class MaxPooling(nn.Module):
def __init__(self):
super(MaxPooling, self).__init__()
self.MIN = -1000000.0
"""
(item, subitem) can be (word, characters), or (sentence, words)
x: num_items x max_subitem_size x input_size
x_mask: num_items x max_subitem_size
return num_items x input_size
"""
def forward(self, x, x_mask):
"""
x_output: num_items x input_size x 1 --> num_items x input_size
"""
empty_mask = x_mask.eq(0).unsqueeze(2).expand_as(x)
x_now = x.clone()
x_now.data.masked_fill_(empty_mask.data, self.MIN)
x_output = x_now.max(1)[0]
x_output.data.masked_fill_(x_output.data.eq(self.MIN), 0)
return x_output
def get_inputs():
return [torch.rand([4, 4, 4]), torch.rand([4, 4])]
def get_init_inputs():
return [[], {}]
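# Hedged usage sketch (added): positions where x_mask == 0 are excluded from
# the max by filling them with MIN, and items whose mask is all zeros come out
# as 0 rather than MIN. The hand-built mask below is an illustrative assumption.
def _max_pooling_check():
    m = MaxPooling()
    x = torch.rand([4, 4, 4])
    x_mask = torch.ones([4, 4])
    x_mask[0] = 0  # first item has no valid sub-items
    out = m(x, x_mask)
    assert out.shape == (4, 4)
    assert torch.all(out[0] == 0)
    return out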
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_eq_masked_fill_max_0(in_out_ptr0, in_ptr0, in_ptr1,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x1 = xindex // 4
x0 = xindex % 4
x2 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x0 + 16 * x1), xmask)
tmp6 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (4 + x0 + 16 * x1), xmask)
    tmp11 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp13 = tl.load(in_ptr1 + (8 + x0 + 16 * x1), xmask)
    tmp16 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr1 + (12 + x0 + 16 * x1), xmask)
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tmp4 = -1000000.0
tmp5 = tl.where(tmp2, tmp4, tmp3)
tmp7 = tmp6 == tmp1
tmp9 = tl.where(tmp7, tmp4, tmp8)
tmp10 = triton_helpers.maximum(tmp5, tmp9)
tmp12 = tmp11 == tmp1
tmp14 = tl.where(tmp12, tmp4, tmp13)
tmp15 = triton_helpers.maximum(tmp10, tmp14)
tmp17 = tmp16 == tmp1
tmp19 = tl.where(tmp17, tmp4, tmp18)
tmp20 = triton_helpers.maximum(tmp15, tmp19)
tmp21 = tmp20 == tmp4
tmp22 = tl.where(tmp21, tmp1, tmp20)
tl.store(in_out_ptr0 + x2, tmp22, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4), (4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_eq_masked_fill_max_0[grid(16)](buf1, arg0_1,
arg1_1, 16, XBLOCK=16, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
return buf1,
class MaxPoolingNew(nn.Module):
def __init__(self):
super(MaxPoolingNew, self).__init__()
self.MIN = -1000000.0
"""
(item, subitem) can be (word, characters), or (sentence, words)
x: num_items x max_subitem_size x input_size
x_mask: num_items x max_subitem_size
return num_items x input_size
"""
def forward(self, input_0, input_1):
arg1_1 = input_0
arg0_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
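# Hedged usage note (added): MaxPoolingNew.forward accepts (x, x_mask) in the
# same order as the eager module and reorders them internally, since call()
# expects the mask first. Shapes below mirror get_inputs().
def _max_pooling_new_check():
    m = MaxPoolingNew()
    x = torch.rand([4, 4, 4], device='cuda')
    x_mask = torch.ones([4, 4], device='cuda')
    out = m(x, x_mask)
    assert out.shape == (4, 4)
    return out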
|
GingerNg/SDNet
|
MaxPooling
| false | 13,721 |
[
"MIT"
] | 112 |
48ad8cc57c9a02aaad10e34d0c91a174ac68f056
|
https://github.com/GingerNg/SDNet/tree/48ad8cc57c9a02aaad10e34d0c91a174ac68f056
|
LinearBlock
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/67/c67nwtvejry7jovauj4gfwnoxaeorcxngrt3hs2o3y3i4niugvpw.py
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._native_batch_norm_legit]
# Source node to ATen node mapping:
# x_1 => add, rsqrt, var_mean
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_1, [0, 2]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 0.001), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
triton_per_fused__native_batch_norm_legit_0 = async_compile.triton('triton_per_fused__native_batch_norm_legit_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 16],
reduction_hint=ReductionHint.DEFAULT,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__native_batch_norm_legit_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__native_batch_norm_legit_0(in_ptr0, out_ptr0, out_ptr1, out_ptr2, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 16
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex % 4
r2 = (rindex // 4)
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + (4*x0) + (16*r2)), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 16.0
tmp18 = tmp16 / tmp17
tmp19 = 0.001
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tl.store(out_ptr2 + (x0), tmp21, xmask)
tl.store(out_ptr0 + (x0), tmp10, xmask)
tl.store(out_ptr1 + (x0), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/dy/cdyw65rioafwiakcljlghmqa24g7jnrjibhdgma4b5qyneqwjh77.py
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten._native_batch_norm_legit, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# x_1 => add, add_1, mul, mul_1, rsqrt, sub, var_mean
# x_2 => relu
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view_1, [0, 2]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 0.001), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%view_1, %getitem_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %rsqrt), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %unsqueeze), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_1), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused__native_batch_norm_legit_relu_threshold_backward_1 = async_compile.triton('triton_poi_fused__native_batch_norm_legit_relu_threshold_backward_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*i1', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__native_batch_norm_legit_relu_threshold_backward_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__native_batch_norm_legit_relu_threshold_backward_1(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 4) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 16.0
tmp5 = tmp3 / tmp4
tmp6 = 0.001
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tmp16 = 0.0
tmp17 = tmp15 <= tmp16
tl.store(out_ptr0 + (x3), tmp15, xmask)
tl.store(out_ptr1 + (x3), tmp17, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, ), (1, ))
assert_size_stride(primals_5, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [x], Original ATen: [aten.addmm]
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((1, 4, 1), (4, 1, 4), torch.float32)
buf2 = empty_strided_cuda((1, 4, 1), (4, 1, 4), torch.float32)
buf4 = empty_strided_cuda((1, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [x_1], Original ATen: [aten._native_batch_norm_legit]
stream0 = get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_0.run(buf0, buf1, buf2, buf4, 4, 16, grid=grid(4), stream=stream0)
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [x_1, x_2], Original ATen: [aten._native_batch_norm_legit, aten.relu, aten.threshold_backward]
triton_poi_fused__native_batch_norm_legit_relu_threshold_backward_1.run(buf0, buf1, buf2, primals_4, primals_5, buf5, buf6, 64, grid=grid(64), stream=stream0)
del buf2
del primals_5
return (buf5, primals_4, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0), buf0, reinterpret_tensor(buf4, (4, ), (1, ), 0), buf6, reinterpret_tensor(buf1, (1, 4, 1), (4, 1, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
from scipy.stats import truncnorm
def truncated_normal_(tensor, mean=0.0, std=1.0):
values = truncnorm.rvs(-2, 2, size=tensor.shape)
values = mean + std * values
tensor.copy_(torch.from_numpy(values))
return tensor
def fc_init_(module):
if hasattr(module, 'weight') and module.weight is not None:
truncated_normal_(module.weight.data, mean=0.0, std=0.01)
if hasattr(module, 'bias') and module.bias is not None:
torch.nn.init.constant_(module.bias.data, 0.0)
return module
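# --- Hedged usage sketch (an addition, not part of the original file) ---
# truncated_normal_ samples via scipy on the CPU and copies the result into
# the target tensor, so it is an initialization-time helper only. A minimal
# check that fc_init_ behaves as described:
def _demo_fc_init():
    layer = fc_init_(torch.nn.Linear(4, 4))
    assert layer.bias.abs().sum().item() == 0.0        # bias zeroed by fc_init_
    assert layer.weight.abs().max().item() <= 0.02     # clipped at ~2 std of 0.01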
class LinearBlock(torch.nn.Module):
def __init__(self, input_size, output_size):
super(LinearBlock, self).__init__()
self.relu = torch.nn.ReLU()
self.normalize = torch.nn.BatchNorm1d(output_size, affine=True,
momentum=0.999, eps=0.001, track_running_stats=False)
self.linear = torch.nn.Linear(input_size, output_size)
fc_init_(self.linear)
def forward(self, x):
x = self.linear(x)
x = self.normalize(x)
x = self.relu(x)
return x
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'input_size': 4, 'output_size': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from scipy.stats import truncnorm
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_per_fused__native_batch_norm_legit_0(in_ptr0, out_ptr0, out_ptr1,
out_ptr2, xnumel, rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex % 4
r2 = rindex // 4
x0 = xindex
tmp0 = tl.load(in_ptr0 + (r1 + 4 * x0 + 16 * r2), xmask, other=0.0)
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 16, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = 16.0
tmp18 = tmp16 / tmp17
tmp19 = 0.001
tmp20 = tmp18 + tmp19
tmp21 = libdevice.rsqrt(tmp20)
tl.store(out_ptr2 + x0, tmp21, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
tl.store(out_ptr1 + x0, tmp16, xmask)
@triton.jit
def triton_poi_fused__native_batch_norm_legit_relu_threshold_backward_1(in_ptr0
, in_ptr1, in_ptr2, in_ptr3, in_ptr4, out_ptr0, out_ptr1, xnumel,
XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 4 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp2 = tmp0 - tmp1
tmp4 = 16.0
tmp5 = tmp3 / tmp4
tmp6 = 0.001
tmp7 = tmp5 + tmp6
tmp8 = libdevice.rsqrt(tmp7)
tmp9 = tmp2 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 + tmp12
tmp14 = tl.full([1], 0, tl.int32)
tmp15 = triton_helpers.maximum(tmp14, tmp13)
tmp16 = 0.0
tmp17 = tmp15 <= tmp16
tl.store(out_ptr0 + x3, tmp15, xmask)
tl.store(out_ptr1 + x3, tmp17, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5 = args
args.clear()
assert_size_stride(primals_1, (4, 4), (4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4,), (1,))
assert_size_stride(primals_5, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.addmm(primals_2, reinterpret_tensor(primals_3, (16,
4), (4, 1), 0), reinterpret_tensor(primals_1, (4, 4), (1, 4), 0
), alpha=1, beta=1, out=buf0)
del primals_1
del primals_2
buf1 = empty_strided_cuda((1, 4, 1), (4, 1, 4), torch.float32)
buf2 = empty_strided_cuda((1, 4, 1), (4, 1, 4), torch.float32)
buf4 = empty_strided_cuda((1, 4, 1), (4, 1, 1), torch.float32)
get_raw_stream(0)
triton_per_fused__native_batch_norm_legit_0[grid(4)](buf0, buf1,
buf2, buf4, 4, 16, XBLOCK=1, num_warps=2, num_stages=1)
buf5 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused__native_batch_norm_legit_relu_threshold_backward_1[
grid(64)](buf0, buf1, buf2, primals_4, primals_5, buf5, buf6,
64, XBLOCK=64, num_warps=1, num_stages=1)
del buf2
del primals_5
return buf5, primals_4, reinterpret_tensor(primals_3, (16, 4), (4, 1), 0
), buf0, reinterpret_tensor(buf4, (4,), (1,), 0
), buf6, reinterpret_tensor(buf1, (1, 4, 1), (4, 1, 1), 0)
def truncated_normal_(tensor, mean=0.0, std=1.0):
values = truncnorm.rvs(-2, 2, size=tensor.shape)
values = mean + std * values
tensor.copy_(torch.from_numpy(values))
return tensor
def fc_init_(module):
if hasattr(module, 'weight') and module.weight is not None:
truncated_normal_(module.weight.data, mean=0.0, std=0.01)
if hasattr(module, 'bias') and module.bias is not None:
torch.nn.init.constant_(module.bias.data, 0.0)
return module
class LinearBlockNew(torch.nn.Module):
def __init__(self, input_size, output_size):
super(LinearBlockNew, self).__init__()
self.relu = torch.nn.ReLU()
self.normalize = torch.nn.BatchNorm1d(output_size, affine=True,
momentum=0.999, eps=0.001, track_running_stats=False)
self.linear = torch.nn.Linear(input_size, output_size)
fc_init_(self.linear)
def forward(self, input_0):
primals_2 = self.normalize.weight
primals_4 = self.normalize.bias
primals_1 = self.linear.weight
primals_5 = self.linear.bias
primals_3 = input_0
output = call([primals_1, primals_2, primals_3, primals_4, primals_5])
return output[0]
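# --- Hedged usage sketch (an addition, not part of the generated file) ---
# LinearBlockNew wires module parameters onto the traced placeholder order
# (primals_1..primals_5) consumed by call() above. A quick probe of whether
# the Triton path reproduces the eager LinearBlock; it assumes a CUDA device
# and that LinearBlock from the reference snippet is importable, and it
# prints the worst-case difference rather than asserting, since the
# parameter mapping is only as faithful as the trace it was generated from.
def _probe_linear_block():
    torch.manual_seed(0)
    eager = LinearBlock(4, 4).cuda()
    fused = LinearBlockNew(4, 4).cuda()
    fused.load_state_dict(eager.state_dict())  # share identical parameters
    x = torch.rand(4, 4, 4, device='cuda')
    print((fused(x) - eager(x)).abs().max().item())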
|
Brikwerk/learn2learn
|
LinearBlock
| false | 13,722 |
[
"MIT"
] | 1,774 |
7997c13c26ec627d13ce77ba98427260df78ada8
|
https://github.com/Brikwerk/learn2learn/tree/7997c13c26ec627d13ce77ba98427260df78ada8
|
SumAggregator
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/7o/c7otc5ij6whexgxcr56vlxp2l7hzg3oc4onljp557uc6wncu5gvg.py
# Topologically Sorted Source Nodes: [sum_1], Original ATen: [aten.sum]
# Source node to ATen node mapping:
# sum_1 => sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%arg0_1, [1]), kwargs = {})
triton_poi_fused_sum_0 = async_compile.triton('triton_poi_fused_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [sum_1], Original ATen: [aten.sum]
stream0 = get_raw_stream(0)
triton_poi_fused_sum_0.run(arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class SumAggregator(nn.Module):
def __init__(self):
super(SumAggregator, self).__init__()
def forward(self, neighbor):
return torch.sum(neighbor, dim=1)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_sum_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp1 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp3 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp2 = tmp0 + tmp1
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tl.store(out_ptr0 + x2, tmp6, xmask)
def call(args):
arg0_1, = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_sum_0[grid(64)](arg0_1, buf0, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del arg0_1
return buf0,
class SumAggregatorNew(nn.Module):
def __init__(self):
super(SumAggregatorNew, self).__init__()
def forward(self, input_0):
arg0_1 = input_0
output = call([arg0_1])
return output[0]
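# --- Hedged usage sketch (an addition, not part of the generated file) ---
# The fused kernel unrolls the reduction over dim 1 (fixed size 4), and
# call() guards on the exact contiguous (4, 4, 4, 4) shape, so this check
# only makes sense for that shape. Assumes a CUDA device.
def _check_sum_aggregator():
    x = torch.rand(4, 4, 4, 4, device='cuda')
    torch.testing.assert_close(SumAggregatorNew()(x), torch.sum(x, dim=1))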
|
GraphNAS/GraphNAS
|
SumAggregator
| false | 13,723 |
[
"Apache-2.0"
] | 94 |
b4f05bb10b8b96bb9e82344bfae36a23db2431a6
|
https://github.com/GraphNAS/GraphNAS/tree/b4f05bb10b8b96bb9e82344bfae36a23db2431a6
|
GDN
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/sd/csdwvvi3wbrioicxfahsmuujha2ghzed5cce2spz33p6va2ab6i4.py
# Topologically Sorted Source Nodes: [gamma, pow_2, gamma_1], Original ATen: [aten.mul, aten.maximum, aten.pow, aten.sub]
# Source node to ATen node mapping:
# gamma => full_default_1, maximum_1
# gamma_1 => sub_1
# pow_2 => pow_2
# Graph fragment:
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4], 3.814697265625e-06), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %maximum_1 : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%primals_3, %full_default_1), kwargs = {})
# %pow_2 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%maximum_1, 2), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%pow_2, 1.4551915228366852e-11), kwargs = {})
triton_poi_fused_maximum_mul_pow_sub_0 = async_compile.triton('triton_poi_fused_maximum_mul_pow_sub_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_maximum_mul_pow_sub_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_maximum_mul_pow_sub_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 3.814697265625e-06
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = tmp2 * tmp2
tmp4 = 1.4551915228366852e-11
tmp5 = tmp3 - tmp4
tl.store(out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/2y/c2y2depfxa2z6x54ydtq4sztgglwdvphffk5xy2ad6meezomjm6h.py
# Topologically Sorted Source Nodes: [pow_3], Original ATen: [aten.pow]
# Source node to ATen node mapping:
# pow_3 => pow_3
# Graph fragment:
# %pow_3 : [num_users=2] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%primals_1, 2), kwargs = {})
triton_poi_fused_pow_1 = async_compile.triton('triton_poi_fused_pow_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_pow_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_pow_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = tmp0 * tmp0
tl.store(out_ptr0 + (x0), tmp1, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/rf/crfppqbslgh44juuhqfpfib7bokrzsfockkr2b3gvhzhdu4wbgaj.py
# Topologically Sorted Source Nodes: [beta, pow_1, beta_1, norm_], Original ATen: [aten.mul, aten.maximum, aten.pow, aten.sub, aten.convolution]
# Source node to ATen node mapping:
# beta => full_default, maximum
# beta_1 => sub
# norm_ => convolution
# pow_1 => pow_1
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4], 0.0010000072652474046), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %maximum : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%primals_2, %full_default), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%maximum, 2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%pow_1, 1.4551915228366852e-11), kwargs = {})
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%pow_3, %view, %sub, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_maximum_mul_pow_sub_2 = async_compile.triton('triton_poi_fused_convolution_maximum_mul_pow_sub_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[4],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_maximum_mul_pow_sub_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_maximum_mul_pow_sub_2(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (x0), xmask)
tmp1 = 0.0010000072652474046
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = tmp2 * tmp2
tmp4 = 1.4551915228366852e-11
tmp5 = tmp3 - tmp4
tl.store(out_ptr0 + (x0), tmp5, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/tc/ctce67tjwgeiaxzrcsilnohm7paxqb2pdfdqcrlrvnl3xpwm4s4c.py
# Topologically Sorted Source Nodes: [beta, pow_1, beta_1, norm_, norm__1, outputs], Original ATen: [aten.mul, aten.maximum, aten.pow, aten.sub, aten.convolution, aten.sqrt, aten.div]
# Source node to ATen node mapping:
# beta => full_default, maximum
# beta_1 => sub
# norm_ => convolution
# norm__1 => sqrt
# outputs => div
# pow_1 => pow_1
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4], 0.0010000072652474046), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %maximum : [num_users=1] = call_function[target=torch.ops.aten.maximum.default](args = (%primals_2, %full_default), kwargs = {})
# %pow_1 : [num_users=1] = call_function[target=torch.ops.aten.pow.Tensor_Scalar](args = (%maximum, 2), kwargs = {})
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%pow_1, 1.4551915228366852e-11), kwargs = {})
# %convolution : [num_users=2] = call_function[target=torch.ops.aten.convolution.default](args = (%pow_3, %view, %sub, [1, 1], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%convolution,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%primals_1, %sqrt), kwargs = {})
triton_poi_fused_convolution_div_maximum_mul_pow_sqrt_sub_3 = async_compile.triton('triton_poi_fused_convolution_div_maximum_mul_pow_sqrt_sub_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_div_maximum_mul_pow_sqrt_sub_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_div_maximum_mul_pow_sqrt_sub_3(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_out_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x3), xmask)
tmp2 = tmp0 + tmp1
tmp4 = libdevice.sqrt(tmp2)
tmp5 = tmp3 / tmp4
tl.store(in_out_ptr0 + (x3), tmp2, xmask)
tl.store(out_ptr0 + (x3), tmp5, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4, ), (1, ))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [gamma, pow_2, gamma_1], Original ATen: [aten.mul, aten.maximum, aten.pow, aten.sub]
stream0 = get_raw_stream(0)
triton_poi_fused_maximum_mul_pow_sub_0.run(primals_3, buf0, 16, grid=grid(16), stream=stream0)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [pow_3], Original ATen: [aten.pow]
triton_poi_fused_pow_1.run(primals_1, buf1, 256, grid=grid(256), stream=stream0)
buf2 = empty_strided_cuda((4, ), (1, ), torch.float32)
# Topologically Sorted Source Nodes: [beta, pow_1, beta_1, norm_], Original ATen: [aten.mul, aten.maximum, aten.pow, aten.sub, aten.convolution]
triton_poi_fused_convolution_maximum_mul_pow_sub_2.run(primals_2, buf2, 4, grid=grid(4), stream=stream0)
# Topologically Sorted Source Nodes: [beta, pow_1, beta_1, norm_], Original ATen: [aten.mul, aten.maximum, aten.pow, aten.sub, aten.convolution]
buf3 = extern_kernels.convolution(buf1, reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = buf3; del buf3 # reuse
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [beta, pow_1, beta_1, norm_, norm__1, outputs], Original ATen: [aten.mul, aten.maximum, aten.pow, aten.sub, aten.convolution, aten.sqrt, aten.div]
triton_poi_fused_convolution_div_maximum_mul_pow_sqrt_sub_3.run(buf4, buf2, primals_1, buf5, 256, grid=grid(256), stream=stream0)
del buf2
return (buf5, primals_1, primals_2, primals_3, reinterpret_tensor(buf0, (4, 4, 1, 1), (4, 1, 1, 1), 0), buf1, buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
from torch.autograd import Function
import torch
import torch.nn as nn
import torch.utils.data
class LowerBound(Function):
@staticmethod
def forward(ctx, inputs, bound):
b = torch.ones_like(inputs) * bound
ctx.save_for_backward(inputs, b)
return torch.max(inputs, b)
@staticmethod
def backward(ctx, grad_output):
inputs, b = ctx.saved_tensors
pass_through_1 = inputs >= b
pass_through_2 = grad_output < 0
pass_through = pass_through_1 | pass_through_2
return pass_through.type(grad_output.dtype) * grad_output, None
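# --- Hedged illustration (an addition, not part of the original source) ---
# LowerBound clamps from below in the forward pass, but its backward lets a
# gradient through at the bound whenever that gradient is negative, i.e.
# whenever a descent step would push the input back up above the bound.
# A tiny probe of that behaviour:
def _probe_lower_bound():
    x = torch.tensor([0.5, 2.0], requires_grad=True)
    y = LowerBound.apply(x, 1.0)               # forward: tensor([1.0, 2.0])
    y.backward(torch.tensor([-1.0, -1.0]))
    print(x.grad)                              # tensor([-1., -1.]): both pass
    # With grad_output = +1 instead, x[0] (clamped, being pushed further
    # below the bound) would receive zero gradient; x[1] would receive +1.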
class GDN(nn.Module):
"""Generalized divisive normalization layer.
    y[i] = x[i] / sqrt(beta[i] + sum_j(gamma[j, i] * x[j]^2))
"""
def __init__(self, ch, inverse=False, beta_min=1e-06, gamma_init=0.1,
reparam_offset=2 ** -18):
super(GDN, self).__init__()
self.inverse = inverse
self.beta_min = beta_min
self.gamma_init = gamma_init
self.reparam_offset = reparam_offset
self.build(ch)
def build(self, ch):
self.pedestal = self.reparam_offset ** 2
self.beta_bound = (self.beta_min + self.reparam_offset ** 2) ** 0.5
self.gamma_bound = self.reparam_offset
beta = torch.sqrt(torch.ones(ch) + self.pedestal)
self.beta = nn.Parameter(beta)
eye = torch.eye(ch)
g = self.gamma_init * eye
g = g + self.pedestal
gamma = torch.sqrt(g)
self.gamma = nn.Parameter(gamma)
        self.pedestal = self.pedestal  # no-op kept from the original source
def forward(self, inputs):
unfold = False
if inputs.dim() == 5:
unfold = True
bs, ch, d, w, h = inputs.size()
inputs = inputs.view(bs, ch, d * w, h)
_, ch, _, _ = inputs.size()
beta = LowerBound.apply(self.beta, self.beta_bound)
beta = beta ** 2 - self.pedestal
gamma = LowerBound.apply(self.gamma, self.gamma_bound)
gamma = gamma ** 2 - self.pedestal
gamma = gamma.view(ch, ch, 1, 1)
norm_ = nn.functional.conv2d(inputs ** 2, gamma, beta)
norm_ = torch.sqrt(norm_)
if self.inverse:
outputs = inputs * norm_
else:
outputs = inputs / norm_
if unfold:
outputs = outputs.view(bs, ch, d, w, h)
return outputs
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'ch': 4}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
from torch.autograd import Function
import torch.nn as nn
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused_maximum_mul_pow_sub_0(in_ptr0, out_ptr0, xnumel,
XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 3.814697265625e-06
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = tmp2 * tmp2
tmp4 = 1.4551915228366852e-11
tmp5 = tmp3 - tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_pow_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = tmp0 * tmp0
tl.store(out_ptr0 + x0, tmp1, xmask)
@triton.jit
def triton_poi_fused_convolution_maximum_mul_pow_sub_2(in_ptr0, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 4
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + x0, xmask)
tmp1 = 0.0010000072652474046
tmp2 = triton_helpers.maximum(tmp0, tmp1)
tmp3 = tmp2 * tmp2
tmp4 = 1.4551915228366852e-11
tmp5 = tmp3 - tmp4
tl.store(out_ptr0 + x0, tmp5, xmask)
@triton.jit
def triton_poi_fused_convolution_div_maximum_mul_pow_sqrt_sub_3(in_out_ptr0,
in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_out_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + x3, xmask)
tmp2 = tmp0 + tmp1
tmp4 = libdevice.sqrt(tmp2)
tmp5 = tmp3 / tmp4
tl.store(in_out_ptr0 + x3, tmp2, xmask)
tl.store(out_ptr0 + x3, tmp5, xmask)
def call(args):
primals_1, primals_2, primals_3 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_2, (4,), (1,))
assert_size_stride(primals_3, (4, 4), (4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4), (4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_maximum_mul_pow_sub_0[grid(16)](primals_3, buf0,
16, XBLOCK=16, num_warps=1, num_stages=1)
buf1 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_pow_1[grid(256)](primals_1, buf1, 256, XBLOCK=256,
num_warps=4, num_stages=1)
buf2 = empty_strided_cuda((4,), (1,), torch.float32)
triton_poi_fused_convolution_maximum_mul_pow_sub_2[grid(4)](primals_2,
buf2, 4, XBLOCK=4, num_warps=1, num_stages=1)
buf3 = extern_kernels.convolution(buf1, reinterpret_tensor(buf0, (4,
4, 1, 1), (4, 1, 0, 0), 0), stride=(1, 1), padding=(0, 0),
dilation=(1, 1), transposed=False, output_padding=(0, 0),
groups=1, bias=None)
assert_size_stride(buf3, (4, 4, 4, 4), (64, 16, 4, 1))
buf4 = buf3
del buf3
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
triton_poi_fused_convolution_div_maximum_mul_pow_sqrt_sub_3[grid(256)](
buf4, buf2, primals_1, buf5, 256, XBLOCK=128, num_warps=4,
num_stages=1)
del buf2
return buf5, primals_1, primals_2, primals_3, reinterpret_tensor(buf0,
(4, 4, 1, 1), (4, 1, 1, 1), 0), buf1, buf4
class LowerBound(Function):
@staticmethod
def forward(ctx, inputs, bound):
b = torch.ones_like(inputs) * bound
ctx.save_for_backward(inputs, b)
return torch.max(inputs, b)
@staticmethod
def backward(ctx, grad_output):
inputs, b = ctx.saved_tensors
pass_through_1 = inputs >= b
pass_through_2 = grad_output < 0
pass_through = pass_through_1 | pass_through_2
return pass_through.type(grad_output.dtype) * grad_output, None
class GDNNew(nn.Module):
"""Generalized divisive normalization layer.
    y[i] = x[i] / sqrt(beta[i] + sum_j(gamma[j, i] * x[j]^2))
"""
def __init__(self, ch, inverse=False, beta_min=1e-06, gamma_init=0.1,
reparam_offset=2 ** -18):
super(GDNNew, self).__init__()
self.inverse = inverse
self.beta_min = beta_min
self.gamma_init = gamma_init
self.reparam_offset = reparam_offset
self.build(ch)
def build(self, ch):
self.pedestal = self.reparam_offset ** 2
self.beta_bound = (self.beta_min + self.reparam_offset ** 2) ** 0.5
self.gamma_bound = self.reparam_offset
beta = torch.sqrt(torch.ones(ch) + self.pedestal)
self.beta = nn.Parameter(beta)
eye = torch.eye(ch)
g = self.gamma_init * eye
g = g + self.pedestal
gamma = torch.sqrt(g)
self.gamma = nn.Parameter(gamma)
        self.pedestal = self.pedestal  # no-op kept from the original source
def forward(self, input_0):
primals_2 = self.beta
primals_3 = self.gamma
primals_1 = input_0
output = call([primals_1, primals_2, primals_3])
return output[0]
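# --- Hedged usage sketch (an addition, not part of the generated file) ---
# Forward-equivalence probe between the eager GDN from the reference snippet
# and the Triton-backed GDNNew. Here primals_2/primals_3 map directly onto
# beta/gamma, so after sharing the state dict the two paths should agree to
# float32 tolerance. Assumes a CUDA device and that GDN is importable.
def _check_gdn():
    torch.manual_seed(0)
    eager = GDN(4).cuda()
    fused = GDNNew(4).cuda()
    fused.load_state_dict(eager.state_dict())
    x = torch.rand(4, 4, 4, 4, device='cuda')
    torch.testing.assert_close(fused(x), eager(x), rtol=1e-4, atol=1e-5)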
|
Geunwoo-Jeon/iclr_17_compression
|
GDN
| false | 13,724 |
[
"MIT"
] | 56 |
a28746b1f1c518d91125d8f289d9511cde488c77
|
https://github.com/Geunwoo-Jeon/iclr_17_compression/tree/a28746b1f1c518d91125d8f289d9511cde488c77
|
BitEstimator
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/3b/c3bdm3d3443oq2lufzqzrryuzboofq3y5dx2ddbcghahimvz3qqp.py
# Topologically Sorted Source Nodes: [softplus, mul, x, tanh, tanh_1, mul_1, x_1, softplus_1, mul_2, x_2, tanh_2, tanh_3, mul_3, x_3, softplus_2, mul_4, x_4, tanh_4, tanh_5, mul_5, x_5, softplus_3, mul_6, add_6, sigmoid], Original ATen: [aten.softplus, aten.mul, aten.add, aten.tanh, aten.sigmoid]
# Source node to ATen node mapping:
# add_6 => add_6
# mul => mul
# mul_1 => mul_1
# mul_2 => mul_2
# mul_3 => mul_3
# mul_4 => mul_4
# mul_5 => mul_5
# mul_6 => mul_6
# sigmoid => sigmoid
# softplus => exp, gt, log1p, where
# softplus_1 => exp_1, gt_1, log1p_1, where_1
# softplus_2 => exp_2, gt_2, log1p_2, where_2
# softplus_3 => exp_3, gt_3, log1p_3, where_3
# tanh => tanh
# tanh_1 => tanh_1
# tanh_2 => tanh_2
# tanh_3 => tanh_3
# tanh_4 => tanh_4
# tanh_5 => tanh_5
# x => add
# x_1 => add_1
# x_2 => add_2
# x_3 => add_3
# x_4 => add_4
# x_5 => add_5
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%primals_1,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %gt : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%primals_1, 20), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %primals_1, %log1p), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %where), kwargs = {})
# %add : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul, %primals_3), kwargs = {})
# %tanh : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add,), kwargs = {})
# %tanh_1 : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%primals_4,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh, %tanh_1), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add, %mul_1), kwargs = {})
# %exp_1 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%primals_5,), kwargs = {})
# %log1p_1 : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp_1,), kwargs = {})
# %gt_1 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%primals_5, 20), kwargs = {})
# %where_1 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_1, %primals_5, %log1p_1), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_1, %where_1), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_2, %primals_6), kwargs = {})
# %tanh_2 : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add_2,), kwargs = {})
# %tanh_3 : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%primals_7,), kwargs = {})
# %mul_3 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh_2, %tanh_3), kwargs = {})
# %add_3 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %mul_3), kwargs = {})
# %exp_2 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%primals_8,), kwargs = {})
# %log1p_2 : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp_2,), kwargs = {})
# %gt_2 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%primals_8, 20), kwargs = {})
# %where_2 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_2, %primals_8, %log1p_2), kwargs = {})
# %mul_4 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_3, %where_2), kwargs = {})
# %add_4 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_4, %primals_9), kwargs = {})
# %tanh_4 : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%add_4,), kwargs = {})
# %tanh_5 : [num_users=1] = call_function[target=torch.ops.aten.tanh.default](args = (%primals_10,), kwargs = {})
# %mul_5 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%tanh_4, %tanh_5), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_4, %mul_5), kwargs = {})
# %exp_3 : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%primals_11,), kwargs = {})
# %log1p_3 : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp_3,), kwargs = {})
# %gt_3 : [num_users=1] = call_function[target=torch.ops.aten.gt.Scalar](args = (%primals_11, 20), kwargs = {})
# %where_3 : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt_3, %primals_11, %log1p_3), kwargs = {})
# %mul_6 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add_5, %where_3), kwargs = {})
# %add_6 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_6, %primals_12), kwargs = {})
# %sigmoid : [num_users=1] = call_function[target=torch.ops.aten.sigmoid.default](args = (%add_6,), kwargs = {})
triton_poi_fused_add_mul_sigmoid_softplus_tanh_0 = async_compile.triton('triton_poi_fused_add_mul_sigmoid_softplus_tanh_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: '*fp32', 8: '*fp32', 9: '*fp32', 10: '*fp32', 11: '*fp32', 12: '*fp32', 13: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mul_sigmoid_softplus_tanh_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 12, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mul_sigmoid_softplus_tanh_0(in_out_ptr0, in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8, in_ptr9, in_ptr10, in_ptr11, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = (xindex // 16) % 4
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr1 + (x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + (x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr5 + (x1), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr6 + (x1), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr7 + (x1), xmask, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr8 + (x1), xmask, eviction_policy='evict_last')
tmp37 = tl.load(in_ptr9 + (x1), xmask, eviction_policy='evict_last')
tmp41 = tl.load(in_ptr10 + (x1), xmask, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr11 + (x1), xmask, eviction_policy='evict_last')
tmp2 = 20.0
tmp3 = tmp1 > tmp2
tmp4 = tl_math.exp(tmp1)
tmp5 = libdevice.log1p(tmp4)
tmp6 = tl.where(tmp3, tmp1, tmp5)
tmp7 = tmp0 * tmp6
tmp9 = tmp7 + tmp8
tmp10 = libdevice.tanh(tmp9)
tmp12 = libdevice.tanh(tmp11)
tmp13 = tmp10 * tmp12
tmp14 = tmp9 + tmp13
tmp16 = tmp15 > tmp2
tmp17 = tl_math.exp(tmp15)
tmp18 = libdevice.log1p(tmp17)
tmp19 = tl.where(tmp16, tmp15, tmp18)
tmp20 = tmp14 * tmp19
tmp22 = tmp20 + tmp21
tmp23 = libdevice.tanh(tmp22)
tmp25 = libdevice.tanh(tmp24)
tmp26 = tmp23 * tmp25
tmp27 = tmp22 + tmp26
tmp29 = tmp28 > tmp2
tmp30 = tl_math.exp(tmp28)
tmp31 = libdevice.log1p(tmp30)
tmp32 = tl.where(tmp29, tmp28, tmp31)
tmp33 = tmp27 * tmp32
tmp35 = tmp33 + tmp34
tmp36 = libdevice.tanh(tmp35)
tmp38 = libdevice.tanh(tmp37)
tmp39 = tmp36 * tmp38
tmp40 = tmp35 + tmp39
tmp42 = tmp41 > tmp2
tmp43 = tl_math.exp(tmp41)
tmp44 = libdevice.log1p(tmp43)
tmp45 = tl.where(tmp42, tmp41, tmp44)
tmp46 = tmp40 * tmp45
tmp48 = tmp46 + tmp47
tmp49 = tl.sigmoid(tmp48)
tl.store(in_out_ptr0 + (x3), tmp49, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_6, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_7, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_8, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_9, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_10, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_11, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_12, (1, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [softplus, mul, x, tanh, tanh_1, mul_1, x_1, softplus_1, mul_2, x_2, tanh_2, tanh_3, mul_3, x_3, softplus_2, mul_4, x_4, tanh_4, tanh_5, mul_5, x_5, softplus_3, mul_6, add_6, sigmoid], Original ATen: [aten.softplus, aten.mul, aten.add, aten.tanh, aten.sigmoid]
stream0 = get_raw_stream(0)
triton_poi_fused_add_mul_sigmoid_softplus_tanh_0.run(buf1, primals_2, primals_1, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12, 256, grid=grid(256), stream=stream0)
del primals_12
return (buf1, primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((1, 4, 1, 1), (4, 1, 1, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
class Bitparm(nn.Module):
"""
    Per-channel parameters (h, b and, for non-final stages, a) of one stage.
"""
def __init__(self, channel, final=False):
super(Bitparm, self).__init__()
self.final = final
self.h = nn.Parameter(torch.nn.init.normal_(torch.empty(channel).
view(1, -1, 1, 1), 0, 0.01))
self.b = nn.Parameter(torch.nn.init.normal_(torch.empty(channel).
view(1, -1, 1, 1), 0, 0.01))
if not final:
self.a = nn.Parameter(torch.nn.init.normal_(torch.empty(channel
).view(1, -1, 1, 1), 0, 0.01))
else:
self.a = None
def forward(self, x):
if self.final:
return torch.sigmoid(x * F.softplus(self.h) + self.b)
else:
x = x * F.softplus(self.h) + self.b
return x + torch.tanh(x) * torch.tanh(self.a)
class BitEstimator(nn.Module):
"""
    Stack of Bitparm stages modelling a cumulative distribution for bit estimation.
"""
def __init__(self, channel):
super(BitEstimator, self).__init__()
self.f1 = Bitparm(channel)
self.f2 = Bitparm(channel)
self.f3 = Bitparm(channel)
self.f4 = Bitparm(channel, True)
def forward(self, x):
x = self.f1(x)
x = self.f2(x)
x = self.f3(x)
return self.f4(x)
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'channel': 4}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_mul_sigmoid_softplus_tanh_0(in_out_ptr0, in_ptr0,
in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, in_ptr6, in_ptr7, in_ptr8,
in_ptr9, in_ptr10, in_ptr11, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x1 = xindex // 16 % 4
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr1 + x1, xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr2 + x1, xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp21 = tl.load(in_ptr5 + x1, xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr6 + x1, xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr7 + x1, xmask, eviction_policy='evict_last')
tmp34 = tl.load(in_ptr8 + x1, xmask, eviction_policy='evict_last')
tmp37 = tl.load(in_ptr9 + x1, xmask, eviction_policy='evict_last')
tmp41 = tl.load(in_ptr10 + x1, xmask, eviction_policy='evict_last')
tmp47 = tl.load(in_ptr11 + x1, xmask, eviction_policy='evict_last')
tmp2 = 20.0
tmp3 = tmp1 > tmp2
tmp4 = tl_math.exp(tmp1)
tmp5 = libdevice.log1p(tmp4)
tmp6 = tl.where(tmp3, tmp1, tmp5)
tmp7 = tmp0 * tmp6
tmp9 = tmp7 + tmp8
tmp10 = libdevice.tanh(tmp9)
tmp12 = libdevice.tanh(tmp11)
tmp13 = tmp10 * tmp12
tmp14 = tmp9 + tmp13
tmp16 = tmp15 > tmp2
tmp17 = tl_math.exp(tmp15)
tmp18 = libdevice.log1p(tmp17)
tmp19 = tl.where(tmp16, tmp15, tmp18)
tmp20 = tmp14 * tmp19
tmp22 = tmp20 + tmp21
tmp23 = libdevice.tanh(tmp22)
tmp25 = libdevice.tanh(tmp24)
tmp26 = tmp23 * tmp25
tmp27 = tmp22 + tmp26
tmp29 = tmp28 > tmp2
tmp30 = tl_math.exp(tmp28)
tmp31 = libdevice.log1p(tmp30)
tmp32 = tl.where(tmp29, tmp28, tmp31)
tmp33 = tmp27 * tmp32
tmp35 = tmp33 + tmp34
tmp36 = libdevice.tanh(tmp35)
tmp38 = libdevice.tanh(tmp37)
tmp39 = tmp36 * tmp38
tmp40 = tmp35 + tmp39
tmp42 = tmp41 > tmp2
tmp43 = tl_math.exp(tmp41)
tmp44 = libdevice.log1p(tmp43)
tmp45 = tl.where(tmp42, tmp41, tmp44)
tmp46 = tmp40 * tmp45
tmp48 = tmp46 + tmp47
tmp49 = tl.sigmoid(tmp48)
tl.store(in_out_ptr0 + x3, tmp49, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_4, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_5, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_6, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_7, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_8, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_9, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_10, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_11, (1, 4, 1, 1), (4, 1, 1, 1))
assert_size_stride(primals_12, (1, 4, 1, 1), (4, 1, 1, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf1 = buf0
del buf0
get_raw_stream(0)
triton_poi_fused_add_mul_sigmoid_softplus_tanh_0[grid(256)](buf1,
primals_2, primals_1, primals_3, primals_4, primals_5,
primals_6, primals_7, primals_8, primals_9, primals_10,
primals_11, primals_12, 256, XBLOCK=128, num_warps=4, num_stages=1)
del primals_12
return (buf1, primals_1, primals_2, primals_3, primals_4, primals_5,
primals_6, primals_7, primals_8, primals_9, primals_10, primals_11,
buf1)
class Bitparm(nn.Module):
"""
    Per-channel parameters (h, b and, for non-final stages, a) of one stage.
"""
def __init__(self, channel, final=False):
super(Bitparm, self).__init__()
self.final = final
self.h = nn.Parameter(torch.nn.init.normal_(torch.empty(channel).
view(1, -1, 1, 1), 0, 0.01))
self.b = nn.Parameter(torch.nn.init.normal_(torch.empty(channel).
view(1, -1, 1, 1), 0, 0.01))
if not final:
self.a = nn.Parameter(torch.nn.init.normal_(torch.empty(channel
).view(1, -1, 1, 1), 0, 0.01))
else:
self.a = None
def forward(self, x):
if self.final:
return torch.sigmoid(x * F.softplus(self.h) + self.b)
else:
x = x * F.softplus(self.h) + self.b
return x + torch.tanh(x) * torch.tanh(self.a)
class BitEstimatorNew(nn.Module):
"""
    Stack of Bitparm stages modelling a cumulative distribution for bit estimation.
"""
def __init__(self, channel):
super(BitEstimatorNew, self).__init__()
self.f1 = Bitparm(channel)
self.f2 = Bitparm(channel)
self.f3 = Bitparm(channel)
self.f4 = Bitparm(channel, True)
def forward(self, input_0):
primals_1 = self.f1.h
primals_3 = self.f1.b
primals_4 = self.f1.a
primals_5 = self.f2.h
primals_6 = self.f2.b
primals_7 = self.f2.a
primals_8 = self.f3.h
primals_9 = self.f3.b
primals_10 = self.f3.a
primals_11 = self.f4.h
primals_12 = self.f4.b
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0]
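# --- Hedged usage sketch (an addition, not part of the generated file) ---
# In the compression models this module comes from, the sigmoid output is
# treated as a learned CDF F, and the bit cost of a quantized latent z is
# estimated as -log2(F(z + 0.5) - F(z - 0.5)). That downstream usage is an
# assumption drawn from this line of work, not something defined here.
def _estimate_bits(estimator, z, eps=1e-10):
    prob = estimator(z + 0.5) - estimator(z - 0.5)
    return -torch.log2(prob.clamp(min=eps))
# e.g. _estimate_bits(BitEstimatorNew(4).cuda(),
#                     torch.randn(4, 4, 4, 4, device='cuda'))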
|
Geunwoo-Jeon/iclr_17_compression
|
BitEstimator
| false | 13,725 |
[
"MIT"
] | 56 |
a28746b1f1c518d91125d8f289d9511cde488c77
|
https://github.com/Geunwoo-Jeon/iclr_17_compression/tree/a28746b1f1c518d91125d8f289d9511cde488c77
|
BCEWithLogitsLossWeighted
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/bg/cbg4ywmzvzuczhahtpfwzm3fqdikgxk5wekfsmcsvgxaa46kstde.py
# Topologically Sorted Source Nodes: [temp, mul, sum_1, weight_mask, sum_2, loss], Original ATen: [aten.binary_cross_entropy_with_logits, aten.mul, aten.sum, aten.ones_like, aten.div]
# Source node to ATen node mapping:
# loss => div
# mul => sub_2
# sum_1 => sum_1
# sum_2 => sum_2
# temp => abs_1, exp, full_default, log1p, minimum, mul, neg, sub, sub_1
# weight_mask => full_default_1
# Graph fragment:
# %sub : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (1, %arg0_1), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub, %arg1_1), kwargs = {})
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %minimum : [num_users=1] = call_function[target=torch.ops.aten.minimum.default](args = (%full_default, %arg1_1), kwargs = {})
# %abs_1 : [num_users=1] = call_function[target=torch.ops.aten.abs.default](args = (%arg1_1,), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%abs_1,), kwargs = {})
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%neg,), kwargs = {})
# %log1p : [num_users=1] = call_function[target=torch.ops.aten.log1p.default](args = (%exp,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%minimum, %log1p), kwargs = {})
# %sub_2 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul, %sub_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%sub_2,), kwargs = {})
# %full_default_1 : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%full_default_1,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %sum_2), kwargs = {})
triton_per_fused_binary_cross_entropy_with_logits_div_mul_ones_like_sum_0 = async_compile.triton('triton_per_fused_binary_cross_entropy_with_logits_div_mul_ones_like_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {3: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 4), equal_to_1=(3,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_binary_cross_entropy_with_logits_div_mul_ones_like_sum_0', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 2, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_div_mul_ones_like_sum_0(in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + (r0), None)
tmp3 = tl.load(in_ptr1 + (r0), None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = triton_helpers.minimum(tmp5, tmp3)
tmp7 = tl_math.abs(tmp3)
tmp8 = -tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = libdevice.log1p(tmp9)
tmp11 = tmp6 - tmp10
tmp12 = tmp4 - tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = tl.broadcast_to(tmp1, [RBLOCK])
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
tmp19 = tmp15 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp19, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0; del buf0 # reuse
# Topologically Sorted Source Nodes: [temp, mul, sum_1, weight_mask, sum_2, loss], Original ATen: [aten.binary_cross_entropy_with_logits, aten.mul, aten.sum, aten.ones_like, aten.div]
stream0 = get_raw_stream(0)
triton_per_fused_binary_cross_entropy_with_logits_div_mul_ones_like_sum_0.run(buf2, arg0_1, arg1_1, 1, 256, grid=grid(1), stream=stream0)
del arg0_1
del arg1_1
return (buf2, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class WeightedLoss(nn.Module):
def __init__(self):
super(WeightedLoss, self).__init__()
self.weighted = False
def generate_weight_mask(self, mask, to_ignore=None):
""" Generates a weight mask where pixel weights are inversely proportional to
how many pixels are in the class
        @param mask: a [N x ...] torch.FloatTensor with values in {0, 1, 2, ..., K+1}, where K is the number of objects. {0,1} are background/table.
@param to_ignore: a list of classes (integers) to ignore when creating mask
@return: a torch.FloatTensor that is same shape as mask.
"""
N = mask.shape[0]
if self.weighted:
weight_mask = torch.zeros_like(mask).float()
for i in range(N):
unique_object_labels = torch.unique(mask[i])
for obj in unique_object_labels:
if to_ignore is not None and obj in to_ignore:
continue
num_pixels = torch.sum(mask[i] == obj, dtype=torch.float)
weight_mask[i, mask[i] == obj] = 1 / num_pixels
else:
weight_mask = torch.ones_like(mask)
if to_ignore is not None:
for obj in to_ignore:
weight_mask[mask == obj] = 0
return weight_mask
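# Example (hypothetical numbers): with weighted=True and a 2x2 mask
# [[0, 0], [0, 1]], class 0 has 3 pixels and class 1 has 1, so each class-0
# pixel gets weight 1/3 and the class-1 pixel gets weight 1 -- rare classes
# contribute as much total weight as common ones.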
class BCEWithLogitsLossWeighted(WeightedLoss):
""" Compute weighted BCE loss with logits
"""
def __init__(self, weighted=False):
super(BCEWithLogitsLossWeighted, self).__init__()
self.BCEWithLogitsLoss = nn.BCEWithLogitsLoss(reduction='none')
self.weighted = weighted
def forward(self, x, target):
""" Compute masked cosine similarity loss
@param x: a [N x H x W] torch.FloatTensor of foreground logits
@param target: a [N x H x W] torch.FloatTensor of values in [0, 1]
"""
temp = self.BCEWithLogitsLoss(x, target)
weight_mask = self.generate_weight_mask(target)
loss = torch.sum(temp * weight_mask) / torch.sum(weight_mask)
return loss
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
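# The fused reduction kernel evaluates the numerically stable BCE-with-logits
# form (1 - t) * x - (min(0, x) - log1p(exp(-|x|))), which is algebraically
# equal to max(x, 0) - x * t + log(1 + exp(-|x|)). A minimal eager check
# (assumption, not part of the extracted source):
# x, t = torch.randn(4, 4, 4, 4), torch.rand(4, 4, 4, 4)
# manual = ((1 - t) * x - (x.clamp(max=0) - torch.log1p(torch.exp(-x.abs())))).mean()
# assert torch.allclose(manual, torch.nn.functional.binary_cross_entropy_with_logits(x, t))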
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_per_fused_binary_cross_entropy_with_logits_div_mul_ones_like_sum_0(
in_out_ptr0, in_ptr0, in_ptr1, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex
tmp0 = tl.load(in_ptr0 + r0, None)
tmp3 = tl.load(in_ptr1 + r0, None)
tmp1 = 1.0
tmp2 = tmp1 - tmp0
tmp4 = tmp2 * tmp3
tmp5 = 0.0
tmp6 = triton_helpers.minimum(tmp5, tmp3)
tmp7 = tl_math.abs(tmp3)
tmp8 = -tmp7
tmp9 = tl_math.exp(tmp8)
tmp10 = libdevice.log1p(tmp9)
tmp11 = tmp6 - tmp10
tmp12 = tmp4 - tmp11
tmp13 = tl.broadcast_to(tmp12, [RBLOCK])
tmp15 = triton_helpers.promote_to_tensor(tl.sum(tmp13, 0))
tmp16 = tl.broadcast_to(tmp1, [RBLOCK])
tmp18 = triton_helpers.promote_to_tensor(tl.sum(tmp16, 0))
tmp19 = tmp15 / tmp18
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp19, None)
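# A single program reduces all 256 elements: the summed per-element BCE terms
# are divided by the sum of the all-ones weight mask, i.e. the unweighted mean.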
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((), (), torch.float32)
buf2 = buf0
del buf0
get_raw_stream(0)
triton_per_fused_binary_cross_entropy_with_logits_div_mul_ones_like_sum_0[
grid(1)](buf2, arg0_1, arg1_1, 1, 256, num_warps=2, num_stages=1)
del arg0_1
del arg1_1
return buf2,
class WeightedLoss(nn.Module):
def __init__(self):
super(WeightedLoss, self).__init__()
self.weighted = False
def generate_weight_mask(self, mask, to_ignore=None):
""" Generates a weight mask where pixel weights are inversely proportional to
how many pixels are in the class
        @param mask: a [N x ...] torch.FloatTensor with values in {0, 1, 2, ..., K+1}, where K is the number of objects. {0,1} are background/table.
@param to_ignore: a list of classes (integers) to ignore when creating mask
@return: a torch.FloatTensor that is same shape as mask.
"""
N = mask.shape[0]
if self.weighted:
weight_mask = torch.zeros_like(mask).float()
for i in range(N):
unique_object_labels = torch.unique(mask[i])
for obj in unique_object_labels:
if to_ignore is not None and obj in to_ignore:
continue
num_pixels = torch.sum(mask[i] == obj, dtype=torch.float)
weight_mask[i, mask[i] == obj] = 1 / num_pixels
else:
weight_mask = torch.ones_like(mask)
if to_ignore is not None:
for obj in to_ignore:
weight_mask[mask == obj] = 0
return weight_mask
class BCEWithLogitsLossWeightedNew(WeightedLoss):
""" Compute weighted BCE loss with logits
"""
def __init__(self, weighted=False):
super(BCEWithLogitsLossWeightedNew, self).__init__()
self.BCEWithLogitsLoss = nn.BCEWithLogitsLoss(reduction='none')
self.weighted = weighted
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
Guangyun-Xu/uois
|
BCEWithLogitsLossWeighted
| false | 13,726 |
[
"MIT"
] | 106 |
00069af841dd3ea9a86e6e3a89c3b7222240e6e5
|
https://github.com/Guangyun-Xu/uois/tree/00069af841dd3ea9a86e6e3a89c3b7222240e6e5
|
TransformerEncoderLayer
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/fz/cfzmg4qtw6jgry4nhlwopodzjz62ll3n3ykfox77hwd2crdnlh2w.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => exp
# Graph fragment:
# %mul_tensor_3 : [num_users=2] = call_function[target=torch.ops.aten.mul.Tensor](args = (%bmm, 1), kwargs = {})
# %amax_default_3 : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%mul_tensor_3, [-1], True), kwargs = {})
# %sub_tensor_3 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%mul_tensor_3, %amax_default_3), kwargs = {})
# %div_tensor_3 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sub_tensor_3, 2.0), kwargs = {})
# %exp : [num_users=2] = call_function[target=torch.ops.aten.exp.default](args = (%div_tensor_3,), kwargs = {})
triton_poi_fused__softmax_0 = async_compile.triton('triton_poi_fused__softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp3 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + (x2), tmp17, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/kj/ckjtlefzavjukjsytvkak6ek26zmzexpcbnlwelx4k5kascjxlf3.py
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
# Source node to ATen node mapping:
# softmax => div_1, sum_1
# Graph fragment:
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [-1], True), kwargs = {})
# %div_1 : [num_users=2] = call_function[target=torch.ops.aten.div.Tensor](args = (%exp, %sum_1), kwargs = {})
triton_poi_fused__softmax_1 = async_compile.triton('triton_poi_fused__softmax_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__softmax_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (4*x1), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + (x2), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/mk/cmkim2hc4ksxhatli3y5cu7hoqofxcbzqrrxvnlhmswdt4cgww25.py
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
# Source node to ATen node mapping:
# cat => cat
# Graph fragment:
# %cat : [num_users=2] = call_function[target=torch.ops.aten.cat.default](args = ([%bmm_1, %bmm_3, %bmm_5, %bmm_7], -1), kwargs = {})
triton_poi_fused_cat_2 = async_compile.triton('triton_poi_fused_cat_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_cat_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 4, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = (xindex // 4)
x2 = xindex
tmp0 = x0
tmp1 = tl.full([1], 0, tl.int64)
tmp2 = tmp0 >= tmp1
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + (x1), tmp4 & xmask, eviction_policy='evict_last', other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + (x1), tmp9 & xmask, eviction_policy='evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + (x1), tmp14 & xmask, eviction_policy='evict_last', other=0.0)
tmp16 = tmp0 >= tmp12
tmp17 = tl.full([1], 4, tl.int64)
tmp18 = tmp0 < tmp17
tmp19 = tl.load(in_ptr3 + (x1), tmp16 & xmask, eviction_policy='evict_last', other=0.0)
tmp20 = tl.where(tmp14, tmp15, tmp19)
tmp21 = tl.where(tmp9, tmp10, tmp20)
tmp22 = tl.where(tmp4, tmp5, tmp21)
tl.store(out_ptr0 + (x2), tmp22, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/7f/c7fwok6q7j5rvjs3ob32s2cth5xjbedhynzb5ozchylog57bhmxv.py
# Topologically Sorted Source Nodes: [add, mean, std], Original ATen: [aten.add, aten.mean, aten.std]
# Source node to ATen node mapping:
# add => add
# mean => mean
# std => var
# Graph fragment:
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %cat), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add, [-1], True), kwargs = {})
# %var : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%add, [-1]), kwargs = {correction: 1.0, keepdim: True})
triton_poi_fused_add_mean_std_3 = async_compile.triton('triton_poi_fused_add_mean_std_3', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_mean_std_3', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_mean_std_3(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + (4*x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = 3.0
tmp29 = tmp27 / tmp28
tl.store(in_out_ptr0 + (x0), tmp29, xmask)
tl.store(out_ptr0 + (x0), tmp16, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/dw/cdwd24bmovp4kvuenv3jq6ffpahgl34iziauouexc57lxivmzubp.py
# Topologically Sorted Source Nodes: [add, mean, std, sub, mul, add_1, truediv_4, add_2], Original ATen: [aten.add, aten.mean, aten.std, aten.sub, aten.mul, aten.div]
# Source node to ATen node mapping:
# add => add
# add_1 => add_1
# add_2 => add_2
# mean => mean
# mul => mul
# std => sqrt
# sub => sub_4
# truediv_4 => div_8
# Graph fragment:
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%primals_1, %cat), kwargs = {})
# %mean : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add, [-1], True), kwargs = {})
# %sqrt : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%var,), kwargs = {})
# %sub_4 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add, %mean), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_5, %sub_4), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt, 1e-06), kwargs = {})
# %div_8 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul, %add_1), kwargs = {})
# %add_2 : [num_users=2] = call_function[target=torch.ops.aten.add.Tensor](args = (%div_8, %primals_6), kwargs = {})
triton_poi_fused_add_div_mean_mul_std_sub_4 = async_compile.triton('triton_poi_fused_add_div_mean_mul_std_sub_4', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*fp32', 6: '*fp32', 7: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 7), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_std_sub_4', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 6, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_4(in_ptr0, in_ptr1, in_ptr2, in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr2 + (x2), xmask)
tmp4 = tl.load(in_ptr3 + (x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + (x1), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 - tmp4
tmp6 = tmp0 * tmp5
tmp8 = libdevice.sqrt(tmp7)
tmp9 = 1e-06
tmp10 = tmp8 + tmp9
tmp11 = tmp6 / tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + (x2), tmp13, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/va/cvayouropyisaprtjrhemadbdvsels72axdjsrgmbayknhu335yc.py
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# relu => relu
# Graph fragment:
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%view_31,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_poi_fused_relu_threshold_backward_5 = async_compile.triton('triton_poi_fused_relu_threshold_backward_5', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*i1', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_relu_threshold_backward_5', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 2, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_relu_threshold_backward_5(in_out_ptr0, in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + (x2), xmask)
tmp1 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
tl.store(out_ptr0 + (x2), tmp6, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/dg/cdg2dxfjk7prchu44e4cgkid2y4524hl5vpyijgt6dwrnsrwzz2k.py
# Topologically Sorted Source Nodes: [add_3], Original ATen: [aten.add]
# Source node to ATen node mapping:
# add_3 => add_3
# Graph fragment:
# %add_3 : [num_users=4] = call_function[target=torch.ops.aten.add.Tensor](args = (%add_2, %view_33), kwargs = {})
triton_poi_fused_add_6 = async_compile.triton('triton_poi_fused_add_6', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_6', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (x2), xmask)
tmp1 = tl.load(in_out_ptr0 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (x0), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + (x2), tmp4, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/j4/cj4wrybpym5umgwi5ropl654n64ptcknq2hunhzirmo6b5jmhqyj.py
# Topologically Sorted Source Nodes: [mean_2, std_2, sub_1, mul_1, add_4, truediv_5, add_5], Original ATen: [aten.mean, aten.std, aten.sub, aten.mul, aten.add, aten.div]
# Source node to ATen node mapping:
# add_4 => add_4
# add_5 => add_5
# mean_2 => mean_1
# mul_1 => mul_1
# std_2 => sqrt_1, var_1
# sub_1 => sub_5
# truediv_5 => div_9
# Graph fragment:
# %mean_1 : [num_users=1] = call_function[target=torch.ops.aten.mean.dim](args = (%add_3, [-1], True), kwargs = {})
# %var_1 : [num_users=1] = call_function[target=torch.ops.aten.var.correction](args = (%add_3, [-1]), kwargs = {correction: 1.0, keepdim: True})
# %sqrt_1 : [num_users=1] = call_function[target=torch.ops.aten.sqrt.default](args = (%var_1,), kwargs = {})
# %sub_5 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%add_3, %mean_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_11, %sub_5), kwargs = {})
# %add_4 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%sqrt_1, 1e-06), kwargs = {})
# %div_9 : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%mul_1, %add_4), kwargs = {})
# %add_5 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%div_9, %primals_12), kwargs = {})
triton_poi_fused_add_div_mean_mul_std_sub_7 = async_compile.triton('triton_poi_fused_add_div_mean_mul_std_sub_7', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_div_mean_mul_std_sub_7', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 7, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_7(in_ptr0, in_ptr1, in_ptr2, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = (xindex // 4)
tmp0 = tl.load(in_ptr0 + (x0), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (x2), xmask)
tmp2 = tl.load(in_ptr1 + (4*x1), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + (4*x1)), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + (4*x1)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + (4*x1)), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + (x0), xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp0 * tmp11
tmp13 = tmp2 - tmp10
tmp14 = tmp13 * tmp13
tmp15 = tmp3 - tmp10
tmp16 = tmp15 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = tmp5 - tmp10
tmp19 = tmp18 * tmp18
tmp20 = tmp17 + tmp19
tmp21 = tmp7 - tmp10
tmp22 = tmp21 * tmp21
tmp23 = tmp20 + tmp22
tmp24 = 3.0
tmp25 = tmp23 / tmp24
tmp26 = libdevice.sqrt(tmp25)
tmp27 = 1e-06
tmp28 = tmp26 + tmp27
tmp29 = tmp12 / tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + (x2), tmp31, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4, ), (1, ))
assert_size_stride(primals_6, (4, ), (1, ))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4, ), (1, ))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4, ), (1, ))
assert_size_stride(primals_11, (4, ), (1, ))
assert_size_stride(primals_12, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_1], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [linear_2], Original ATen: [aten.mm]
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0), reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
del primals_4
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [dot_products], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 0), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__softmax_0.run(buf3, buf4, 64, grid=grid(64), stream=stream0)
buf5 = buf3; del buf3 # reuse
# Topologically Sorted Source Nodes: [softmax], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf4, buf5, 64, grid=grid(64), stream=stream0)
buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_1], Original ATen: [aten.bmm]
extern_kernels.bmm(buf5, reinterpret_tensor(buf2, (4, 4, 1), (16, 4, 1), 0), out=buf6)
buf7 = buf4; del buf4 # reuse
# Topologically Sorted Source Nodes: [dot_products_1], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 1), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 1), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_0.run(buf7, buf8, 64, grid=grid(64), stream=stream0)
buf9 = buf7; del buf7 # reuse
# Topologically Sorted Source Nodes: [softmax_1], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf8, buf9, 64, grid=grid(64), stream=stream0)
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_3], Original ATen: [aten.bmm]
extern_kernels.bmm(buf9, reinterpret_tensor(buf2, (4, 4, 1), (16, 4, 1), 1), out=buf10)
buf11 = buf8; del buf8 # reuse
# Topologically Sorted Source Nodes: [dot_products_2], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 2), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 2), out=buf11)
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_2], Original ATen: [aten._softmax]
triton_poi_fused__softmax_0.run(buf11, buf12, 64, grid=grid(64), stream=stream0)
buf13 = buf11; del buf11 # reuse
# Topologically Sorted Source Nodes: [softmax_2], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf12, buf13, 64, grid=grid(64), stream=stream0)
buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_5], Original ATen: [aten.bmm]
extern_kernels.bmm(buf13, reinterpret_tensor(buf2, (4, 4, 1), (16, 4, 1), 2), out=buf14)
buf15 = buf12; del buf12 # reuse
# Topologically Sorted Source Nodes: [dot_products_3], Original ATen: [aten.bmm]
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1), 3), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 3), out=buf15)
buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [softmax_3], Original ATen: [aten._softmax]
triton_poi_fused__softmax_0.run(buf15, buf16, 64, grid=grid(64), stream=stream0)
buf17 = buf15; del buf15 # reuse
# Topologically Sorted Source Nodes: [softmax_3], Original ATen: [aten._softmax]
triton_poi_fused__softmax_1.run(buf16, buf17, 64, grid=grid(64), stream=stream0)
buf18 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
# Topologically Sorted Source Nodes: [matmul_7], Original ATen: [aten.bmm]
extern_kernels.bmm(buf17, reinterpret_tensor(buf2, (4, 4, 1), (16, 4, 1), 3), out=buf18)
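        # Each head above is a separate bmm over a strided view of the shared
        # Q/K/V projections: offsets 0..3 pick one feature column per head,
        # since d_model = 4 is chunked into n_heads = 4 slices of width 1.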
buf19 = buf16; del buf16 # reuse
# Topologically Sorted Source Nodes: [cat], Original ATen: [aten.cat]
triton_poi_fused_cat_2.run(buf6, buf10, buf14, buf18, buf19, 64, grid=grid(64), stream=stream0)
del buf10
del buf14
buf20 = reinterpret_tensor(buf6, (4, 4, 1), (4, 1, 16), 0); del buf6 # reuse
buf21 = buf20; del buf20 # reuse
buf22 = reinterpret_tensor(buf18, (4, 4, 1), (4, 1, 16), 0); del buf18 # reuse
# Topologically Sorted Source Nodes: [add, mean, std], Original ATen: [aten.add, aten.mean, aten.std]
triton_poi_fused_add_mean_std_3.run(buf21, primals_1, buf19, buf22, 16, grid=grid(16), stream=stream0)
buf23 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [add, mean, std, sub, mul, add_1, truediv_4, add_2], Original ATen: [aten.add, aten.mean, aten.std, aten.sub, aten.mul, aten.div]
triton_poi_fused_add_div_mean_mul_std_sub_4.run(primals_5, primals_1, buf19, buf22, buf21, primals_6, buf23, 64, grid=grid(64), stream=stream0)
del buf21
del buf22
del primals_6
buf24 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf23, (16, 4), (4, 1), 0), reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf24)
buf25 = reinterpret_tensor(buf24, (4, 4, 4), (16, 4, 1), 0); del buf24 # reuse
buf29 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
# Topologically Sorted Source Nodes: [relu], Original ATen: [aten.relu, aten.threshold_backward]
triton_poi_fused_relu_threshold_backward_5.run(buf25, primals_8, buf29, 64, grid=grid(64), stream=stream0)
del primals_8
buf26 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
# Topologically Sorted Source Nodes: [], Original ATen: []
extern_kernels.mm(reinterpret_tensor(buf25, (16, 4), (4, 1), 0), reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf26)
buf27 = reinterpret_tensor(buf26, (4, 4, 4), (16, 4, 1), 0); del buf26 # reuse
# Topologically Sorted Source Nodes: [add_3], Original ATen: [aten.add]
triton_poi_fused_add_6.run(buf27, buf23, primals_10, 64, grid=grid(64), stream=stream0)
del primals_10
buf28 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mean_2, std_2, sub_1, mul_1, add_4, truediv_5, add_5], Original ATen: [aten.mean, aten.std, aten.sub, aten.mul, aten.add, aten.div]
triton_poi_fused_add_div_mean_mul_std_sub_7.run(primals_11, buf27, primals_12, buf28, 64, grid=grid(64), stream=stream0)
del primals_12
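    # Everything after buf28 in the return below is saved for backward:
    # attention probabilities (buf5/buf9/buf13/buf17), the concatenated heads
    # (buf19), the LayerNorm/FFN intermediates, the ReLU mask (buf29), and
    # strided views of the Q/K/V projections.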
return (buf28, primals_1, primals_5, primals_11, buf5, buf9, buf13, buf17, buf19, reinterpret_tensor(buf23, (16, 4), (4, 1), 0), reinterpret_tensor(buf25, (16, 4), (4, 1), 0), buf27, primals_9, buf29, primals_7, reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 3), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 3), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 2), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 2), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 1), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 1), reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 0), reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 0), )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_5 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_6 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_7 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_8 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_9 = rand_strided((4, 4), (4, 1), device='cuda:0', dtype=torch.float32)
primals_10 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_11 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_12 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4, primals_5, primals_6, primals_7, primals_8, primals_9, primals_10, primals_11, primals_12])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import math
import torch
from torch import nn
from torch.nn import functional as F
import torch.utils.data
# INF is referenced by Attention for causal and padding masks but is not
# defined in this extracted source; 1e10 is an assumed value that keeps the
# module importable and runnable.
INF = 1e10
def matmul(x, y):
if x.dim() == y.dim():
return x @ y
if x.dim() == y.dim() - 1:
return (x.unsqueeze(-2) @ y).squeeze(-2)
return (x @ y.unsqueeze(-2)).squeeze(-2)
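# Broadcasting sketch (assumption): with x of shape (B, D) and y of shape
# (B, D, D), x.dim() == y.dim() - 1, so x is lifted to a row vector:
# (B, 1, D) @ (B, D, D) -> (B, 1, D), then squeezed back to (B, D).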
class Linear(nn.Linear):
def forward(self, x):
size = x.size()
return super().forward(x.contiguous().view(-1, size[-1])).view(*
size[:-1], -1)
class Feedforward(nn.Module):
def __init__(self, d_in, d_out, activation=None, bias=True, dropout=0.2):
super().__init__()
if activation is not None:
self.activation = getattr(torch, activation)
else:
self.activation = lambda x: x
self.linear = Linear(d_in, d_out, bias=bias)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
return self.activation(self.linear(self.dropout(x)))
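# Note: dropout is applied to the input of the linear layer rather than its
# output; with dropout=0.5 (see get_init_inputs), eval() mode is presumably
# needed to compare the eager module against the compiled graph, whose trace
# contains no dropout ops.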
class LinearReLU(nn.Module):
def __init__(self, d_model, d_hidden):
super().__init__()
self.feedforward = Feedforward(d_model, d_hidden, activation='relu')
self.linear = Linear(d_hidden, d_model)
def forward(self, x, padding=None):
return self.linear(self.feedforward(x))
class Attention(nn.Module):
def __init__(self, d_key, dropout_ratio, causal):
super().__init__()
self.scale = math.sqrt(d_key)
self.dropout = nn.Dropout(dropout_ratio)
self.causal = causal
def forward(self, query, key, value, padding=None):
dot_products = matmul(query, key.transpose(1, 2))
if query.dim() == 3 and self.causal:
tri = key.new_ones((key.size(1), key.size(1))).triu(1) * INF
dot_products.sub_(tri.unsqueeze(0))
if padding is not None:
dot_products.masked_fill_(padding.unsqueeze(1).expand_as(
dot_products), -INF)
return matmul(self.dropout(F.softmax(dot_products / self.scale, dim
=-1)), value)
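# With d_key = 4, self.scale = 2.0; in the fused softmax kernel this shows up
# as multiplying the max-shifted logits by 0.5 before the exp.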
class MultiHead(nn.Module):
def __init__(self, d_key, d_value, n_heads, dropout_ratio, causal=False):
super().__init__()
self.attention = Attention(d_key, dropout_ratio, causal=causal)
self.wq = Linear(d_key, d_key, bias=False)
self.wk = Linear(d_key, d_key, bias=False)
self.wv = Linear(d_value, d_value, bias=False)
self.n_heads = n_heads
def forward(self, query, key, value, padding=None):
query, key, value = self.wq(query), self.wk(key), self.wv(value)
query, key, value = (x.chunk(self.n_heads, -1) for x in (query, key,
value))
return torch.cat([self.attention(q, k, v, padding=padding) for q, k,
v in zip(query, key, value)], -1)
class LayerNorm(nn.Module):
def __init__(self, d_model, eps=1e-06):
super().__init__()
self.gamma = nn.Parameter(torch.ones(d_model))
self.beta = nn.Parameter(torch.zeros(d_model))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.gamma * (x - mean) / (std + self.eps) + self.beta
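# x.std defaults to the unbiased estimator (correction=1), which is why the
# fused LayerNorm kernels divide the summed squared deviations by 3.0 rather
# than 4.0 for the 4-element feature dimension.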
class ResidualBlock(nn.Module):
def __init__(self, layer, d_model, dropout_ratio):
super().__init__()
self.layer = layer
self.dropout = nn.Dropout(dropout_ratio)
self.layernorm = LayerNorm(d_model)
def forward(self, *x, padding=None):
return self.layernorm(x[0] + self.dropout(self.layer(*x, padding=
padding)))
class TransformerEncoderLayer(nn.Module):
def __init__(self, dimension, n_heads, hidden, dropout):
super().__init__()
self.selfattn = ResidualBlock(MultiHead(dimension, dimension,
n_heads, dropout), dimension, dropout)
self.feedforward = ResidualBlock(LinearReLU(dimension, hidden),
dimension, dropout)
def forward(self, x, padding=None):
return self.feedforward(self.selfattn(x, x, x, padding=padding))
def get_inputs():
return [torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {'dimension': 4, 'n_heads': 4, 'hidden': 4, 'dropout': 0.5}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
import math
from torch import nn
from torch.nn import functional as F
import torch.utils.data
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
@triton.jit
def triton_poi_fused__softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp3 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last'
)
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp4 = tmp3 * tmp1
tmp6 = tmp5 * tmp1
tmp7 = triton_helpers.maximum(tmp4, tmp6)
tmp9 = tmp8 * tmp1
tmp10 = triton_helpers.maximum(tmp7, tmp9)
tmp12 = tmp11 * tmp1
tmp13 = triton_helpers.maximum(tmp10, tmp12)
tmp14 = tmp2 - tmp13
tmp15 = 0.5
tmp16 = tmp14 * tmp15
tmp17 = tl_math.exp(tmp16)
tl.store(out_ptr0 + x2, tmp17, xmask)
@triton.jit
def triton_poi_fused__softmax_1(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr
):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + 4 * x1, xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 + tmp4
tmp7 = tmp5 + tmp6
tmp8 = tmp0 / tmp7
tl.store(out_ptr0 + x2, tmp8, xmask)
@triton.jit
def triton_poi_fused_cat_2(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x1 = xindex // 4
x2 = xindex
tmp0 = x0
tl.full([1], 0, tl.int64)
tmp3 = tl.full([1], 1, tl.int64)
tmp4 = tmp0 < tmp3
tmp5 = tl.load(in_ptr0 + x1, tmp4 & xmask, eviction_policy='evict_last',
other=0.0)
tmp6 = tmp0 >= tmp3
tmp7 = tl.full([1], 2, tl.int64)
tmp8 = tmp0 < tmp7
tmp9 = tmp6 & tmp8
tmp10 = tl.load(in_ptr1 + x1, tmp9 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp11 = tmp0 >= tmp7
tmp12 = tl.full([1], 3, tl.int64)
tmp13 = tmp0 < tmp12
tmp14 = tmp11 & tmp13
tmp15 = tl.load(in_ptr2 + x1, tmp14 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp16 = tmp0 >= tmp12
tl.full([1], 4, tl.int64)
tmp19 = tl.load(in_ptr3 + x1, tmp16 & xmask, eviction_policy=
'evict_last', other=0.0)
tmp20 = tl.where(tmp14, tmp15, tmp19)
tmp21 = tl.where(tmp9, tmp10, tmp20)
tmp22 = tl.where(tmp4, tmp5, tmp21)
tl.store(out_ptr0 + x2, tmp22, xmask)
@triton.jit
def triton_poi_fused_add_mean_std_3(in_out_ptr0, in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 16
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex
tmp0 = tl.load(in_ptr0 + 4 * x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp11 = tl.load(in_ptr0 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
    tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp5 = tmp3 + tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 + tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 + tmp12
tmp14 = tmp10 + tmp13
tmp15 = 4.0
tmp16 = tmp14 / tmp15
tmp17 = tmp2 - tmp16
tmp18 = tmp17 * tmp17
tmp19 = tmp5 - tmp16
tmp20 = tmp19 * tmp19
tmp21 = tmp18 + tmp20
tmp22 = tmp9 - tmp16
tmp23 = tmp22 * tmp22
tmp24 = tmp21 + tmp23
tmp25 = tmp13 - tmp16
tmp26 = tmp25 * tmp25
tmp27 = tmp24 + tmp26
tmp28 = 3.0
tmp29 = tmp27 / tmp28
tl.store(in_out_ptr0 + x0, tmp29, xmask)
tl.store(out_ptr0 + x0, tmp16, xmask)
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_4(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, in_ptr4, in_ptr5, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr2 + x2, xmask)
tmp4 = tl.load(in_ptr3 + x1, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr4 + x1, xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr5 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp5 = tmp3 - tmp4
tmp6 = tmp0 * tmp5
tmp8 = libdevice.sqrt(tmp7)
tmp9 = 1e-06
tmp10 = tmp8 + tmp9
tmp11 = tmp6 / tmp10
tmp13 = tmp11 + tmp12
tl.store(out_ptr0 + x2, tmp13, xmask)
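# The remaining kernels cover the feed-forward block: fused bias-add + ReLU
# (also recording the <= 0 mask for the backward threshold pass), the second
# residual add, and the final fused LayerNorm.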
@triton.jit
def triton_poi_fused_relu_threshold_backward_5(in_out_ptr0, in_ptr0,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_out_ptr0 + x2, xmask)
tmp1 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp2 = tmp0 + tmp1
tmp3 = tl.full([1], 0, tl.int32)
tmp4 = triton_helpers.maximum(tmp3, tmp2)
tmp5 = 0.0
tmp6 = tmp4 <= tmp5
tl.store(in_out_ptr0 + x2, tmp4, xmask)
tl.store(out_ptr0 + x2, tmp6, xmask)
@triton.jit
def triton_poi_fused_add_6(in_out_ptr0, in_ptr0, in_ptr1, xnumel, XBLOCK:
tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + x2, xmask)
tmp1 = tl.load(in_out_ptr0 + x2, xmask)
tmp2 = tl.load(in_ptr1 + x0, xmask, eviction_policy='evict_last')
tmp3 = tmp1 + tmp2
tmp4 = tmp0 + tmp3
tl.store(in_out_ptr0 + x2, tmp4, xmask)
@triton.jit
def triton_poi_fused_add_div_mean_mul_std_sub_7(in_ptr0, in_ptr1, in_ptr2,
out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex
x1 = xindex // 4
tmp0 = tl.load(in_ptr0 + x0, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + x2, xmask)
tmp2 = tl.load(in_ptr1 + 4 * x1, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (1 + 4 * x1), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr1 + (2 + 4 * x1), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (3 + 4 * x1), xmask, eviction_policy='evict_last')
tmp30 = tl.load(in_ptr2 + x0, xmask, eviction_policy='evict_last')
tmp4 = tmp2 + tmp3
tmp6 = tmp4 + tmp5
tmp8 = tmp6 + tmp7
tmp9 = 4.0
tmp10 = tmp8 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp0 * tmp11
tmp13 = tmp2 - tmp10
tmp14 = tmp13 * tmp13
tmp15 = tmp3 - tmp10
tmp16 = tmp15 * tmp15
tmp17 = tmp14 + tmp16
tmp18 = tmp5 - tmp10
tmp19 = tmp18 * tmp18
tmp20 = tmp17 + tmp19
tmp21 = tmp7 - tmp10
tmp22 = tmp21 * tmp21
tmp23 = tmp20 + tmp22
tmp24 = 3.0
tmp25 = tmp23 / tmp24
tmp26 = libdevice.sqrt(tmp25)
tmp27 = 1e-06
tmp28 = tmp26 + tmp27
tmp29 = tmp12 / tmp28
tmp31 = tmp29 + tmp30
tl.store(out_ptr0 + x2, tmp31, xmask)
def call(args):
(primals_1, primals_2, primals_3, primals_4, primals_5, primals_6,
primals_7, primals_8, primals_9, primals_10, primals_11, primals_12
) = args
args.clear()
assert_size_stride(primals_1, (4, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4), (4, 1))
assert_size_stride(primals_3, (4, 4), (4, 1))
assert_size_stride(primals_4, (4, 4), (4, 1))
assert_size_stride(primals_5, (4,), (1,))
assert_size_stride(primals_6, (4,), (1,))
assert_size_stride(primals_7, (4, 4), (4, 1))
assert_size_stride(primals_8, (4,), (1,))
assert_size_stride(primals_9, (4, 4), (4, 1))
assert_size_stride(primals_10, (4,), (1,))
assert_size_stride(primals_11, (4,), (1,))
assert_size_stride(primals_12, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_2, (4, 4), (1, 4), 0), out=buf0)
del primals_2
buf1 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_3, (4, 4), (1, 4), 0), out=buf1)
del primals_3
buf2 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(primals_1, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_4, (4, 4), (1, 4), 0), out=buf2)
del primals_4
buf3 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1),
0), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 0), out=buf3)
buf4 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__softmax_0[grid(64)](buf3, buf4, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf5 = buf3
del buf3
triton_poi_fused__softmax_1[grid(64)](buf4, buf5, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf6 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf5, reinterpret_tensor(buf2, (4, 4, 1), (16, 4,
1), 0), out=buf6)
buf7 = buf4
del buf4
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1),
1), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 1), out=buf7)
buf8 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_0[grid(64)](buf7, buf8, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf9 = buf7
del buf7
triton_poi_fused__softmax_1[grid(64)](buf8, buf9, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf10 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf9, reinterpret_tensor(buf2, (4, 4, 1), (16, 4,
1), 1), out=buf10)
buf11 = buf8
del buf8
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1),
2), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 2), out=buf11)
buf12 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_0[grid(64)](buf11, buf12, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf13 = buf11
del buf11
triton_poi_fused__softmax_1[grid(64)](buf12, buf13, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf14 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf13, reinterpret_tensor(buf2, (4, 4, 1), (16,
4, 1), 2), out=buf14)
buf15 = buf12
del buf12
extern_kernels.bmm(reinterpret_tensor(buf0, (4, 4, 1), (16, 4, 1),
3), reinterpret_tensor(buf1, (4, 1, 4), (16, 1, 4), 3), out=buf15)
buf16 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__softmax_0[grid(64)](buf15, buf16, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf17 = buf15
del buf15
triton_poi_fused__softmax_1[grid(64)](buf16, buf17, 64, XBLOCK=64,
num_warps=1, num_stages=1)
buf18 = empty_strided_cuda((4, 4, 1), (4, 1, 1), torch.float32)
extern_kernels.bmm(buf17, reinterpret_tensor(buf2, (4, 4, 1), (16,
4, 1), 3), out=buf18)
buf19 = buf16
del buf16
triton_poi_fused_cat_2[grid(64)](buf6, buf10, buf14, buf18, buf19,
64, XBLOCK=64, num_warps=1, num_stages=1)
del buf10
del buf14
buf20 = reinterpret_tensor(buf6, (4, 4, 1), (4, 1, 16), 0)
del buf6
buf21 = buf20
del buf20
buf22 = reinterpret_tensor(buf18, (4, 4, 1), (4, 1, 16), 0)
del buf18
triton_poi_fused_add_mean_std_3[grid(16)](buf21, primals_1, buf19,
buf22, 16, XBLOCK=16, num_warps=1, num_stages=1)
buf23 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_std_sub_4[grid(64)](primals_5,
primals_1, buf19, buf22, buf21, primals_6, buf23, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del buf21
del buf22
del primals_6
buf24 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf23, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_7, (4, 4), (1, 4), 0), out=buf24)
buf25 = reinterpret_tensor(buf24, (4, 4, 4), (16, 4, 1), 0)
del buf24
buf29 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
triton_poi_fused_relu_threshold_backward_5[grid(64)](buf25,
primals_8, buf29, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_8
buf26 = empty_strided_cuda((16, 4), (4, 1), torch.float32)
extern_kernels.mm(reinterpret_tensor(buf25, (16, 4), (4, 1), 0),
reinterpret_tensor(primals_9, (4, 4), (1, 4), 0), out=buf26)
buf27 = reinterpret_tensor(buf26, (4, 4, 4), (16, 4, 1), 0)
del buf26
triton_poi_fused_add_6[grid(64)](buf27, buf23, primals_10, 64,
XBLOCK=64, num_warps=1, num_stages=1)
del primals_10
buf28 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused_add_div_mean_mul_std_sub_7[grid(64)](primals_11,
buf27, primals_12, buf28, 64, XBLOCK=64, num_warps=1, num_stages=1)
del primals_12
return (buf28, primals_1, primals_5, primals_11, buf5, buf9, buf13,
buf17, buf19, reinterpret_tensor(buf23, (16, 4), (4, 1), 0),
reinterpret_tensor(buf25, (16, 4), (4, 1), 0), buf27, primals_9,
buf29, primals_7, reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 3
), reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 3),
reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 3),
reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 2),
reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 2),
reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 2),
reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 1),
reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 1),
reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 1),
reinterpret_tensor(buf2, (4, 1, 4), (16, 1, 4), 0),
reinterpret_tensor(buf0, (4, 1, 4), (16, 1, 4), 0),
reinterpret_tensor(buf1, (4, 4, 1), (16, 4, 1), 0))
def matmul(x, y):
if x.dim() == y.dim():
return x @ y
if x.dim() == y.dim() - 1:
return (x.unsqueeze(-2) @ y).squeeze(-2)
return (x @ y.unsqueeze(-2)).squeeze(-2)
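# Hedged shape sketch for the first two branches above (hypothetical tensors;
# not part of the compiled graph):
def _matmul_shape_demo():
    a = torch.randn(2, 3, 4)
    b = torch.randn(2, 4, 5)
    assert matmul(a, b).shape == (2, 3, 5)  # equal dims: plain batched @
    c = torch.randn(2, 4)  # one dim fewer than b
    assert matmul(c, b).shape == (2, 5)  # unsqueeze(-2) @ b, then squeeze(-2)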
class Linear(nn.Linear):
def forward(self, x):
size = x.size()
        return super().forward(x.contiguous().view(-1, size[-1])).view(*size[:-1], -1)
class Feedforward(nn.Module):
def __init__(self, d_in, d_out, activation=None, bias=True, dropout=0.2):
super().__init__()
if activation is not None:
self.activation = getattr(torch, activation)
else:
self.activation = lambda x: x
self.linear = Linear(d_in, d_out, bias=bias)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
return self.activation(self.linear(self.dropout(x)))
class LinearReLU(nn.Module):
def __init__(self, d_model, d_hidden):
super().__init__()
self.feedforward = Feedforward(d_model, d_hidden, activation='relu')
self.linear = Linear(d_hidden, d_model)
def forward(self, x, padding=None):
return self.linear(self.feedforward(x))
class Attention(nn.Module):
def __init__(self, d_key, dropout_ratio, causal):
super().__init__()
self.scale = math.sqrt(d_key)
self.dropout = nn.Dropout(dropout_ratio)
self.causal = causal
def forward(self, query, key, value, padding=None):
dot_products = matmul(query, key.transpose(1, 2))
if query.dim() == 3 and self.causal:
tri = key.new_ones((key.size(1), key.size(1))).triu(1) * INF
dot_products.sub_(tri.unsqueeze(0))
if padding is not None:
dot_products.masked_fill_(padding.unsqueeze(1).expand_as(
dot_products), -INF)
        return matmul(self.dropout(F.softmax(dot_products / self.scale, dim=-1)), value)
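# Hedged usage sketch of the eager attention above (hypothetical sizes;
# dropout_ratio=0.0 keeps it deterministic):
def _attention_demo():
    attn = Attention(d_key=4, dropout_ratio=0.0, causal=False)
    q = k = v = torch.randn(2, 5, 4)
    out = attn(q, k, v)  # softmax(q @ k^T / sqrt(d_key)) @ v
    assert out.shape == (2, 5, 4)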
class MultiHead(nn.Module):
def __init__(self, d_key, d_value, n_heads, dropout_ratio, causal=False):
super().__init__()
self.attention = Attention(d_key, dropout_ratio, causal=causal)
self.wq = Linear(d_key, d_key, bias=False)
self.wk = Linear(d_key, d_key, bias=False)
self.wv = Linear(d_value, d_value, bias=False)
self.n_heads = n_heads
def forward(self, query, key, value, padding=None):
query, key, value = self.wq(query), self.wk(key), self.wv(value)
query, key, value = (x.chunk(self.n_heads, -1) for x in (query, key,
value))
return torch.cat([self.attention(q, k, v, padding=padding) for q, k,
v in zip(query, key, value)], -1)
class LayerNorm(nn.Module):
def __init__(self, d_model, eps=1e-06):
super().__init__()
self.gamma = nn.Parameter(torch.ones(d_model))
self.beta = nn.Parameter(torch.zeros(d_model))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.gamma * (x - mean) / (std + self.eps) + self.beta
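# Note: Tensor.std defaults to the unbiased estimator (divides by n - 1),
# which is why the fused kernels above divide the summed squared deviations
# by 3.0 for a last dimension of size 4; nn.LayerNorm would use the biased
# variance instead.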
class ResidualBlock(nn.Module):
def __init__(self, layer, d_model, dropout_ratio):
super().__init__()
self.layer = layer
self.dropout = nn.Dropout(dropout_ratio)
self.layernorm = LayerNorm(d_model)
def forward(self, *x, padding=None):
        return self.layernorm(x[0] + self.dropout(self.layer(*x, padding=padding)))
class TransformerEncoderLayerNew(nn.Module):
def __init__(self, dimension, n_heads, hidden, dropout):
super().__init__()
self.selfattn = ResidualBlock(MultiHead(dimension, dimension,
n_heads, dropout), dimension, dropout)
self.feedforward = ResidualBlock(LinearReLU(dimension, hidden),
dimension, dropout)
def forward(self, input_0):
primals_2 = self.selfattn.layer.wq.weight
primals_3 = self.selfattn.layer.wk.weight
primals_4 = self.selfattn.layer.wv.weight
primals_5 = self.selfattn.layernorm.gamma
primals_6 = self.selfattn.layernorm.beta
primals_7 = self.feedforward.layer.feedforward.linear.weight
primals_8 = self.feedforward.layer.feedforward.linear.bias
primals_9 = self.feedforward.layer.linear.weight
primals_10 = self.feedforward.layer.linear.bias
primals_11 = self.feedforward.layernorm.gamma
primals_12 = self.feedforward.layernorm.beta
primals_1 = input_0
output = call([primals_1, primals_2, primals_3, primals_4,
primals_5, primals_6, primals_7, primals_8, primals_9,
primals_10, primals_11, primals_12])
return output[0]
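# Hedged usage sketch (comment only, since `call` allocates CUDA buffers):
#   layer = TransformerEncoderLayerNew(dimension=4, n_heads=4, hidden=4,
#                                      dropout=0.0).cuda()
#   out = layer(torch.randn(4, 4, 4, device='cuda'))  # out: (4, 4, 4)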
|
FGDBTKD/decaNLP
|
TransformerEncoderLayer
| false | 13,727 |
[
"BSD-3-Clause"
] | 2,361 |
ff2d7e18cc226197bb8fe5fe796c4b8bc0395e86
|
https://github.com/FGDBTKD/decaNLP/tree/ff2d7e18cc226197bb8fe5fe796c4b8bc0395e86
|
AveragePooling
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/4d/c4dx5dtglp5hpi3omo5xmukglcgv7f2ug2u4gm65rtchytndj27z.py
# Topologically Sorted Source Nodes: [masked_fill_, x_sum, x_num_1, truediv], Original ATen: [aten.masked_fill, aten.sum, aten.clamp, aten.div]
# Source node to ATen node mapping:
# masked_fill_ => full_default, where
# truediv => div
# x_num_1 => clamp_min
# x_sum => sum_1
# Graph fragment:
# %full_default : [num_users=1] = call_function[target=torch.ops.aten.full.default](args = ([], 0.0), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%expand, %full_default, %arg0_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%where, [1]), kwargs = {})
# %clamp_min : [num_users=1] = call_function[target=torch.ops.aten.clamp_min.default](args = (%expand_1, 1), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_1, %clamp_min), kwargs = {})
triton_poi_fused_clamp_div_masked_fill_sum_0 = async_compile.triton('triton_poi_fused_clamp_div_masked_fill_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_clamp_div_masked_fill_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_clamp_div_masked_fill_sum_0(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = (xindex // 16)
x3 = xindex % 16
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr1 + (x3 + (64*x2)), xmask)
tmp5 = tl.load(in_ptr0 + (4 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr1 + (16 + x3 + (64*x2)), xmask)
tmp10 = tl.load(in_ptr0 + (8 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (32 + x3 + (64*x2)), xmask)
tmp15 = tl.load(in_ptr0 + (12 + x0 + (16*x2)), xmask, eviction_policy='evict_last')
tmp17 = tl.load(in_ptr1 + (48 + x3 + (64*x2)), xmask)
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tmp4 = tl.where(tmp2, tmp1, tmp3)
tmp6 = tmp5 == tmp1
tmp8 = tl.where(tmp6, tmp1, tmp7)
tmp9 = tmp4 + tmp8
tmp11 = tmp10 == tmp1
tmp13 = tl.where(tmp11, tmp1, tmp12)
tmp14 = tmp9 + tmp13
tmp16 = tmp15 == tmp1
tmp18 = tl.where(tmp16, tmp1, tmp17)
tmp19 = tmp14 + tmp18
tmp20 = 1.0
tmp21 = tmp0 == tmp20
tmp22 = tmp21.to(tl.float32)
tmp23 = tmp5 == tmp20
tmp24 = tmp23.to(tl.float32)
tmp25 = tmp22 + tmp24
tmp26 = tmp10 == tmp20
tmp27 = tmp26.to(tl.float32)
tmp28 = tmp25 + tmp27
tmp29 = tmp15 == tmp20
tmp30 = tmp29.to(tl.float32)
tmp31 = tmp28 + tmp30
tmp32 = triton_helpers.maximum(tmp31, tmp20)
tmp33 = tmp19 / tmp32
tl.store(out_ptr0 + (x4), tmp33, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [masked_fill_, x_sum, x_num_1, truediv], Original ATen: [aten.masked_fill, aten.sum, aten.clamp, aten.div]
stream0 = get_raw_stream(0)
triton_poi_fused_clamp_div_masked_fill_sum_0.run(arg1_1, arg0_1, buf0, 64, grid=grid(64), stream=stream0)
del arg0_1
del arg1_1
return (buf0, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class AveragePooling(nn.Module):
def __init__(self):
super(AveragePooling, self).__init__()
"""
(item, subitem) can be (word, characters), or (sentence, words)
x: num_items x max_subitem_size x input_size
x_mask: num_items x max_subitem_size
return num_items x input_size
"""
def forward(self, x, x_mask):
"""
x_output: num_items x input_size x 1 --> num_items x input_size
"""
x_now = x.clone()
empty_mask = x_mask.eq(0).unsqueeze(2).expand_as(x_now)
x_now.data.masked_fill_(empty_mask.data, 0)
x_sum = torch.sum(x_now, 1)
        x_num = torch.sum(x_mask.eq(1).float(), 1).unsqueeze(1).expand_as(x_sum)
x_num = torch.clamp(x_num, min=1)
return x_sum / x_num
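# Hedged eager reference for the forward above (hypothetical helper): zero
# entries whose mask value is exactly 0, sum over the subitem axis, and divide
# by the count of entries whose mask value is exactly 1, clamped to at least 1.
def average_pooling_reference(x, x_mask):
    x_sum = x.masked_fill(x_mask.eq(0).unsqueeze(2).expand_as(x), 0).sum(1)
    x_num = x_mask.eq(1).float().sum(1).unsqueeze(1).clamp(min=1)
    return x_sum / x_num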
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
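# The kernel below fuses the whole AveragePooling forward: entries whose mask
# value is exactly 0 are zeroed before the sum over the subitem axis, and the
# sum is divided by the count of entries whose mask value is exactly 1,
# clamped to at least 1.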
@triton.jit
def triton_poi_fused_clamp_div_masked_fill_sum_0(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 4
x2 = xindex // 16
x3 = xindex % 16
x4 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = tl.load(in_ptr1 + (x3 + 64 * x2), xmask)
tmp5 = tl.load(in_ptr0 + (4 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp7 = tl.load(in_ptr1 + (16 + x3 + 64 * x2), xmask)
tmp10 = tl.load(in_ptr0 + (8 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp12 = tl.load(in_ptr1 + (32 + x3 + 64 * x2), xmask)
tmp15 = tl.load(in_ptr0 + (12 + x0 + 16 * x2), xmask, eviction_policy=
'evict_last')
tmp17 = tl.load(in_ptr1 + (48 + x3 + 64 * x2), xmask)
tmp1 = 0.0
tmp2 = tmp0 == tmp1
tmp4 = tl.where(tmp2, tmp1, tmp3)
tmp6 = tmp5 == tmp1
tmp8 = tl.where(tmp6, tmp1, tmp7)
tmp9 = tmp4 + tmp8
tmp11 = tmp10 == tmp1
tmp13 = tl.where(tmp11, tmp1, tmp12)
tmp14 = tmp9 + tmp13
tmp16 = tmp15 == tmp1
tmp18 = tl.where(tmp16, tmp1, tmp17)
tmp19 = tmp14 + tmp18
tmp20 = 1.0
tmp21 = tmp0 == tmp20
tmp22 = tmp21.to(tl.float32)
tmp23 = tmp5 == tmp20
tmp24 = tmp23.to(tl.float32)
tmp25 = tmp22 + tmp24
tmp26 = tmp10 == tmp20
tmp27 = tmp26.to(tl.float32)
tmp28 = tmp25 + tmp27
tmp29 = tmp15 == tmp20
tmp30 = tmp29.to(tl.float32)
tmp31 = tmp28 + tmp30
tmp32 = triton_helpers.maximum(tmp31, tmp20)
tmp33 = tmp19 / tmp32
tl.store(out_ptr0 + x4, tmp33, xmask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_clamp_div_masked_fill_sum_0[grid(64)](arg1_1,
arg0_1, buf0, 64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del arg1_1
return buf0,
class AveragePoolingNew(nn.Module):
def __init__(self):
super(AveragePoolingNew, self).__init__()
"""
(item, subitem) can be (word, characters), or (sentence, words)
x: num_items x max_subitem_size x input_size
x_mask: num_items x max_subitem_size
return num_items x input_size
"""
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
GingerNg/SDNet
|
AveragePooling
| false | 13,728 |
[
"MIT"
] | 112 |
48ad8cc57c9a02aaad10e34d0c91a174ac68f056
|
https://github.com/GingerNg/SDNet/tree/48ad8cc57c9a02aaad10e34d0c91a174ac68f056
|
CELossWeighted
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/td/ctdj5kazgiki6gdaadhqtp2x7tq2ee5ey5hqqdcoqmp54jyhf74f.py
# Topologically Sorted Source Nodes: [temp], Original ATen: [aten._log_softmax]
# Source node to ATen node mapping:
# temp => amax, sub
# Graph fragment:
# %amax : [num_users=1] = call_function[target=torch.ops.aten.amax.default](args = (%arg1_1, [1], True), kwargs = {})
# %sub : [num_users=2] = call_function[target=torch.ops.aten.sub.Tensor](args = (%arg1_1, %amax), kwargs = {})
triton_poi_fused__log_softmax_0 = async_compile.triton('triton_poi_fused__log_softmax_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[256],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 5, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = (xindex // 64)
tmp0 = tl.load(in_ptr0 + (x3), xmask)
tmp1 = tl.load(in_ptr0 + (x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + (64*x2)), xmask, eviction_policy='evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + (x3), tmp8, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/55/c55jnxqzctcsykbux55atvovnot3atqg2zkgotvahahcn7zcnzea.py
# Topologically Sorted Source Nodes: [temp], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg]
# Source node to ATen node mapping:
# temp => exp, log, mul, neg, sub_1, sum_1, sum_2
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %arg0_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_2,), kwargs = {})
triton_poi_fused__log_softmax_mul_neg_sum_1 = async_compile.triton('triton_poi_fused__log_softmax_mul_neg_sum_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused__log_softmax_mul_neg_sum_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 8, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused__log_softmax_mul_neg_sum_1(in_ptr0, in_ptr1, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = (xindex // 16)
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + (64*x1)), xmask)
tmp2 = tl.load(in_ptr0 + (16 + x0 + (64*x1)), xmask)
tmp5 = tl.load(in_ptr0 + (32 + x0 + (64*x1)), xmask)
tmp8 = tl.load(in_ptr0 + (48 + x0 + (64*x1)), xmask)
tmp13 = tl.load(in_ptr1 + (x0 + (64*x1)), xmask)
tmp16 = tl.load(in_ptr1 + (16 + x0 + (64*x1)), xmask)
tmp20 = tl.load(in_ptr1 + (32 + x0 + (64*x1)), xmask)
tmp24 = tl.load(in_ptr1 + (48 + x0 + (64*x1)), xmask)
tmp1 = tl_math.exp(tmp0)
tmp3 = tl_math.exp(tmp2)
tmp4 = tmp1 + tmp3
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp4 + tmp6
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tl_math.log(tmp10)
tmp12 = tmp0 - tmp11
tmp14 = tmp12 * tmp13
tmp15 = tmp2 - tmp11
tmp17 = tmp15 * tmp16
tmp18 = tmp14 + tmp17
tmp19 = tmp5 - tmp11
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp8 - tmp11
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = -tmp26
tl.store(out_ptr0 + (x2), tmp27, xmask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/cw/ccwyurqpxgu7udomt2dp5hgffrxjdx3id46lnw2mjdyvh52jzrwi.py
# Topologically Sorted Source Nodes: [temp, weight_mask, mul, sum_1, sum_2, loss], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.ones_like, aten.div]
# Source node to ATen node mapping:
# loss => div
# mul => mul_1
# sum_1 => sum_3
# sum_2 => sum_4
# temp => exp, log, mul, neg, sub_1, sum_1, sum_2
# weight_mask => full_default
# Graph fragment:
# %exp : [num_users=1] = call_function[target=torch.ops.aten.exp.default](args = (%sub,), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%exp, [1], True), kwargs = {})
# %log : [num_users=1] = call_function[target=torch.ops.aten.log.default](args = (%sum_1,), kwargs = {})
# %sub_1 : [num_users=1] = call_function[target=torch.ops.aten.sub.Tensor](args = (%sub, %log), kwargs = {})
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%sub_1, %arg0_1), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [1]), kwargs = {})
# %neg : [num_users=1] = call_function[target=torch.ops.aten.neg.default](args = (%sum_2,), kwargs = {})
# %full_default : [num_users=2] = call_function[target=torch.ops.aten.full.default](args = ([4, 4, 4, 4], 1), kwargs = {dtype: torch.float32, layout: torch.strided, device: cuda:0, pin_memory: False})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%neg, %full_default), kwargs = {})
# %sum_3 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%mul_1,), kwargs = {})
# %sum_4 : [num_users=1] = call_function[target=torch.ops.aten.sum.default](args = (%full_default,), kwargs = {})
# %div : [num_users=1] = call_function[target=torch.ops.aten.div.Tensor](args = (%sum_3, %sum_4), kwargs = {})
triton_per_fused__log_softmax_div_mul_neg_ones_like_sum_2 = async_compile.triton('triton_per_fused__log_softmax_div_mul_neg_ones_like_sum_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[1, 256],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {2: 1}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 3), equal_to_1=(2,))]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused__log_softmax_div_mul_neg_ones_like_sum_2', 'mutated_arg_names': ['in_out_ptr0'], 'no_x_dim': True, 'num_load': 1, 'num_reduction': 2, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused__log_softmax_div_mul_neg_ones_like_sum_2(in_out_ptr0, in_ptr0, xnumel, rnumel):
xnumel = 1
XBLOCK: tl.constexpr = 1
rnumel = 256
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = tl.full([1], xoffset, tl.int32)
xmask = tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
roffset = 0
rmask = tl.full([RBLOCK], True, tl.int1)
r0 = rindex % 64
tmp0 = tl.load(in_ptr0 + (r0), None, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.broadcast_to(tmp1, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = tmp5 / tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + (tl.full([1], 0, tl.int32)), tmp9, None)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [temp], Original ATen: [aten._log_softmax]
stream0 = get_raw_stream(0)
triton_poi_fused__log_softmax_0.run(arg1_1, buf0, 256, grid=grid(256), stream=stream0)
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [temp], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg]
triton_poi_fused__log_softmax_mul_neg_sum_1.run(buf0, arg0_1, buf1, 64, grid=grid(64), stream=stream0)
del arg0_1
del buf0
buf2 = empty_strided_cuda((), (), torch.float32)
buf4 = buf2; del buf2 # reuse
# Topologically Sorted Source Nodes: [temp, weight_mask, mul, sum_1, sum_2, loss], Original ATen: [aten._log_softmax, aten.mul, aten.sum, aten.neg, aten.ones_like, aten.div]
triton_per_fused__log_softmax_div_mul_neg_ones_like_sum_2.run(buf4, buf1, 1, 256, grid=grid(1), stream=stream0)
del buf1
return (buf4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class WeightedLoss(nn.Module):
def __init__(self):
super(WeightedLoss, self).__init__()
self.weighted = False
def generate_weight_mask(self, mask, to_ignore=None):
""" Generates a weight mask where pixel weights are inversely proportional to
how many pixels are in the class
@param mask: a [N x ...] torch.FloatTensor with values in {0, 1, 2, ..., K+1}, where K is number of objects. {0,1} are background/table.
@param to_ignore: a list of classes (integers) to ignore when creating mask
@return: a torch.FloatTensor that is same shape as mask.
"""
N = mask.shape[0]
if self.weighted:
weight_mask = torch.zeros_like(mask).float()
for i in range(N):
unique_object_labels = torch.unique(mask[i])
for obj in unique_object_labels:
if to_ignore is not None and obj in to_ignore:
continue
num_pixels = torch.sum(mask[i] == obj, dtype=torch.float)
weight_mask[i, mask[i] == obj] = 1 / num_pixels
else:
weight_mask = torch.ones_like(mask)
if to_ignore is not None:
for obj in to_ignore:
weight_mask[mask == obj] = 0
return weight_mask
class CELossWeighted(WeightedLoss):
""" Compute weighted CE loss with logits
"""
def __init__(self, weighted=False):
super(CELossWeighted, self).__init__()
self.CrossEntropyLoss = nn.CrossEntropyLoss(reduction='none')
self.weighted = weighted
def forward(self, x, target):
""" Compute weighted cross entropy
@param x: a [N x C x H x W] torch.FloatTensor of values
@param target: a [N x H x W] torch.LongTensor of values
"""
temp = self.CrossEntropyLoss(x, target)
weight_mask = self.generate_weight_mask(target)
loss = torch.sum(temp * weight_mask) / torch.sum(weight_mask)
return loss
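# Hedged sanity check: with weighted=False the mask is all ones, so the loss
# reduces to plain mean cross entropy (hypothetical shapes):
def _ce_loss_weighted_demo():
    loss_fn = CELossWeighted()
    x = torch.randn(2, 3, 4, 4)
    target = torch.randint(0, 3, (2, 4, 4))
    assert torch.allclose(loss_fn(x, target), nn.CrossEntropyLoss()(x, target))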
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {}]
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import math as tl_math
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
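# Kernel 0 below is the max-subtraction half of log-softmax over the channel
# axis; kernel 1 finishes log-softmax, multiplies by the (soft) targets, sums
# over channels and negates; kernel 2 averages the per-pixel losses against
# an all-ones weight mask.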
@triton.jit
def triton_poi_fused__log_softmax_0(in_ptr0, out_ptr0, xnumel, XBLOCK: tl.constexpr):
xnumel = 256
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x3 = xindex
x0 = xindex % 16
x2 = xindex // 64
tmp0 = tl.load(in_ptr0 + x3, xmask)
tmp1 = tl.load(in_ptr0 + (x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp4 = tl.load(in_ptr0 + (32 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp6 = tl.load(in_ptr0 + (48 + x0 + 64 * x2), xmask, eviction_policy=
'evict_last')
tmp3 = triton_helpers.maximum(tmp1, tmp2)
tmp5 = triton_helpers.maximum(tmp3, tmp4)
tmp7 = triton_helpers.maximum(tmp5, tmp6)
tmp8 = tmp0 - tmp7
tl.store(out_ptr0 + x3, tmp8, xmask)
@triton.jit
def triton_poi_fused__log_softmax_mul_neg_sum_1(in_ptr0, in_ptr1, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x0 = xindex % 16
x1 = xindex // 16
x2 = xindex
tmp0 = tl.load(in_ptr0 + (x0 + 64 * x1), xmask)
tmp2 = tl.load(in_ptr0 + (16 + x0 + 64 * x1), xmask)
tmp5 = tl.load(in_ptr0 + (32 + x0 + 64 * x1), xmask)
tmp8 = tl.load(in_ptr0 + (48 + x0 + 64 * x1), xmask)
tmp13 = tl.load(in_ptr1 + (x0 + 64 * x1), xmask)
tmp16 = tl.load(in_ptr1 + (16 + x0 + 64 * x1), xmask)
tmp20 = tl.load(in_ptr1 + (32 + x0 + 64 * x1), xmask)
tmp24 = tl.load(in_ptr1 + (48 + x0 + 64 * x1), xmask)
tmp1 = tl_math.exp(tmp0)
tmp3 = tl_math.exp(tmp2)
tmp4 = tmp1 + tmp3
tmp6 = tl_math.exp(tmp5)
tmp7 = tmp4 + tmp6
tmp9 = tl_math.exp(tmp8)
tmp10 = tmp7 + tmp9
tmp11 = tl_math.log(tmp10)
tmp12 = tmp0 - tmp11
tmp14 = tmp12 * tmp13
tmp15 = tmp2 - tmp11
tmp17 = tmp15 * tmp16
tmp18 = tmp14 + tmp17
tmp19 = tmp5 - tmp11
tmp21 = tmp19 * tmp20
tmp22 = tmp18 + tmp21
tmp23 = tmp8 - tmp11
tmp25 = tmp23 * tmp24
tmp26 = tmp22 + tmp25
tmp27 = -tmp26
tl.store(out_ptr0 + x2, tmp27, xmask)
@triton.jit
def triton_per_fused__log_softmax_div_mul_neg_ones_like_sum_2(in_out_ptr0,
in_ptr0, xnumel, rnumel):
XBLOCK: tl.constexpr = 1
RBLOCK: tl.constexpr = 256
xoffset = tl.program_id(0) * XBLOCK
tl.full([1], xoffset, tl.int32)
tl.full([RBLOCK], True, tl.int1)
rindex = tl.arange(0, RBLOCK)[:]
tl.full([RBLOCK], True, tl.int1)
r0 = rindex % 64
tmp0 = tl.load(in_ptr0 + r0, None, eviction_policy='evict_last')
tmp1 = 1.0
tmp2 = tmp0 * tmp1
tmp3 = tl.broadcast_to(tmp2, [RBLOCK])
tmp5 = triton_helpers.promote_to_tensor(tl.sum(tmp3, 0))
tmp6 = tl.broadcast_to(tmp1, [RBLOCK])
tmp8 = triton_helpers.promote_to_tensor(tl.sum(tmp6, 0))
tmp9 = tmp5 / tmp8
tl.debug_barrier()
tl.store(in_out_ptr0 + tl.full([1], 0, tl.int32), tmp9, None)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(arg1_1, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused__log_softmax_0[grid(256)](arg1_1, buf0, 256,
XBLOCK=128, num_warps=4, num_stages=1)
del arg1_1
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
triton_poi_fused__log_softmax_mul_neg_sum_1[grid(64)](buf0, arg0_1,
buf1, 64, XBLOCK=64, num_warps=1, num_stages=1)
del arg0_1
del buf0
buf2 = empty_strided_cuda((), (), torch.float32)
buf4 = buf2
del buf2
        triton_per_fused__log_softmax_div_mul_neg_ones_like_sum_2[grid(1)](
            buf4, buf1, 1, 256, num_warps=2, num_stages=1)
del buf1
return buf4,
class WeightedLoss(nn.Module):
def __init__(self):
super(WeightedLoss, self).__init__()
self.weighted = False
def generate_weight_mask(self, mask, to_ignore=None):
""" Generates a weight mask where pixel weights are inversely proportional to
how many pixels are in the class
@param mask: a [N x ...] torch.FloatTensor with values in {0, 1, 2, ..., K+1}, where K is number of objects. {0,1} are background/table.
@param to_ignore: a list of classes (integers) to ignore when creating mask
@return: a torch.FloatTensor that is same shape as mask.
"""
N = mask.shape[0]
if self.weighted:
weight_mask = torch.zeros_like(mask).float()
for i in range(N):
unique_object_labels = torch.unique(mask[i])
for obj in unique_object_labels:
if to_ignore is not None and obj in to_ignore:
continue
num_pixels = torch.sum(mask[i] == obj, dtype=torch.float)
weight_mask[i, mask[i] == obj] = 1 / num_pixels
else:
weight_mask = torch.ones_like(mask)
if to_ignore is not None:
for obj in to_ignore:
weight_mask[mask == obj] = 0
return weight_mask
class CELossWeightedNew(WeightedLoss):
""" Compute weighted CE loss with logits
"""
def __init__(self, weighted=False):
super(CELossWeightedNew, self).__init__()
self.CrossEntropyLoss = nn.CrossEntropyLoss(reduction='none')
self.weighted = weighted
def forward(self, input_0, input_1):
arg0_1 = input_0
arg1_1 = input_1
output = call([arg0_1, arg1_1])
return output[0]
|
Guangyun-Xu/uois
|
CELossWeighted
| false | 13,729 |
[
"MIT"
] | 106 |
00069af841dd3ea9a86e6e3a89c3b7222240e6e5
|
https://github.com/Guangyun-Xu/uois/tree/00069af841dd3ea9a86e6e3a89c3b7222240e6e5
|
Conv2d_GN_ReLU
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/q7/cq7pxaiyrgc62grpa5ita4alscmzqq4bhtgaie42fm2yecjcuoou.py
# Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.native_group_norm, aten.relu, aten.threshold_backward]
# Source node to ATen node mapping:
# out_1 => add, add_1, mul_1, rsqrt, var_mean
# out_2 => relu
# Graph fragment:
# %var_mean : [num_users=2] = call_function[target=torch.ops.aten.var_mean.correction](args = (%view, [2, 3]), kwargs = {correction: 0, keepdim: True})
# %add : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%getitem, 1e-05), kwargs = {})
# %rsqrt : [num_users=2] = call_function[target=torch.ops.aten.rsqrt.default](args = (%add,), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%view_1, %unsqueeze_5), kwargs = {})
# %add_1 : [num_users=1] = call_function[target=torch.ops.aten.add.Tensor](args = (%mul_1, %unsqueeze_2), kwargs = {})
# %relu : [num_users=2] = call_function[target=torch.ops.aten.relu.default](args = (%add_1,), kwargs = {})
# %le : [num_users=1] = call_function[target=torch.ops.aten.le.Scalar](args = (%relu, 0), kwargs = {})
triton_per_fused_native_group_norm_relu_threshold_backward_0 = async_compile.triton('triton_per_fused_native_group_norm_relu_threshold_backward_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.persistent_reduction(
size_hints=[4, 64],
reduction_hint=ReductionHint.INNER,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: '*i1', 6: '*fp32', 7: 'i32', 8: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6, 8), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_per_fused_native_group_norm_relu_threshold_backward_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 3, 'num_reduction': 4, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False}
)
@triton.jit
def triton_per_fused_native_group_norm_relu_threshold_backward_0(in_ptr0, in_ptr1, in_ptr2, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel, rnumel, XBLOCK : tl.constexpr):
xnumel = 4
rnumel = 64
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
roffset = 0
rmask = tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
r3 = (rindex // 16)
tmp0 = tl.load(in_ptr0 + (r1 + (64*x0)), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + (r3), None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + (r3), None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tmp3 = tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 64.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = tl.full([1, 1], 0, tl.int32)
tmp29 = triton_helpers.maximum(tmp28, tmp27)
tmp30 = 0.0
tmp31 = tmp29 <= tmp30
tl.store(out_ptr2 + (r1 + (64*x0)), tmp29, xmask)
tl.store(out_ptr3 + (r1 + (64*x0)), tmp31, xmask)
tl.store(out_ptr4 + (x0), tmp22, xmask)
tl.store(out_ptr0 + (x0), tmp10, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, ), (1, ))
assert_size_stride(primals_4, (4, ), (1, ))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
# Topologically Sorted Source Nodes: [out], Original ATen: [aten.convolution]
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1, 1), padding=(1, 1), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf4 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
# Topologically Sorted Source Nodes: [out_1, out_2], Original ATen: [aten.native_group_norm, aten.relu, aten.threshold_backward]
stream0 = get_raw_stream(0)
triton_per_fused_native_group_norm_relu_threshold_backward_0.run(buf0, primals_3, primals_4, buf1, buf5, buf6, buf4, 4, 64, grid=grid(4), stream=stream0)
del primals_4
return (buf5, primals_1, primals_2, primals_3, buf0, reinterpret_tensor(buf1, (4, 1), (1, 1), 0), reinterpret_tensor(buf4, (4, 1), (1, 1), 0), buf6, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((4, 4, 3, 3), (36, 9, 3, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, ), (1, ), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn as nn
class Conv2d_GN_ReLU(nn.Module):
""" Implements a module that performs
    conv2d + groupnorm + ReLU.
    Assumes the kernel size is odd.
"""
    def __init__(self, in_channels, out_channels, num_groups, ksize=3, stride=1):
super(Conv2d_GN_ReLU, self).__init__()
padding = 0 if ksize < 2 else ksize // 2
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=ksize,
stride=stride, padding=padding, bias=False)
self.gn1 = nn.GroupNorm(num_groups, out_channels)
self.relu1 = nn.ReLU(inplace=True)
def forward(self, x):
out = self.conv1(x)
out = self.gn1(out)
out = self.relu1(out)
return out
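# Hedged shape check (hypothetical sizes): with stride 1 and padding
# ksize // 2, the spatial dimensions are preserved and ReLU keeps the
# output non-negative.
def _conv_gn_relu_demo():
    block = Conv2d_GN_ReLU(in_channels=4, out_channels=8, num_groups=2)
    out = block(torch.rand(1, 4, 16, 16))
    assert out.shape == (1, 8, 16, 16) and bool((out >= 0).all())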
def get_inputs():
return [torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'in_channels': 4, 'out_channels': 4, 'num_groups': 1}]
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
from torch._inductor.runtime import triton_helpers
from torch._inductor.runtime.triton_helpers import libdevice
import torch.nn as nn
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
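# The kernel below fuses GroupNorm over a single group (mean and biased
# variance across all 64 elements per sample, then rsqrt), the affine
# transform, and ReLU, while also emitting the <= 0 mask consumed by the
# backward threshold pass.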
@triton.jit
def triton_per_fused_native_group_norm_relu_threshold_backward_0(in_ptr0,
in_ptr1, in_ptr2, out_ptr0, out_ptr2, out_ptr3, out_ptr4, xnumel,
rnumel, XBLOCK: tl.constexpr):
xnumel = 4
RBLOCK: tl.constexpr = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
rindex = tl.arange(0, RBLOCK)[None, :]
tl.full([XBLOCK, RBLOCK], True, tl.int1)
r1 = rindex
x0 = xindex
r3 = rindex // 16
tmp0 = tl.load(in_ptr0 + (r1 + 64 * x0), xmask, other=0.0)
tmp24 = tl.load(in_ptr1 + r3, None, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + r3, None, eviction_policy='evict_last')
tmp1 = tl.broadcast_to(tmp0, [XBLOCK, RBLOCK])
tl.where(xmask, tmp1, 0)
tmp4 = tl.broadcast_to(tmp1, [XBLOCK, RBLOCK])
tmp6 = tl.where(xmask, tmp4, 0)
tmp7 = tl.sum(tmp6, 1)[:, None]
tmp8 = tl.full([XBLOCK, 1], 64, tl.int32)
tmp9 = tmp8.to(tl.float32)
tmp10 = tmp7 / tmp9
tmp11 = tmp1 - tmp10
tmp12 = tmp11 * tmp11
tmp13 = tl.broadcast_to(tmp12, [XBLOCK, RBLOCK])
tmp15 = tl.where(xmask, tmp13, 0)
tmp16 = tl.sum(tmp15, 1)[:, None]
tmp17 = tmp0 - tmp10
tmp18 = 64.0
tmp19 = tmp16 / tmp18
tmp20 = 1e-05
tmp21 = tmp19 + tmp20
tmp22 = libdevice.rsqrt(tmp21)
tmp23 = tmp17 * tmp22
tmp25 = tmp23 * tmp24
tmp27 = tmp25 + tmp26
tmp28 = tl.full([1, 1], 0, tl.int32)
tmp29 = triton_helpers.maximum(tmp28, tmp27)
tmp30 = 0.0
tmp31 = tmp29 <= tmp30
tl.store(out_ptr2 + (r1 + 64 * x0), tmp29, xmask)
tl.store(out_ptr3 + (r1 + 64 * x0), tmp31, xmask)
tl.store(out_ptr4 + x0, tmp22, xmask)
tl.store(out_ptr0 + x0, tmp10, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (4, 4, 3, 3), (36, 9, 3, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4,), (1,))
assert_size_stride(primals_4, (4,), (1,))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = extern_kernels.convolution(primals_2, primals_1, stride=(1,
1), padding=(1, 1), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf0, (4, 4, 4, 4), (64, 16, 4, 1))
buf1 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
buf5 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.float32)
buf6 = empty_strided_cuda((4, 4, 4, 4), (64, 16, 4, 1), torch.bool)
buf4 = empty_strided_cuda((4, 1, 1, 1), (1, 4, 4, 4), torch.float32)
get_raw_stream(0)
triton_per_fused_native_group_norm_relu_threshold_backward_0[grid(4)](
buf0, primals_3, primals_4, buf1, buf5, buf6, buf4, 4, 64,
XBLOCK=1, num_warps=2, num_stages=1)
del primals_4
return buf5, primals_1, primals_2, primals_3, buf0, reinterpret_tensor(buf1
, (4, 1), (1, 1), 0), reinterpret_tensor(buf4, (4, 1), (1, 1), 0), buf6
class Conv2d_GN_ReLUNew(nn.Module):
""" Implements a module that performs
        conv2d + groupnorm + ReLU.
        Assumes the kernel size is odd.
"""
def __init__(self, in_channels, out_channels, num_groups, ksize=3, stride=1
):
super(Conv2d_GN_ReLUNew, self).__init__()
padding = 0 if ksize < 2 else ksize // 2
self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=ksize,
stride=stride, padding=padding, bias=False)
self.gn1 = nn.GroupNorm(num_groups, out_channels)
self.relu1 = nn.ReLU(inplace=True)
def forward(self, input_0):
primals_1 = self.conv1.weight
primals_3 = self.gn1.weight
primals_4 = self.gn1.bias
primals_2 = input_0
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
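# Hedged equivalence sketch (illustrative; assumes a CUDA device): the
# compiled wrapper should match eager conv2d + group_norm + relu computed
# from the same parameters.
def _check_compiled_conv2d_gn_relu():
    import torch.nn.functional as F
    m = Conv2d_GN_ReLUNew(4, 4, 1).cuda()
    x = torch.rand(4, 4, 4, 4, device='cuda')
    ref = F.relu(F.group_norm(F.conv2d(x, m.conv1.weight, padding=1),
                              1, m.gn1.weight, m.gn1.bias))
    assert torch.allclose(m(x), ref, atol=1e-5)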
|
Guangyun-Xu/uois
|
Conv2d_GN_ReLU
| false | 13,730 |
[
"MIT"
] | 106 |
00069af841dd3ea9a86e6e3a89c3b7222240e6e5
|
https://github.com/Guangyun-Xu/uois/tree/00069af841dd3ea9a86e6e3a89c3b7222240e6e5
|
CosAttention
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/tn/ctnflfpgkxcj4tzximbprecg4kfrgg46s62bv5hdfn6wxh7xshw6.py
# Topologically Sorted Source Nodes: [mul, mul_1, alpha, alpha_1], Original ATen: [aten.mul, aten.sum]
# Source node to ATen node mapping:
# alpha => mul_2
# alpha_1 => sum_1
# mul => mul
# mul_1 => mul_1
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %primals_1), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul, %primals_3), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%mul_1, %primals_4), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_2, [-1]), kwargs = {})
triton_poi_fused_mul_sum_0 = async_compile.triton('triton_poi_fused_mul_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*fp32', 5: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_mul_sum_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (4*x2), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + (4*x2), xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + (4*x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (1 + (4*x2)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (1 + (4*x2)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr3 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr0 + (2 + (4*x2)), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr2 + (2 + (4*x2)), xmask, eviction_policy='evict_last')
tmp20 = tl.load(in_ptr3 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last')
tmp24 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + (3 + (4*x2)), xmask, eviction_policy='evict_last')
tmp28 = tl.load(in_ptr3 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp9 = tmp7 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 * tmp12
tmp14 = tmp6 + tmp13
tmp17 = tmp15 * tmp16
tmp19 = tmp17 * tmp18
tmp21 = tmp19 * tmp20
tmp22 = tmp14 + tmp21
tmp25 = tmp23 * tmp24
tmp27 = tmp25 * tmp26
tmp29 = tmp27 * tmp28
tmp30 = tmp22 + tmp29
tl.store(out_ptr0 + (x2), tmp30, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, mul_1, alpha, alpha_1], Original ATen: [aten.mul, aten.sum]
stream0 = get_raw_stream(0)
triton_poi_fused_mul_sum_0.run(primals_2, primals_1, primals_3, primals_4, buf0, 64, grid=grid(64), stream=stream0)
return (buf0, primals_1, primals_2, primals_3, primals_4, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((1, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Parameter
class ConstAttention(nn.Module):
def __init__(self, **kwargs):
super(ConstAttention, self).__init__()
def forward(self, neighbor_vecs, self_vecs):
return 1
class GatAttention(ConstAttention):
def __init__(self, num_heads, out_channels):
super(GatAttention, self).__init__()
self.num_heads = num_heads
self.out_channels = out_channels
self.att_self_weight = Parameter(torch.Tensor(1, self.num_heads,
self.out_channels))
self.att_neighbor_weight = Parameter(torch.Tensor(1, self.num_heads,
self.out_channels))
self.reset_parameters()
def reset_parameters(self):
pass
def forward(self, neighbor_vecs, self_vecs):
alpha = (self_vecs * self.att_self_weight).sum(dim=-1) + (neighbor_vecs
* self.att_neighbor_weight).sum(dim=-1)
alpha = F.leaky_relu(alpha, negative_slope=0.2)
return alpha
class CosAttention(GatAttention):
def forward(self, neighbor_vecs, self_vecs):
alpha = (neighbor_vecs * self.att_neighbor_weight * self_vecs *
self.att_self_weight)
alpha = alpha.sum(dim=-1)
return alpha
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_heads': 4, 'out_channels': 4}]
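# Hedged usage sketch (added for illustration; not in the original source):
# CosAttention reduces the channel dimension, so the per-head scores come
# out as (4, 4, 4). The weights are filled here because reset_parameters()
# leaves them uninitialized.
def _demo_cos_attention():
    att = CosAttention(num_heads=4, out_channels=4)
    with torch.no_grad():
        att.att_self_weight.uniform_()
        att.att_neighbor_weight.uniform_()
    alpha = att(torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4))
    assert alpha.shape == (4, 4, 4)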
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_mul_sum_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr0,
xnumel, XBLOCK: tl.constexpr):
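    # One program element per output score: load the 4 channel values of
    # both inputs (at 4 * x2) and of both (1, heads, channels) weights
    # (at 4 * x0, where x0 = xindex % 4 picks the head), multiply all four
    # elementwise, and accumulate the channel sum.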
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr2 + 4 * x2, xmask, eviction_policy='evict_last')
tmp5 = tl.load(in_ptr3 + 4 * x0, xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp10 = tl.load(in_ptr2 + (1 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr3 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp16 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp18 = tl.load(in_ptr2 + (2 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp20 = tl.load(in_ptr3 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp23 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp24 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp26 = tl.load(in_ptr2 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp28 = tl.load(in_ptr3 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 * tmp1
tmp4 = tmp2 * tmp3
tmp6 = tmp4 * tmp5
tmp9 = tmp7 * tmp8
tmp11 = tmp9 * tmp10
tmp13 = tmp11 * tmp12
tmp14 = tmp6 + tmp13
tmp17 = tmp15 * tmp16
tmp19 = tmp17 * tmp18
tmp21 = tmp19 * tmp20
tmp22 = tmp14 + tmp21
tmp25 = tmp23 * tmp24
tmp27 = tmp25 * tmp26
tmp29 = tmp27 * tmp28
tmp30 = tmp22 + tmp29
tl.store(out_ptr0 + x2, tmp30, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_4, (1, 4, 4), (16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_mul_sum_0[grid(64)](primals_2, primals_1,
primals_3, primals_4, buf0, 64, XBLOCK=64, num_warps=1,
num_stages=1)
return buf0, primals_1, primals_2, primals_3, primals_4
class ConstAttention(nn.Module):
def __init__(self, **kwargs):
super(ConstAttention, self).__init__()
def forward(self, neighbor_vecs, self_vecs):
return 1
class GatAttention(ConstAttention):
def __init__(self, num_heads, out_channels):
super(GatAttention, self).__init__()
self.num_heads = num_heads
self.out_channels = out_channels
self.att_self_weight = Parameter(torch.Tensor(1, self.num_heads,
self.out_channels))
self.att_neighbor_weight = Parameter(torch.Tensor(1, self.num_heads,
self.out_channels))
self.reset_parameters()
def reset_parameters(self):
pass
def forward(self, neighbor_vecs, self_vecs):
alpha = (self_vecs * self.att_self_weight).sum(dim=-1) + (neighbor_vecs
* self.att_neighbor_weight).sum(dim=-1)
alpha = F.leaky_relu(alpha, negative_slope=0.2)
return alpha
class CosAttentionNew(GatAttention):
def forward(self, input_0, input_1):
primals_1 = self.att_self_weight
primals_4 = self.att_neighbor_weight
primals_2 = input_0
primals_3 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
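# Hedged equivalence sketch (illustrative; assumes a CUDA device): the fused
# Triton kernel should reproduce the eager multiply-then-sum.
def _check_cos_attention_new():
    att = CosAttentionNew(num_heads=4, out_channels=4).cuda()
    with torch.no_grad():
        att.att_self_weight.uniform_()
        att.att_neighbor_weight.uniform_()
    a = torch.rand(4, 4, 4, 4, device='cuda')
    b = torch.rand(4, 4, 4, 4, device='cuda')
    ref = (a * att.att_self_weight * b * att.att_neighbor_weight).sum(dim=-1)
    assert torch.allclose(att(a, b), ref, atol=1e-5)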
|
GraphNAS/GraphNAS
|
CosAttention
| false | 13,731 |
[
"Apache-2.0"
] | 94 |
b4f05bb10b8b96bb9e82344bfae36a23db2431a6
|
https://github.com/GraphNAS/GraphNAS/tree/b4f05bb10b8b96bb9e82344bfae36a23db2431a6
|
Downsampler
|
# AOT ID: ['0_inference']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/k4/ck4qv3kxa3iieaknofb7rptujn3fj2obyj755mxtsm3f3rqxtqie.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%arg1_1, %arg0_1, None, [4, 4], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_0 = async_compile.triton('triton_poi_fused_convolution_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 4096], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 4096
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = (yindex // 4)
tmp0 = tl.load(in_ptr0 + (x2 + (4096*y3)), ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (4*x2) + (16384*y1)), tmp0, ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/h5/ch5dft2hubkvmhhqb6q77c5hhllzq4ffi4ueka6mzp6wpdcltr7b.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%arg1_1, %arg0_1, None, [4, 4], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_1 = async_compile.triton('triton_poi_fused_convolution_1', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 64], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_1', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_1(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 49
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = (yindex // 4)
tmp0 = tl.load(in_ptr0 + (x2 + (49*y3)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (y0 + (4*x2) + (196*y1)), tmp0, xmask & ymask)
''', device_str='cuda')
# kernel path: runs/run_shard_0/inductor_cache/x5/cx5sr4v4ymsdbw5ymzwozjogpcuj6v5j24cmplokeoteweefs3u6.py
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
# Source node to ATen node mapping:
# conv2d => convolution
# Graph fragment:
# %convolution : [num_users=1] = call_function[target=torch.ops.aten.convolution.default](args = (%arg1_1, %arg0_1, None, [4, 4], [0, 0], [1, 1], False, [0, 0], 1), kwargs = {})
triton_poi_fused_convolution_2 = async_compile.triton('triton_poi_fused_convolution_2', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[16, 256], tile_hint=TileHint.SQUARE,
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: 'i32', 3: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_convolution_2', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 1, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_convolution_2(in_ptr0, out_ptr0, ynumel, xnumel, YBLOCK : tl.constexpr, XBLOCK : tl.constexpr):
ynumel = 16
xnumel = 225
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = (yindex // 4)
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + (4*x2) + (900*y1)), xmask & ymask, eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + (225*y3)), tmp0, xmask & ymask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 7, 7), (196, 49, 7, 1))
assert_size_stride(arg1_1, (4, 4, 64, 64), (16384, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 64, 64), (16384, 1, 256, 4), torch.float32)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
stream0 = get_raw_stream(0)
triton_poi_fused_convolution_0.run(arg1_1, buf0, 16, 4096, grid=grid(16, 4096), stream=stream0)
del arg1_1
buf1 = empty_strided_cuda((4, 4, 7, 7), (196, 1, 28, 4), torch.float32)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_1.run(arg0_1, buf1, 16, 49, grid=grid(16, 49), stream=stream0)
del arg0_1
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
buf2 = extern_kernels.convolution(buf0, buf1, stride=(4, 4), padding=(0, 0), dilation=(1, 1), transposed=False, output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 15, 15), (900, 1, 60, 4))
del buf0
del buf1
buf3 = empty_strided_cuda((4, 4, 15, 15), (900, 225, 15, 1), torch.float32)
# Topologically Sorted Source Nodes: [conv2d], Original ATen: [aten.convolution]
triton_poi_fused_convolution_2.run(buf2, buf3, 16, 225, grid=grid(16, 225), stream=stream0)
del buf2
return (buf3, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
arg0_1 = rand_strided((4, 4, 7, 7), (196, 49, 7, 1), device='cuda:0', dtype=torch.float32)
arg1_1 = rand_strided((4, 4, 64, 64), (16384, 4096, 64, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([arg0_1, arg1_1])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
def bilinear_kernel(size, normalize=False):
"""
Make a 2D bilinear kernel suitable for upsampling/downsampling with
normalize=False/True. The kernel is size x size square.
Take
size: kernel size (square)
normalize: whether kernel sums to 1 (True) or not
Give
kernel: np.array with bilinear kernel coefficient
"""
factor = (size + 1) // 2
if size % 2 == 1:
center = factor - 1
else:
center = factor - 0.5
og = np.ogrid[:size, :size]
kernel = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) /
factor)
if normalize:
kernel /= kernel.sum()
return kernel
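# Hedged illustration (not in the original source): for an odd size the
# kernel peaks at the center, and normalize=True makes it sum to 1, which
# is what Downsampler relies on for average-preserving decimation.
def _demo_bilinear_kernel():
    k = bilinear_kernel(7, normalize=True)
    assert k.shape == (7, 7)
    assert abs(k.sum() - 1.0) < 1e-12
    assert k[3, 3] == k.max()  # center of a size-7 kernel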
class Interpolator(nn.Module):
"""
Interpolate by de/up/backward convolution with a bilinear kernel.
Take
channel_dim: the input channel dimension
rate: upsampling rate, that is 4 -> 4x upsampling
odd: the kernel parity, which is too much to explain here for now, but
will be handled automagically in the future, promise.
normalize: whether kernel sums to 1
"""
def __init__(self, channel_dim, rate, odd=True, normalize=False):
super().__init__()
self.rate = rate
ksize = rate * 2
if odd:
ksize -= 1
kernel = torch.from_numpy(bilinear_kernel(ksize, normalize))
weight = torch.zeros(channel_dim, channel_dim, ksize, ksize)
for k in range(channel_dim):
weight[k, k] = kernel
self.weight = nn.Parameter(weight, requires_grad=False)
def forward(self, x):
return F.conv_transpose2d(x, self.weight, stride=self.rate)
class Downsampler(Interpolator):
"""
Downsample with a normalized bilinear kernel.
"""
def __init__(self, channel_dim, rate, odd=True):
super().__init__(channel_dim, rate, odd, True)
def forward(self, x):
return F.conv2d(x, self.weight, stride=self.rate)
def get_inputs():
return [torch.rand([4, 4, 64, 64])]
def get_init_inputs():
return [[], {'channel_dim': 4, 'rate': 4}]
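# Hedged shape sketch (added for illustration): with rate=4 and odd=True the
# kernel is 7x7, so a 64x64 input maps to floor((64 - 7) / 4) + 1 = 15 per
# side, matching the (4, 4, 15, 15) output asserted in the compiled wrapper.
def _demo_downsampler():
    down = Downsampler(channel_dim=4, rate=4)
    y = down(torch.rand(4, 4, 64, 64))
    assert y.shape == (4, 4, 15, 15)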
|
import torch
from torch._inductor.select_algorithm import extern_kernels
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_convolution_0(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
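    # Pure layout shuffle: copy the contiguous NCHW activation into a
    # channels-last style buffer (strides (16384, 1, 256, 4)) so the
    # external convolution kernel can read it with unit channel stride.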
ynumel = 16
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
tl.full([XBLOCK, YBLOCK], True, tl.int1)
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 4096 * y3), ymask, eviction_policy=
'evict_last')
tl.store(out_ptr0 + (y0 + 4 * x2 + 16384 * y1), tmp0, ymask)
@triton.jit
def triton_poi_fused_convolution_1(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 49
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y3 = yindex
y0 = yindex % 4
y1 = yindex // 4
tmp0 = tl.load(in_ptr0 + (x2 + 49 * y3), xmask & ymask, eviction_policy
='evict_last')
tl.store(out_ptr0 + (y0 + 4 * x2 + 196 * y1), tmp0, xmask & ymask)
@triton.jit
def triton_poi_fused_convolution_2(in_ptr0, out_ptr0, ynumel, xnumel,
YBLOCK: tl.constexpr, XBLOCK: tl.constexpr):
ynumel = 16
xnumel = 225
yoffset = tl.program_id(1) * YBLOCK
yindex = yoffset + tl.arange(0, YBLOCK)[None, :]
ymask = yindex < ynumel
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
x2 = xindex
y0 = yindex % 4
y1 = yindex // 4
y3 = yindex
tmp0 = tl.load(in_ptr0 + (y0 + 4 * x2 + 900 * y1), xmask & ymask,
eviction_policy='evict_last')
tl.store(out_ptr0 + (x2 + 225 * y3), tmp0, xmask & ymask)
def call(args):
arg0_1, arg1_1 = args
args.clear()
assert_size_stride(arg0_1, (4, 4, 7, 7), (196, 49, 7, 1))
assert_size_stride(arg1_1, (4, 4, 64, 64), (16384, 4096, 64, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf0 = empty_strided_cuda((4, 4, 64, 64), (16384, 1, 256, 4), torch
.float32)
get_raw_stream(0)
triton_poi_fused_convolution_0[grid(16, 4096)](arg1_1, buf0, 16,
4096, XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del arg1_1
buf1 = empty_strided_cuda((4, 4, 7, 7), (196, 1, 28, 4), torch.float32)
triton_poi_fused_convolution_1[grid(16, 49)](arg0_1, buf1, 16, 49,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del arg0_1
buf2 = extern_kernels.convolution(buf0, buf1, stride=(4, 4),
padding=(0, 0), dilation=(1, 1), transposed=False,
output_padding=(0, 0), groups=1, bias=None)
assert_size_stride(buf2, (4, 4, 15, 15), (900, 1, 60, 4))
del buf0
del buf1
buf3 = empty_strided_cuda((4, 4, 15, 15), (900, 225, 15, 1), torch.
float32)
triton_poi_fused_convolution_2[grid(16, 225)](buf2, buf3, 16, 225,
XBLOCK=64, YBLOCK=16, num_warps=4, num_stages=1)
del buf2
return buf3,
def bilinear_kernel(size, normalize=False):
"""
Make a 2D bilinear kernel suitable for upsampling/downsampling with
normalize=False/True. The kernel is size x size square.
Take
size: kernel size (square)
normalize: whether kernel sums to 1 (True) or not
Give
kernel: np.array with bilinear kernel coefficient
"""
factor = (size + 1) // 2
if size % 2 == 1:
center = factor - 1
else:
center = factor - 0.5
og = np.ogrid[:size, :size]
kernel = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) /
factor)
if normalize:
kernel /= kernel.sum()
return kernel
class Interpolator(nn.Module):
"""
Interpolate by de/up/backward convolution with a bilinear kernel.
Take
channel_dim: the input channel dimension
rate: upsampling rate, that is 4 -> 4x upsampling
odd: the kernel parity, which is too much to explain here for now, but
will be handled automagically in the future, promise.
normalize: whether kernel sums to 1
"""
def __init__(self, channel_dim, rate, odd=True, normalize=False):
super().__init__()
self.rate = rate
ksize = rate * 2
if odd:
ksize -= 1
kernel = torch.from_numpy(bilinear_kernel(ksize, normalize))
weight = torch.zeros(channel_dim, channel_dim, ksize, ksize)
for k in range(channel_dim):
weight[k, k] = kernel
self.weight = nn.Parameter(weight, requires_grad=False)
def forward(self, x):
return F.conv_transpose2d(x, self.weight, stride=self.rate)
class DownsamplerNew(Interpolator):
"""
Downsample with a normalized bilinear kernel.
"""
def __init__(self, channel_dim, rate, odd=True):
super().__init__(channel_dim, rate, odd, True)
def forward(self, input_0):
arg0_1 = self.weight
arg1_1 = input_0
output = call([arg0_1, arg1_1])
return output[0]
|
Global19/revolver
|
Downsampler
| false | 13,732 |
[
"BSD-2-Clause"
] | 151 |
200082798d862516de6d9aa18e863a5968127a3f
|
https://github.com/Global19/revolver/tree/200082798d862516de6d9aa18e863a5968127a3f
|
GatAttention
|
# AOT ID: ['0_forward']
from ctypes import c_void_p, c_long, c_int
import torch
import math
import random
import os
import tempfile
from math import inf, nan
from torch._inductor.hooks import run_intermediate_hooks
from torch._inductor.utils import maybe_profile
from torch._inductor.codegen.memory_planning import _align as align
from torch import device, empty_strided
from torch._inductor.async_compile import AsyncCompile
from torch._inductor.select_algorithm import extern_kernels
from torch._inductor.codegen.multi_kernel import MultiKernelCall
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid, split_scan_grid, grid_combo_kernels, start_graph, end_graph
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
aten = torch.ops.aten
inductor_ops = torch.ops.inductor
_quantized = torch.ops._quantized
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cpu = torch._C._dynamo.guards._empty_strided_cpu
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
empty_strided_xpu = torch._C._dynamo.guards._empty_strided_xpu
reinterpret_tensor = torch._C._dynamo.guards._reinterpret_tensor
alloc_from_pool = torch.ops.inductor._alloc_from_pool
async_compile = AsyncCompile()
# kernel path: runs/run_shard_0/inductor_cache/ex/cexco2jwic5z7azgywysopaagdfks6vfj2d5siyxxphq6tzqdb2c.py
# Topologically Sorted Source Nodes: [mul, sum_1, mul_1, sum_2, alpha, alpha_1], Original ATen: [aten.mul, aten.sum, aten.add, aten.leaky_relu]
# Source node to ATen node mapping:
# alpha => add
# alpha_1 => gt, mul_2, where
# mul => mul
# mul_1 => mul_1
# sum_1 => sum_1
# sum_2 => sum_2
# Graph fragment:
# %mul : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_2, %primals_1), kwargs = {})
# %sum_1 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul, [-1]), kwargs = {})
# %mul_1 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%primals_4, %primals_3), kwargs = {})
# %sum_2 : [num_users=1] = call_function[target=torch.ops.aten.sum.dim_IntList](args = (%mul_1, [-1]), kwargs = {})
# %add : [num_users=3] = call_function[target=torch.ops.aten.add.Tensor](args = (%sum_1, %sum_2), kwargs = {})
# %gt : [num_users=2] = call_function[target=torch.ops.aten.gt.Scalar](args = (%add, 0), kwargs = {})
# %mul_2 : [num_users=1] = call_function[target=torch.ops.aten.mul.Tensor](args = (%add, 0.2), kwargs = {})
# %where : [num_users=1] = call_function[target=torch.ops.aten.where.self](args = (%gt, %add, %mul_2), kwargs = {})
triton_poi_fused_add_leaky_relu_mul_sum_0 = async_compile.triton('triton_poi_fused_add_leaky_relu_mul_sum_0', '''
import triton
import triton.language as tl
from triton.compiler.compiler import AttrsDescriptor
from torch._inductor.runtime import triton_helpers, triton_heuristics
from torch._inductor.runtime.triton_helpers import libdevice, math as tl_math
from torch._inductor.runtime.hints import AutotuneHint, ReductionHint, TileHint, instance_descriptor, DeviceProperties
@triton_heuristics.pointwise(
size_hints=[64],
filename=__file__,
triton_meta={'signature': {0: '*fp32', 1: '*fp32', 2: '*fp32', 3: '*fp32', 4: '*i1', 5: '*fp32', 6: 'i32'}, 'device': DeviceProperties(type='cuda', index=0, cc=80, major=8, regs_per_multiprocessor=65536, max_threads_per_multi_processor=2048, multi_processor_count=108), 'constants': {}, 'configs': [AttrsDescriptor(divisible_by_16=(0, 1, 2, 3, 4, 5, 6), equal_to_1=())]},
inductor_meta={'autotune_hints': set(), 'kernel_name': 'triton_poi_fused_add_leaky_relu_mul_sum_0', 'mutated_arg_names': [], 'no_x_dim': False, 'num_load': 16, 'num_reduction': 0, 'backend_hash': 'A9C866B4A14FD3277824029365D703C2427B2E685E54EC9B3EF4ADC8D1EEAC1D', 'are_deterministic_algorithms_enabled': False, 'assert_indirect_indexing': True, 'autotune_local_cache': True, 'autotune_pointwise': True, 'autotune_remote_cache': None, 'force_disable_caches': False, 'dynamic_scale_rblock': True, 'max_autotune': False, 'max_autotune_pointwise': False, 'min_split_scan_rblock': 256, 'spill_threshold': 16, 'store_cubin': False},
min_elem_per_thread=0
)
@triton.jit
def triton_poi_fused_add_leaky_relu_mul_sum_0(in_ptr0, in_ptr1, in_ptr2, in_ptr3, out_ptr1, out_ptr2, xnumel, XBLOCK : tl.constexpr):
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + (4*x2), xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + (4*x0), xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + (4*x2)), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + (4*x2)), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + (4*x2)), xmask, eviction_policy='evict_last')
tmp12 = tl.load(in_ptr1 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp15 = tl.load(in_ptr2 + (4*x2), xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr3 + (4*x0), xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr2 + (1 + (4*x2)), xmask, eviction_policy='evict_last')
tmp19 = tl.load(in_ptr3 + (1 + (4*x0)), xmask, eviction_policy='evict_last')
tmp22 = tl.load(in_ptr2 + (2 + (4*x2)), xmask, eviction_policy='evict_last')
tmp23 = tl.load(in_ptr3 + (2 + (4*x0)), xmask, eviction_policy='evict_last')
tmp26 = tl.load(in_ptr2 + (3 + (4*x2)), xmask, eviction_policy='evict_last')
tmp27 = tl.load(in_ptr3 + (3 + (4*x0)), xmask, eviction_policy='evict_last')
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tmp17 = tmp15 * tmp16
tmp20 = tmp18 * tmp19
tmp21 = tmp17 + tmp20
tmp24 = tmp22 * tmp23
tmp25 = tmp21 + tmp24
tmp28 = tmp26 * tmp27
tmp29 = tmp25 + tmp28
tmp30 = tmp14 + tmp29
tmp31 = 0.0
tmp32 = tmp30 > tmp31
tmp33 = 0.2
tmp34 = tmp30 * tmp33
tmp35 = tl.where(tmp32, tmp30, tmp34)
tl.store(out_ptr1 + (x2), tmp32, xmask)
tl.store(out_ptr2 + (x2), tmp35, xmask)
''', device_str='cuda')
async_compile.wait(globals())
del async_compile
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
# Topologically Sorted Source Nodes: [mul, sum_1, mul_1, sum_2, alpha, alpha_1], Original ATen: [aten.mul, aten.sum, aten.add, aten.leaky_relu]
stream0 = get_raw_stream(0)
triton_poi_fused_add_leaky_relu_mul_sum_0.run(primals_2, primals_1, primals_4, primals_3, buf1, buf2, 64, grid=grid(64), stream=stream0)
del primals_1
del primals_3
return (buf2, primals_2, primals_4, buf1, )
def benchmark_compiled_module(times=10, repeat=10):
from torch._dynamo.testing import rand_strided
from torch._inductor.utils import print_performance
primals_1 = rand_strided((1, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_2 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_3 = rand_strided((1, 4, 4), (16, 4, 1), device='cuda:0', dtype=torch.float32)
primals_4 = rand_strided((4, 4, 4, 4), (64, 16, 4, 1), device='cuda:0', dtype=torch.float32)
fn = lambda: call([primals_1, primals_2, primals_3, primals_4])
return print_performance(fn, times=times, repeat=repeat)
if __name__ == "__main__":
from torch._inductor.wrapper_benchmark import compiled_module_main
compiled_module_main('None', benchmark_compiled_module)
|
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Parameter
class ConstAttention(nn.Module):
def __init__(self, **kwargs):
super(ConstAttention, self).__init__()
def forward(self, neighbor_vecs, self_vecs):
return 1
class GatAttention(ConstAttention):
def __init__(self, num_heads, out_channels):
super(GatAttention, self).__init__()
self.num_heads = num_heads
self.out_channels = out_channels
self.att_self_weight = Parameter(torch.Tensor(1, self.num_heads,
self.out_channels))
self.att_neighbor_weight = Parameter(torch.Tensor(1, self.num_heads,
self.out_channels))
self.reset_parameters()
def reset_parameters(self):
pass
def forward(self, neighbor_vecs, self_vecs):
alpha = (self_vecs * self.att_self_weight).sum(dim=-1) + (neighbor_vecs
* self.att_neighbor_weight).sum(dim=-1)
alpha = F.leaky_relu(alpha, negative_slope=0.2)
return alpha
def get_inputs():
return [torch.rand([4, 4, 4, 4]), torch.rand([4, 4, 4, 4])]
def get_init_inputs():
return [[], {'num_heads': 4, 'out_channels': 4}]
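# Hedged usage sketch (added for illustration; not in the original source):
# GatAttention sums over the channel dimension and applies leaky_relu with
# slope 0.2, giving scores of shape (4, 4, 4). Weights are filled manually
# because reset_parameters() leaves them uninitialized.
def _demo_gat_attention():
    att = GatAttention(num_heads=4, out_channels=4)
    with torch.no_grad():
        att.att_self_weight.uniform_()
        att.att_neighbor_weight.uniform_()
    alpha = att(torch.rand(4, 4, 4, 4), torch.rand(4, 4, 4, 4))
    assert alpha.shape == (4, 4, 4)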
|
import torch
import triton
import triton.language as tl
from torch._inductor.runtime.triton_heuristics import grid
from torch._C import _cuda_getCurrentRawStream as get_raw_stream
import torch.nn as nn
from torch.nn import Parameter
assert_size_stride = torch._C._dynamo.guards.assert_size_stride
empty_strided_cuda = torch._C._dynamo.guards._empty_strided_cuda
@triton.jit
def triton_poi_fused_add_leaky_relu_mul_sum_0(in_ptr0, in_ptr1, in_ptr2,
in_ptr3, out_ptr1, out_ptr2, xnumel, XBLOCK: tl.constexpr):
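    # Two elementwise input-times-weight products, each summed over the
    # 4-channel dimension, added together, then leaky_relu with slope 0.2:
    # stores the activation values (out_ptr2) and the > 0 mask (out_ptr1)
    # reused by the backward pass.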
xnumel = 64
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:]
xmask = xindex < xnumel
x2 = xindex
x0 = xindex % 4
tmp0 = tl.load(in_ptr0 + 4 * x2, xmask, eviction_policy='evict_last')
tmp1 = tl.load(in_ptr1 + 4 * x0, xmask, eviction_policy='evict_last')
tmp3 = tl.load(in_ptr0 + (1 + 4 * x2), xmask, eviction_policy='evict_last')
tmp4 = tl.load(in_ptr1 + (1 + 4 * x0), xmask, eviction_policy='evict_last')
tmp7 = tl.load(in_ptr0 + (2 + 4 * x2), xmask, eviction_policy='evict_last')
tmp8 = tl.load(in_ptr1 + (2 + 4 * x0), xmask, eviction_policy='evict_last')
tmp11 = tl.load(in_ptr0 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp12 = tl.load(in_ptr1 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp15 = tl.load(in_ptr2 + 4 * x2, xmask, eviction_policy='evict_last')
tmp16 = tl.load(in_ptr3 + 4 * x0, xmask, eviction_policy='evict_last')
tmp18 = tl.load(in_ptr2 + (1 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp19 = tl.load(in_ptr3 + (1 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp22 = tl.load(in_ptr2 + (2 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp23 = tl.load(in_ptr3 + (2 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp26 = tl.load(in_ptr2 + (3 + 4 * x2), xmask, eviction_policy='evict_last'
)
tmp27 = tl.load(in_ptr3 + (3 + 4 * x0), xmask, eviction_policy='evict_last'
)
tmp2 = tmp0 * tmp1
tmp5 = tmp3 * tmp4
tmp6 = tmp2 + tmp5
tmp9 = tmp7 * tmp8
tmp10 = tmp6 + tmp9
tmp13 = tmp11 * tmp12
tmp14 = tmp10 + tmp13
tmp17 = tmp15 * tmp16
tmp20 = tmp18 * tmp19
tmp21 = tmp17 + tmp20
tmp24 = tmp22 * tmp23
tmp25 = tmp21 + tmp24
tmp28 = tmp26 * tmp27
tmp29 = tmp25 + tmp28
tmp30 = tmp14 + tmp29
tmp31 = 0.0
tmp32 = tmp30 > tmp31
tmp33 = 0.2
tmp34 = tmp30 * tmp33
tmp35 = tl.where(tmp32, tmp30, tmp34)
tl.store(out_ptr1 + x2, tmp32, xmask)
tl.store(out_ptr2 + x2, tmp35, xmask)
def call(args):
primals_1, primals_2, primals_3, primals_4 = args
args.clear()
assert_size_stride(primals_1, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_2, (4, 4, 4, 4), (64, 16, 4, 1))
assert_size_stride(primals_3, (1, 4, 4), (16, 4, 1))
assert_size_stride(primals_4, (4, 4, 4, 4), (64, 16, 4, 1))
with torch.cuda._DeviceGuard(0):
torch.cuda.set_device(0)
buf1 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.bool)
buf2 = empty_strided_cuda((4, 4, 4), (16, 4, 1), torch.float32)
get_raw_stream(0)
triton_poi_fused_add_leaky_relu_mul_sum_0[grid(64)](primals_2,
primals_1, primals_4, primals_3, buf1, buf2, 64, XBLOCK=64,
num_warps=1, num_stages=1)
del primals_1
del primals_3
return buf2, primals_2, primals_4, buf1
class ConstAttention(nn.Module):
def __init__(self, **kwargs):
super(ConstAttention, self).__init__()
def forward(self, neighbor_vecs, self_vecs):
return 1
class GatAttentionNew(ConstAttention):
def __init__(self, num_heads, out_channels):
super(GatAttentionNew, self).__init__()
self.num_heads = num_heads
self.out_channels = out_channels
self.att_self_weight = Parameter(torch.Tensor(1, self.num_heads,
self.out_channels))
self.att_neighbor_weight = Parameter(torch.Tensor(1, self.num_heads,
self.out_channels))
self.reset_parameters()
def reset_parameters(self):
pass
def forward(self, input_0, input_1):
primals_1 = self.att_self_weight
primals_3 = self.att_neighbor_weight
primals_2 = input_0
primals_4 = input_1
output = call([primals_1, primals_2, primals_3, primals_4])
return output[0]
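# Hedged equivalence sketch (illustrative; assumes a CUDA device). Note the
# wrapper pairs its first argument with att_self_weight and its second with
# att_neighbor_weight, so the eager reference mirrors that pairing.
def _check_gat_attention_new():
    import torch.nn.functional as F
    att = GatAttentionNew(num_heads=4, out_channels=4).cuda()
    with torch.no_grad():
        att.att_self_weight.uniform_()
        att.att_neighbor_weight.uniform_()
    a = torch.rand(4, 4, 4, 4, device='cuda')
    b = torch.rand(4, 4, 4, 4, device='cuda')
    ref = F.leaky_relu((a * att.att_self_weight).sum(-1) +
                       (b * att.att_neighbor_weight).sum(-1),
                       negative_slope=0.2)
    assert torch.allclose(att(a, b), ref, atol=1e-5)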
|
GraphNAS/GraphNAS
|
GatAttention
| false | 13,733 |
[
"Apache-2.0"
] | 94 |
b4f05bb10b8b96bb9e82344bfae36a23db2431a6
|
https://github.com/GraphNAS/GraphNAS/tree/b4f05bb10b8b96bb9e82344bfae36a23db2431a6
|