text
stringlengths
1
2.05k
class ScalarLoop(torch.nn.Module):
    """Scalar accumulation over a loop, followed by a branch on the total."""

    def forward(self, inp):
        """Sum i*i + 1 for each leading index of `inp`, then add 1 (nonzero) or 2 (zero)."""
        total = 0
        for idx in range(inp.size(0)):
            term = idx * idx
            term = term + 1
            total += term
        if total != 0:
            total += 1
        else:
            total += 2
        return total
class SimpleLoop(torch.nn.Module):
    """Quadruples the running tensor once per element of the leading dimension."""

    def forward(self, inp):
        acc = inp
        for _ in range(inp.size(0)):
            doubled = acc * 2.0
            tripled = acc + doubled
            acc += tripled  # in-place, matching the original's `a += c`
        return acc
class LoopWithIf(torch.nn.Module):
    """Loop whose body branches on the sign of an intermediate sum."""

    def forward(self, inp):
        acc = inp
        for _ in range(inp.size(0)):
            triple = acc * 2.0
            triple = acc + triple
            if triple.sum() > 0.0:
                acc += triple
            else:
                acc -= triple
        return acc
class NestedLoop(torch.nn.Module):
    """Two nested loops over the first and second dims, accumulating in place."""

    def forward(self, inp):
        acc = inp
        for outer in range(inp.size(0)):
            scaled = acc * float(outer)
            for inner in range(inp.size(1)):
                acc += scaled * float(inner)
        return acc
class SimpleScalarWhileLoop(torch.nn.Module):
    """Two scalar while-loops: one bounded by the input size, one by a constant."""

    def forward(self, inp):
        """Accumulate even indices up to size(0) inclusive, then multiples of 3 below 10."""
        acc = 1
        step = 0
        while step <= inp.size(0):
            acc += step
            step += 2
        step = 0
        while step < 10:
            acc += step
            step += 3
        return acc
class SimpleWhileLoop(torch.nn.Module):
    """While-loop whose trip count depends on the input's leading dimension."""

    def forward(self, inp):
        acc = inp
        step = 0
        while step < inp.size(0):
            acc += acc * float(step) * 2.0
            step += 1
        return acc


models = [
    SimpleIf(10, 20),
    NestedIf(10, 20),
    ScalarLoop(),
    SimpleLoop(),
    LoopWithIf(),
    SimpleScalarWhileLoop(),
    SimpleWhileLoop(),
    NestedLoop(),
]
for pt_model in models:
    verify_script_model(pt_model.eval(), [(10, 20)], _get_default_vm_targets())


@tvm.testing.uses_gpu
def test_simple_rnn():
    """test_simple_rnn"""
class DecisionGate(torch.nn.Module):
    """Passes the input through unchanged if its sum is positive, else negates it."""

    def forward(self, x):
        if x.sum() > 0:
            return x
        return -x
class Cell(torch.nn.Module):
    """One RNN step: linear projection, gating module, add hidden state, tanh squash."""

    def __init__(self, dg):
        super().__init__()
        self.dg = dg
        self.linear = torch.nn.Linear(4, 4)

    def forward(self, x, h):
        gated = self.dg(self.linear(x))
        new_h = torch.tanh(gated + h)
        return new_h, new_h
class RNNLoop(torch.nn.Module):
    """Pytorch RNNLoop module: unrolls a traced Cell over the leading time dim."""

    def __init__(self):
        super().__init__()
        x = torch.rand(10, 4, dtype=torch.float)
        h = torch.rand(10, 4, dtype=torch.float)
        # Trace the cell once with representative inputs.
        self.cell = torch.jit.trace(Cell(DecisionGate()), (x, h))

    def forward(self, xs):
        h = torch.zeros(10, 4, dtype=torch.float)
        y = torch.zeros(10, 4, dtype=torch.float)
        for i in range(xs.size(0)):
            y, h = self.cell(xs[i], h)
        return y


verify_script_model(RNNLoop().eval(), [(10, 10, 4)], _get_default_vm_targets())


@tvm.testing.uses_gpu
def test_forward_reduce_sum():
    """test_forward_reduce_sum"""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]
class ReduceSum1(Module):
    """Sum over positional dim 1."""

    def forward(self, *args):
        data = args[0]
        return data.sum(1)


class ReduceSum2(Module):
    """Sum over dim 1, keepdim=False (keyword form)."""

    def forward(self, *args):
        data = args[0]
        return data.sum(dim=1, keepdim=False)


class ReduceSum3(Module):
    """Sum over dim 2, keeping the reduced dim."""

    def forward(self, *args):
        data = args[0]
        return data.sum(dim=2, keepdim=True)


class ReduceSum4(Module):
    """Sum over dims (2, 3), keeping the reduced dims."""

    def forward(self, *args):
        data = args[0]
        return data.sum(dim=(2, 3), keepdim=True)
class ReduceSum5(Module):
    """Sum over dims (2, 3) without keeping the reduced dims."""

    def forward(self, *args):
        data = args[0]
        return data.sum(dim=(2, 3), keepdim=False)


input_data = torch.rand(input_shape).float()
verify_model(ReduceSum1().float().eval(), input_data=input_data)
verify_model(ReduceSum2().float().eval(), input_data=input_data)
verify_model(ReduceSum3().float().eval(), input_data=input_data)
verify_model(ReduceSum4().float().eval(), input_data=input_data)
verify_model(ReduceSum5().float().eval(), input_data=input_data)


@tvm.testing.uses_gpu
def test_forward_reduce_prod():
    """test_forward_reduce_prod"""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]
class ReduceProd1(Module):
    """Product over positional dim 1."""

    def forward(self, *args):
        data = args[0]
        return data.prod(1)


class ReduceProd2(Module):
    """Product over dim 1, keepdim=False (keyword form)."""

    def forward(self, *args):
        data = args[0]
        return data.prod(dim=1, keepdim=False)
class ReduceProd3(Module):
    """Product over dim 2, keeping the reduced dim."""

    def forward(self, *args):
        data = args[0]
        return data.prod(dim=2, keepdim=True)


input_data = torch.rand(input_shape).float()
verify_model(ReduceProd1().float().eval(), input_data=input_data)
verify_model(ReduceProd2().float().eval(), input_data=input_data)
verify_model(ReduceProd3().float().eval(), input_data=input_data)


@tvm.testing.uses_gpu
def test_forward_argmin():
    """test_forward_argmin"""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]
class ArgMin1(Module):
    """Argmin over positional dim 1."""

    def forward(self, *args):
        data = args[0]
        return data.argmin(1)


class ArgMin2(Module):
    """Argmin over dim 1, keepdim=False (keyword form)."""

    def forward(self, *args):
        data = args[0]
        return data.argmin(dim=1, keepdim=False)
class ArgMin3(Module):
    """Argmin over dim 2, keeping the reduced dim."""

    def forward(self, *args):
        data = args[0]
        return data.argmin(dim=2, keepdim=True)


input_data = torch.rand(input_shape).float()
verify_model(ArgMin1().float().eval(), input_data=input_data)
verify_model(ArgMin2().float().eval(), input_data=input_data)
verify_model(ArgMin3().float().eval(), input_data=input_data)


@tvm.testing.uses_gpu
def test_forward_argmax():
    """test_forward_argmax"""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]
class ArgMax1(Module):
    """Argmax over positional dim 1."""

    def forward(self, *args):
        data = args[0]
        return data.argmax(1)


class ArgMax2(Module):
    """Argmax over dim 1, keepdim=False (keyword form)."""

    def forward(self, *args):
        data = args[0]
        return data.argmax(dim=1, keepdim=False)
class ArgMax3(Module):
    """Argmax over dim 2, keeping the reduced dim."""

    def forward(self, *args):
        data = args[0]
        return data.argmax(dim=2, keepdim=True)


input_data = torch.rand(input_shape).float()
verify_model(ArgMax1().float().eval(), input_data=input_data)
verify_model(ArgMax2().float().eval(), input_data=input_data)
verify_model(ArgMax3().float().eval(), input_data=input_data)


@tvm.testing.uses_gpu
def test_forward_std():
    """test_forward_std"""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]
class Std1(Module):
    """Biased std over positional dim 1."""

    def forward(self, *args):
        return args[0].std(1, unbiased=False)


class Std2(Module):
    """Biased std over dim 1 without keepdim."""

    def forward(self, *args):
        return args[0].std(dim=1, keepdim=False, unbiased=False)


class Std3(Module):
    """Biased std over dim 2 with keepdim."""

    def forward(self, *args):
        return args[0].std(dim=2, keepdim=True, unbiased=False)


class Std4(Module):
    """Biased std over dims (2, 3) with keepdim."""

    def forward(self, *args):
        return args[0].std(dim=(2, 3), keepdim=True, unbiased=False)


class Std5(Module):
    """Biased std over dims (2, 3) without keepdim."""

    def forward(self, *args):
        return args[0].std(dim=(2, 3), keepdim=False, unbiased=False)


class Std6(Module):
    """Biased std over all elements (scalar result)."""

    def forward(self, *args):
        return args[0].std(unbiased=False)


class Std7(Module):
    """Unbiased std over dim 1 without keepdim."""

    def forward(self, *args):
        return args[0].std(dim=1, keepdim=False, unbiased=True)


class Std8(Module):
    """Unbiased std over dims (2, 3) with keepdim."""

    def forward(self, *args):
        return args[0].std(dim=(2, 3), keepdim=True, unbiased=True)
class Std9(Module):
    """Unbiased std over all elements (scalar result)."""

    def forward(self, *args):
        return args[0].std(unbiased=True)


input_data = torch.rand(input_shape).float()
verify_model(Std1().float().eval(), input_data=input_data)
verify_model(Std2().float().eval(), input_data=input_data)
verify_model(Std3().float().eval(), input_data=input_data)
verify_model(Std4().float().eval(), input_data=input_data)
verify_model(Std5().float().eval(), input_data=input_data)
verify_model(Std6().float().eval(), input_data=input_data)
verify_model(Std7().float().eval(), input_data=input_data)
verify_model(Std8().float().eval(), input_data=input_data)
verify_model(Std9().float().eval(), input_data=input_data)


@tvm.testing.uses_gpu
def test_forward_var_mean():
    """test_forward_var_mean"""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]
class VarMean1(Module):
    """Biased var_mean over positional dim 1."""

    def forward(self, *args):
        return torch.var_mean(args[0], 1, unbiased=False)


class VarMean2(Module):
    """Biased var_mean over dim 1 without keepdim."""

    def forward(self, *args):
        return torch.var_mean(args[0], dim=1, keepdim=False, unbiased=False)


class VarMean3(Module):
    """Biased var_mean over dim 2 with keepdim."""

    def forward(self, *args):
        return torch.var_mean(args[0], dim=2, keepdim=True, unbiased=False)


class VarMean4(Module):
    """Biased var_mean over dims (2, 3) with keepdim."""

    def forward(self, *args):
        return torch.var_mean(args[0], dim=(2, 3), keepdim=True, unbiased=False)


class VarMean5(Module):
    """Biased var_mean over dims (2, 3) without keepdim."""

    def forward(self, *args):
        return torch.var_mean(args[0], dim=(2, 3), keepdim=False, unbiased=False)


class VarMean6(Module):
    """Biased var_mean over all elements (scalar results)."""

    def forward(self, *args):
        return torch.var_mean(args[0], unbiased=False)


class VarMean7(Module):
    """Unbiased var_mean over dim 1 without keepdim."""

    def forward(self, *args):
        return torch.var_mean(args[0], dim=1, keepdim=False, unbiased=True)


class VarMean8(Module):
    """Unbiased var_mean over dims (2, 3) with keepdim."""

    def forward(self, *args):
        return torch.var_mean(args[0], dim=(2, 3), keepdim=True, unbiased=True)
class VarMean9(Module):
    """Unbiased var_mean over all elements (scalar results)."""

    def forward(self, *args):
        return torch.var_mean(args[0], unbiased=True)


input_data = torch.rand(input_shape).float()
verify_model(VarMean1().float().eval(), input_data=input_data)
verify_model(VarMean2().float().eval(), input_data=input_data)
verify_model(VarMean3().float().eval(), input_data=input_data)
verify_model(VarMean4().float().eval(), input_data=input_data)
verify_model(VarMean5().float().eval(), input_data=input_data)
verify_model(VarMean6().float().eval(), input_data=input_data)
verify_model(VarMean7().float().eval(), input_data=input_data)
verify_model(VarMean8().float().eval(), input_data=input_data)
verify_model(VarMean9().float().eval(), input_data=input_data)


@tvm.testing.uses_gpu
def test_forward_variance():
    """test_forward_variance"""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]
class Variance1(Module):
    """Biased variance over positional dim 1."""

    def forward(self, *args):
        return args[0].var(1, unbiased=False)


class Variance2(Module):
    """Biased variance over dim 1 without keepdim."""

    def forward(self, *args):
        return args[0].var(dim=1, keepdim=False, unbiased=False)


class Variance3(Module):
    """Biased variance over dim 2 with keepdim."""

    def forward(self, *args):
        return args[0].var(dim=2, keepdim=True, unbiased=False)


class Variance4(Module):
    """Biased variance over dims (2, 3) with keepdim."""

    def forward(self, *args):
        return args[0].var(dim=(2, 3), keepdim=True, unbiased=False)


class Variance5(Module):
    """Biased variance over dims (2, 3) without keepdim."""

    def forward(self, *args):
        return args[0].var(dim=(2, 3), keepdim=False, unbiased=False)


class Variance6(Module):
    """Biased variance over all elements (scalar result)."""

    def forward(self, *args):
        return args[0].var(unbiased=False)


class Variance7(Module):
    """Unbiased variance over dim 1 without keepdim."""

    def forward(self, *args):
        return args[0].var(dim=1, keepdim=False, unbiased=True)


class Variance8(Module):
    """Unbiased variance over dims (2, 3) with keepdim."""

    def forward(self, *args):
        return args[0].var(dim=(2, 3), keepdim=True, unbiased=True)
class Variance9(Module):
    """Unbiased variance over all elements (scalar result)."""

    def forward(self, *args):
        return args[0].var(unbiased=True)


input_data = torch.rand(input_shape).float()
verify_model(Variance1().float().eval(), input_data=input_data)
verify_model(Variance2().float().eval(), input_data=input_data)
verify_model(Variance3().float().eval(), input_data=input_data)
verify_model(Variance4().float().eval(), input_data=input_data)
verify_model(Variance5().float().eval(), input_data=input_data)
verify_model(Variance6().float().eval(), input_data=input_data)
verify_model(Variance7().float().eval(), input_data=input_data)
verify_model(Variance8().float().eval(), input_data=input_data)
verify_model(Variance9().float().eval(), input_data=input_data)


@tvm.testing.uses_gpu
def test_forward_rsub():
    """test_forward_rsub"""
    torch.set_grad_enabled(False)
class Rsub1(Module):
    """Reversed subtraction: returns args[1] - args[0]."""

    def forward(self, *args):
        minuend_src, subtrahend = args[0], args[1]
        return torch.rsub(minuend_src, subtrahend)
class Rsub2(Module):
    """Reversed subtraction with alpha scaling: args[1] - args[0] * 0.5."""

    def forward(self, *args):
        return torch.rsub(args[0], args[1], alpha=0.5)


d1 = torch.rand([1, 3]).float()
d2 = torch.rand([1, 3]).float()
d3 = torch.rand([1, 3]).int()
verify_model(Rsub1().float().eval(), input_data=[d1, d2])
verify_model(Rsub1().float().eval(), input_data=[d1, d3])
verify_model(Rsub2().float().eval(), input_data=[d1, d2])
verify_model(Rsub2().float().eval(), input_data=[d1, d3])
d1 = torch.rand([1, 3]).half()
d2 = torch.rand([1, 3]).half()
verify_model(Rsub1().half().eval(), input_data=[d1, d2])
verify_model(Rsub1().half().eval(), input_data=[d1, d3])
verify_model(Rsub2().half().eval(), input_data=[d1, d2])
verify_model(Rsub2().half().eval(), input_data=[d1, d3])


@tvm.testing.uses_gpu
def test_forward_embedding():
    """test_forward_embedding"""
    torch.set_grad_enabled(False)
    input_data = torch.randint(0, 10, [2, 4]).long()
    verify_model(torch.nn.Embedding(10, 3).float().eval(), input_data=input_data)
    input_data = torch.randint(0, 4, [2, 3, 4]).long()
    verify_model(torch.nn.Embedding(4, 5, sparse=False).float().eval(), input_data=input_data)
    input_data = torch.randint(0, 4, [2, 3, 4]).long()
    verify_model(torch.nn.Embedding(4, 5, sparse=True).float().eval(), input_data=input_data)


@tvm.testing.uses_gpu
def test_forward_onehot():
    """test_forward_onehot"""
    torch.set_grad_enabled(False)
class OneHot1(Module):
    """One-hot encodes the input with 3 classes."""

    def forward(self, *args):
        labels = args[0]
        return torch.nn.functional.one_hot(labels, num_classes=3)
class OneHot2(Module):
    """One-hot encodes the input with 5 classes."""

    def forward(self, *args):
        labels = args[0]
        return torch.nn.functional.one_hot(labels, num_classes=5)


input_data = torch.arange(0, 5) % 3
verify_model(OneHot1().float().eval(), input_data=input_data)
input_data = torch.arange(0, 5) % 4
verify_model(OneHot2().float().eval(), input_data=input_data)


@tvm.testing.uses_gpu
def test_forward_isfinite():
    """test_forward_isfinite"""
    torch.set_grad_enabled(False)
class IsFinite1(Module):
    """Elementwise finiteness predicate (False for inf/-inf/nan)."""

    def forward(self, *args):
        return torch.isfinite(args[0])


input_data = torch.tensor([1, float("inf"), 2, float("-inf"), float("nan")]).float()
verify_model(IsFinite1().float().eval(), input_data=input_data)


@tvm.testing.uses_gpu
def test_forward_isnan():
    """test_forward_isnan"""
    torch.set_grad_enabled(False)
class IsNan1(Module):
    """Elementwise NaN predicate."""

    def forward(self, *args):
        return torch.isnan(args[0])


input_data = torch.tensor([1, float("inf"), 2, float("-inf"), float("nan")]).float()
verify_model(IsNan1().float().eval(), input_data=input_data)


@tvm.testing.uses_gpu
def test_forward_isinf():
    """test_forward_isinf"""
    torch.set_grad_enabled(False)
class IsInf1(Module):
    """Elementwise infinity predicate (True for +inf and -inf)."""

    def forward(self, *args):
        return torch.isinf(args[0])


input_data = torch.tensor([1, float("inf"), 2, float("-inf"), float("nan")]).float()
verify_model(IsInf1().float().eval(), input_data=input_data)


@tvm.testing.uses_gpu
def test_forward_clamp():
    """test_forward_clamp"""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]
class Clamp1(Module):
    """Clamps to the symmetric range [-0.5, 0.5]."""

    def forward(self, *args):
        return torch.clamp(args[0], min=-0.5, max=0.5)


class Clamp2(Module):
    """Clamps only from below at -0.3."""

    def forward(self, *args):
        return torch.clamp(args[0], min=-0.3)


class Clamp3(Module):
    """Clamps only from above at 1.0."""

    def forward(self, *args):
        return torch.clamp(args[0], max=1.0)
class Clamp_MinExpr_MaxConstant(Module):
    """Clamps with a min derived from the input shape and a shape-based max."""

    def forward(self, *args):
        h, w = args[0].shape[2:]
        amin = h / 100.0
        return torch.clamp(args[0], min=amin, max=w)


input_data = torch.rand(input_shape).float()
verify_model(Clamp1().float().eval(), input_data=input_data)
verify_model(Clamp2().float().eval(), input_data=input_data)
verify_model(Clamp3().float().eval(), input_data=input_data)
verify_model(Clamp_MinExpr_MaxConstant().float().eval(), input_data=input_data)
verify_model(lambda inp: torch.clamp_min(inp, 0.5), input_data)
inp_uint8 = torch.randint(low=0, high=256, size=(100, 100), dtype=torch.uint8)
verify_model(lambda inp: torch.clamp_max(inp, 125), inp_uint8)


@tvm.testing.uses_gpu
def test_forward_clamp_():
    """test_forward_clamp_"""
    torch.set_grad_enabled(False)
class ClampInPlace(Module):
    """In-place clamp to a range fixed at construction time."""

    def __init__(self, i_min, i_max):
        super().__init__()
        self.min = i_min
        self.max = i_max

    def forward(self, *args):
        return torch.clamp_(args[0], self.min, self.max)


for ishape, i_min, i_max in (([4, 8], 0.1, 0.9), ([7, 6], 0.2, 0.5)):
    input_data = torch.rand(ishape).float()
    verify_model(ClampInPlace(i_min, i_max).float().eval(), input_data=input_data)


@tvm.testing.uses_gpu
def test_forward_ones():
    """test_forward_ones"""
    torch.set_grad_enabled(False)
class Ones1(Module):
    """Produces a constant 2x3 tensor of ones, ignoring inputs."""

    def forward(self, *args):
        return torch.ones(2, 3)


verify_model(Ones1().float().eval(), input_data=[])


@tvm.testing.uses_gpu
def test_forward_ones_like():
    """test_forward_ones_like"""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]
class OnesLike1(Module):
    """Ones tensor matching the input's shape and dtype."""

    def forward(self, *args):
        return torch.ones_like(args[0])


class OnesLike2(Module):
    """Ones tensor matching the input's shape, forced to int8."""

    def forward(self, *args):
        return torch.ones_like(args[0], dtype=torch.int8)
class OnesLike3(Module):
    """Ones tensor matching the input's shape, forced to float."""

    def forward(self, *args):
        return torch.ones_like(args[0], dtype=torch.float)


input_data = torch.rand(input_shape).float()
verify_model(OnesLike1().float().eval(), input_data=input_data)
verify_model(OnesLike2().float().eval(), input_data=input_data)
verify_model(OnesLike3().float().eval(), input_data=input_data)


@tvm.testing.uses_gpu
def test_forward_new_ones():
    """test_forward_new_ones"""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]

    def test_func(input_tensor):
        return input_tensor.new_ones([3, 10, 10])

    verify_model_with_input(test_func, [torch.rand(input_shape).float()])


@tvm.testing.uses_gpu
def test_forward_zeros():
    """test_forward_zeros"""
    torch.set_grad_enabled(False)
class Zeros1(Module):
    """Produces a constant 2x3 tensor of zeros, ignoring inputs."""

    def forward(self, *args):
        return torch.zeros(2, 3)


verify_model(Zeros1().float().eval(), input_data=[])


def test_forward_zero_():
    def test_func(x):
        return x.zero_()

    verify_model_with_input(test_func, [torch.rand([1, 3, 10, 10]).float()])


@tvm.testing.uses_gpu
def test_forward_zeros_like():
    """test_forward_zeros_like"""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]
class ZerosLike1(Module):
    """Zeros tensor matching the input's shape and dtype."""

    def forward(self, *args):
        return torch.zeros_like(args[0])


class ZerosLike2(Module):
    """Zeros tensor matching the input's shape, forced to int32."""

    def forward(self, *args):
        return torch.zeros_like(args[0], dtype=torch.int32)
class ZerosLike3(Module):
    """Zeros tensor matching the input's shape, forced to float."""

    def forward(self, *args):
        return torch.zeros_like(args[0], dtype=torch.float)


input_data = torch.rand(input_shape).float()
verify_model(ZerosLike1().float().eval(), input_data=input_data)
verify_model(ZerosLike2().float().eval(), input_data=input_data)
verify_model(ZerosLike3().float().eval(), input_data=input_data)


@tvm.testing.uses_gpu
def test_forward_full():
    """test_forward_full"""
    torch.set_grad_enabled(False)
class Full1(Module):
    """Produces a constant 2x3 tensor filled with 3.14, ignoring inputs."""

    def forward(self, *args):
        return torch.full((2, 3), 3.14)
class Full2(Module):
    """Produces a constant 1x2x3 int32 tensor of ones, ignoring inputs."""

    def forward(self, *args):
        return torch.full((1, 2, 3), 1.0, dtype=torch.int32)


verify_model(Full1().float().eval(), input_data=[])
verify_model(Full2().float().eval(), input_data=[])


@tvm.testing.uses_gpu
def test_forward_full_like():
    """test_forward_full_like"""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]
class FullLike1(Module):
    """Fills a tensor shaped like the input with 3.14."""

    def forward(self, *args):
        return torch.full_like(args[0], 3.14)


class FullLike2(Module):
    """Fills a tensor shaped like the input with 22.22, cast to int32."""

    def forward(self, *args):
        return torch.full_like(args[0], 22.22, dtype=torch.int32)
class FullLike3(Module):
    """Fills a tensor shaped like the input with 1.4, forced to float."""

    def forward(self, *args):
        return torch.full_like(args[0], 1.4, dtype=torch.float)


input_data = torch.rand(input_shape).float()
verify_model(FullLike1().float().eval(), input_data=input_data)
verify_model(FullLike2().float().eval(), input_data=input_data)
verify_model(FullLike3().float().eval(), input_data=input_data)


@tvm.testing.uses_gpu
def test_forward_new_full():
    """test_forward_new_full"""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]

    def test_func(input_tensor):
        return input_tensor.new_full([2, 3], 1)

    verify_model_with_input(test_func, [torch.rand(input_shape).float()])


def test_forward_fill_():
    def test_func(x):
        return x.fill_(3)

    verify_model_with_input(test_func, [torch.rand([1, 3, 10, 10]).float()])


def test_forward_fill_with_div():
    """test_forward_fill_with_div"""

    def test_func(x):
        y = torch.div(torch.tensor(6.0), torch.tensor(2.0))
        return x.fill_(y)

    verify_model_with_input(test_func, [torch.rand([1, 3, 10, 10]).float()])


@tvm.testing.uses_gpu
def test_forward_linspace():
    """test_forward_linspace"""
    torch.set_grad_enabled(False)
class Linspace1(Module):
    """100 evenly spaced points from 5 to 10."""

    def forward(self, *args):
        return torch.linspace(5, 10, steps=100)


class Linspace2(Module):
    """5 evenly spaced points from -10 to 10."""

    def forward(self, *args):
        return torch.linspace(-10, 10, steps=5)


class Linspace3(Module):
    """Same as Linspace2 but with fully keyworded arguments."""

    def forward(self, *args):
        return torch.linspace(start=-10, end=10, steps=5)


class Linspace4(Module):
    """Single-step linspace: just the start point."""

    def forward(self, *args):
        return torch.linspace(start=-10, end=10, steps=1)


class Linspace5(Module):
    """Single-step linspace with int32 output."""

    def forward(self, *args):
        return torch.linspace(1, 2, 1, dtype=torch.int32)


class Linspace6(Module):
    """Two-step linspace: just the endpoints."""

    def forward(self, *args):
        return torch.linspace(start=1, end=6, steps=2)


class Linspace7(Module):
    """100 evenly spaced float32 points from 1 to 4."""

    def forward(self, *args):
        return torch.linspace(1, 4, steps=100, dtype=torch.float32)
class Linspace8(Module):
    """Single-step linspace with int16 output."""

    def forward(self, *args):
        return torch.linspace(1, 2, 1, dtype=torch.int16)


verify_model(Linspace1().float().eval())
verify_model(Linspace2().float().eval())
verify_model(Linspace3().float().eval())
verify_model(Linspace4().float().eval())
verify_model(Linspace5().float().eval())
verify_model(Linspace6().float().eval())
verify_model(Linspace7().float().eval())
verify_model(Linspace8().float().eval())


@tvm.testing.uses_gpu
def test_forward_take():
    """test_forward_take"""
    torch.set_grad_enabled(False)
class Take1(Module):
    """Takes flat-indexed elements using a fixed index tensor (moved to GPU if present)."""

    def forward(self, *args):
        indices = torch.tensor([[0, 0], [1, 0]])
        if torch.cuda.is_available():
            indices = indices.cuda()
        return torch.take(args[0], indices)
class Take2(Module):
    """Takes flat-indexed elements using a caller-supplied index tensor."""

    def forward(self, *args):
        return torch.take(args[0], args[1])


input_data = torch.tensor([[1, 2], [3, 4]])
verify_model(Take1().float().eval(), input_data=input_data)
indices = torch.tensor([[0, 0], [1, 0]])
verify_model(Take2().float().eval(), input_data=[input_data, indices])
indices = torch.tensor([0, -1])
verify_model(Take2().float().eval(), input_data=[input_data, indices])


@tvm.testing.uses_gpu
def test_forward_topk():
    """test_forward_topk"""
    torch.set_grad_enabled(False)
class Topk1(Module):
    """Top-3 along the default (last) dim."""

    def forward(self, *args):
        return torch.topk(args[0], k=3)


class Topk2(Module):
    """Top-3 along dim -2."""

    def forward(self, *args):
        return torch.topk(args[0], k=3, dim=-2)


class Topk3(Module):
    """Top-3 along dim 3."""

    def forward(self, *args):
        return torch.topk(args[0], k=3, dim=3)


class Topk4(Module):
    """Top-3 with largest=True (explicit default)."""

    def forward(self, *args):
        return torch.topk(args[0], k=3, largest=True)


class Topk5(Module):
    """Bottom-3 via largest=False."""

    def forward(self, *args):
        return torch.topk(args[0], k=3, largest=False)
class Topk6(Module):
    """Top-3 with sorted=True (explicit default)."""

    def forward(self, *args):
        return torch.topk(args[0], k=3, sorted=True)


input_shape = [1, 3, 10, 10]
input_data = torch.rand(input_shape).float()
verify_model(Topk1().float().eval(), input_data=input_data)
verify_model(Topk2().float().eval(), input_data=input_data)
verify_model(Topk3().float().eval(), input_data=input_data)
verify_model(Topk4().float().eval(), input_data=input_data)
verify_model(Topk5().float().eval(), input_data=input_data)
verify_model(Topk6().float().eval(), input_data=input_data)


@tvm.testing.uses_gpu
def test_forward_logical_not():
    """test_forward_logical_not"""
    torch.set_grad_enabled(False)
class LogicalNot1(Module):
    """Elementwise logical negation (nonzero -> False, zero -> True)."""

    def forward(self, *args):
        return torch.logical_not(args[0])


input_data = torch.tensor([True, False])
verify_model(LogicalNot1().float().eval(), input_data=input_data)
input_data = torch.tensor([0, 1, -10], dtype=torch.int8)
verify_model(LogicalNot1().float().eval(), input_data=input_data)
input_data = torch.tensor([0.0, 1.5, -10.0], dtype=torch.double)
verify_model(LogicalNot1().float().eval(), input_data=input_data)
input_data = torch.tensor([0.0, 1.0, -10.0], dtype=torch.int32)
verify_model(LogicalNot1().float().eval(), input_data=input_data)


@tvm.testing.uses_gpu
def test_forward_bitwise_not():
    """test_forward_bitwise_not"""
    torch.set_grad_enabled(False)
class BitwiseNot1(Module):
    """Elementwise bitwise NOT (logical NOT for bool tensors)."""

    def forward(self, *args):
        return torch.bitwise_not(args[0])


input_data = torch.tensor([0, 1, -10], dtype=torch.int8)
verify_model(BitwiseNot1().float().eval(), input_data=input_data)
input_data = torch.tensor([0.0, 1.0, -10.0], dtype=torch.int32)
verify_model(BitwiseNot1().float().eval(), input_data=input_data)
input_data = torch.tensor([True, False])
verify_model(BitwiseNot1().float().eval(), input_data=input_data)


@tvm.testing.uses_gpu
def test_forward_bitwise_xor():
    """test_forward_bitwise_xor"""
    torch.set_grad_enabled(False)
class BitwiseXor1(Module):
    """Elementwise bitwise XOR of two caller-supplied tensors."""

    def forward(self, *args):
        lhs, rhs = args[0], args[1]
        return torch.bitwise_xor(lhs, rhs)
class BitwiseXor2(Module):
    """Bitwise XOR against a fixed constant tensor (moved to GPU if present)."""

    def forward(self, *args):
        rhs = torch.tensor([1, 0, 3], dtype=torch.int8)
        if torch.cuda.is_available():
            rhs = rhs.cuda()
        return torch.bitwise_xor(args[0], rhs)


lhs = torch.tensor([-1, -2, 3], dtype=torch.int8)
rhs = torch.tensor([1, 0, 3], dtype=torch.int8)
verify_model(BitwiseXor1().float().eval(), input_data=[lhs, rhs])
lhs = torch.tensor([True, True, False])
rhs = torch.tensor([False, True, False])
verify_model(BitwiseXor1().float().eval(), input_data=[lhs, rhs])
lhs = torch.tensor([-1, -2, 3], dtype=torch.int8)
verify_model(BitwiseXor2().float().eval(), input_data=[lhs])


@tvm.testing.uses_gpu
def test_forward_logical_xor():
    """test_forward_logical_xor"""
    torch.set_grad_enabled(False)
class LogicalXor1(Module):
    """Elementwise logical XOR of two caller-supplied tensors."""

    def forward(self, *args):
        lhs, rhs = args[0], args[1]
        return torch.logical_xor(lhs, rhs)
class LogicalXor2(Module):
    """Logical XOR against a fixed constant tensor (moved to GPU if present)."""

    def forward(self, *args):
        rhs = torch.tensor([1, 0, 3], dtype=torch.int8)
        if torch.cuda.is_available():
            rhs = rhs.cuda()
        return torch.logical_xor(args[0], rhs)


lhs = torch.tensor([-1, -2, 3], dtype=torch.int8)
rhs = torch.tensor([1, 0, 3], dtype=torch.int8)
verify_model(LogicalXor1().float().eval(), input_data=[lhs, rhs])
lhs = torch.tensor([True, True, False])
rhs = torch.tensor([False, True, False])
verify_model(LogicalXor1().float().eval(), input_data=[lhs, rhs])
lhs = torch.tensor([-1, -2, 3], dtype=torch.int8)
verify_model(LogicalXor2().float().eval(), input_data=[lhs])


@tvm.testing.uses_gpu
def test_forward_unary():
    """test_forward_unary"""
    torch.set_grad_enabled(False)
class Sqrt1(Module):
    """Elementwise square root."""

    def forward(self, *args):
        data = args[0]
        return torch.sqrt(data)
class RSqrt1(Module):
    """Elementwise reciprocal square root."""

    def forward(self, *args):
        data = args[0]
        return torch.rsqrt(data)
class Ceil1(Module):
    """Elementwise ceiling."""

    def forward(self, *args):
        data = args[0]
        return torch.ceil(data)