text
stringlengths 1
2.05k
|
---|
# One tiny Module per elementwise unary torch op, so each op is traced and
# verified in isolation by the unary-op test driver below.
# (Restored from a garbled dump: stray "|" separator tokens removed and
# indentation reinstated.)
class Floor1(Module):
    def forward(self, *args):
        return torch.floor(args[0])


class Round1(Module):
    def forward(self, *args):
        return torch.round(args[0])


class Cos1(Module):
    def forward(self, *args):
        return torch.cos(args[0])


class Sin1(Module):
    def forward(self, *args):
        return torch.sin(args[0])


class Tan1(Module):
    def forward(self, *args):
        return torch.tan(args[0])


class Tanh1(Module):
    def forward(self, *args):
        return torch.tanh(args[0])


class Acos1(Module):
    def forward(self, *args):
        return torch.acos(args[0])


class Asin1(Module):
    def forward(self, *args):
        return torch.asin(args[0])


class Atan1(Module):
    def forward(self, *args):
        return torch.atan(args[0])


class Log1(Module):
    def forward(self, *args):
        return torch.log(args[0])


class Exp1(Module):
    def forward(self, *args):
        return torch.exp(args[0])


class Erf1(Module):
    def forward(self, *args):
        return torch.erf(args[0])


class Trunc1(Module):
    def forward(self, *args):
        return torch.trunc(args[0])


class Sign1(Module):
    def forward(self, *args):
        return torch.sign(args[0])


class Neg1(Module):
    def forward(self, *args):
        return torch.neg(args[0])


class Sinh1(Module):
    def forward(self, *args):
        return torch.sinh(args[0])


class Cosh1(Module):
    def forward(self, *args):
        return torch.cosh(args[0])


class Log2_1(Module):
    def forward(self, *args):
        return torch.log2(args[0])


class Log10_1(Module):
    def forward(self, *args):
        return torch.log10(args[0])


class Log1p_1(Module):
    def forward(self, *args):
        return torch.log1p(args[0])


class Square(Module):
    def forward(self, *args):
        return torch.square(args[0])
# Shared 4-D input for all unary-op checks; the ops are elementwise, so a
# single shape covers them all.
# NOTE(review): Sqrt1, RSqrt1 and Ceil1 are defined earlier in the file,
# outside this view — confirm they exist alongside the classes above.
input_shape = [1, 3, 10, 10]
input_data = torch.rand(input_shape).float()
verify_model(Square().float().eval(), input_data=input_data)
verify_model(Sqrt1().float().eval(), input_data=input_data)
verify_model(RSqrt1().float().eval(), input_data=input_data)
verify_model(Ceil1().float().eval(), input_data=input_data)
verify_model(Floor1().float().eval(), input_data=input_data)
verify_model(Round1().float().eval(), input_data=input_data)
verify_model(Cos1().float().eval(), input_data=input_data)
verify_model(Cosh1().float().eval(), input_data=input_data)
verify_model(Sin1().float().eval(), input_data=input_data)
verify_model(Sinh1().float().eval(), input_data=input_data)
verify_model(Tan1().float().eval(), input_data=input_data)
verify_model(Tanh1().float().eval(), input_data=input_data)
verify_model(Acos1().float().eval(), input_data=input_data)
verify_model(Asin1().float().eval(), input_data=input_data)
verify_model(Atan1().float().eval(), input_data=input_data)
verify_model(Log1().float().eval(), input_data=input_data)
verify_model(Log2_1().float().eval(), input_data=input_data)
verify_model(Log10_1().float().eval(), input_data=input_data)
verify_model(Log1p_1().float().eval(), input_data=input_data)
verify_model(Exp1().float().eval(), input_data=input_data)
verify_model(Erf1().float().eval(), input_data=input_data)
verify_model(Trunc1().float().eval(), input_data=input_data)
verify_model(Sign1().float().eval(), input_data=input_data)
verify_model(Neg1().float().eval(), input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_tril():
    """test_forward_tril"""
    torch.set_grad_enabled(False)

    # Default diagonal (0), then positive and negative diagonal offsets,
    # each on a 2-D and a 4-D input.
    def test_func(input_data):
        return torch.tril(input_data)

    input_data = torch.rand([3, 3]).float()
    verify_model(test_func, input_data=input_data)
    input_data = torch.rand([1, 3, 10, 10]).float()
    verify_model(test_func, input_data=input_data)

    def test_func1(input_data):
        return torch.tril(input_data, 1)

    input_data = torch.rand([3, 3]).float()
    verify_model(test_func1, input_data=input_data)
    input_data = torch.rand([1, 3, 10, 10]).float()
    verify_model(test_func1, input_data=input_data)

    def test_func2(input_data):
        return torch.tril(input_data, -1)

    input_data = torch.rand([3, 3]).float()
    verify_model(test_func2, input_data=input_data)
    input_data = torch.rand([1, 3, 10, 10]).float()
    verify_model(test_func2, input_data=input_data)
@tvm.testing.uses_gpu
def test_forward_triu():
    """test_forward_triu"""
    torch.set_grad_enabled(False)

    # Build one traceable function per diagonal offset; None means the
    # default-diagonal overload (no second argument).
    def make_fn(diagonal):
        if diagonal is None:
            return lambda data: torch.triu(data)
        return lambda data: torch.triu(data, diagonal)

    for diagonal in (None, 1, -1):
        fn = make_fn(diagonal)
        verify_model(fn, input_data=torch.rand([3, 3]).float())
        verify_model(fn, input_data=torch.rand([1, 3, 10, 10]).float())
@tvm.testing.uses_gpu
def test_forward_where():
    """test_forward_where"""
    torch.set_grad_enabled(False)

    class Where1(Module):
        def forward(self, *args):
            # Constant "else" branch; moved to GPU when available so it
            # matches the input's device during tracing.
            y = torch.ones([3, 2])
            if torch.cuda.is_available():
                y = y.cuda()
            return torch.where(args[0] > 0, args[0], y)

    class Where2(Module):
        def forward(self, *args):
            return torch.where(args[0] > 0, args[0], args[1])

    class Where3(Module):
        def forward(self, *args):
            # Single-argument where returns a tuple of index tensors.
            return torch.where(args[0])[0]

    x = torch.rand([3, 2]).float()
    verify_model(Where1(), input_data=[x])
    y = torch.rand([3, 2])
    verify_model(Where2(), input_data=[x, y])
    inp = torch.rand([10])
    inp[3:8] = 0
    verify_trace_model(Where3(), [inp], ["llvm"])
@tvm.testing.uses_gpu
def test_forward_addcdiv():
    """test_forward_addcdiv"""
    torch.set_grad_enabled(False)

    class Addcdiv1(Module):
        def forward(self, *args):
            t1 = torch.ones([3, 1])
            t2 = torch.ones([1, 3])
            if torch.cuda.is_available():
                t1 = t1.cuda()
                t2 = t2.cuda()
            # Legacy positional `value` argument — deprecated in newer torch
            # but kept for tracing coverage of the old signature.
            return torch.addcdiv(args[0], 0.1, t1, t2)

    class Addcdiv2(Module):
        def forward(self, *args):
            return torch.addcdiv(args[0], 0.5, args[1], args[2])

    input_data = torch.rand([1, 3]).float()
    verify_model(Addcdiv1().float().eval(), input_data=input_data)
    t1 = torch.rand([3, 1]).float()
    t2 = torch.rand([1, 3]).float()
    verify_model(Addcdiv2().float().eval(), input_data=[input_data, t1, t2])
@tvm.testing.uses_gpu
def test_forward_addcmul():
    """test_forward_addcmul"""
    torch.set_grad_enabled(False)

    class Addcmul1(Module):
        def forward(self, *args):
            t1 = torch.ones([3, 1])
            t2 = torch.ones([1, 3])
            if torch.cuda.is_available():
                t1 = t1.cuda()
                t2 = t2.cuda()
            # Legacy positional `value` argument (pre-1.5 style signature).
            return torch.addcmul(args[0], 0.1, t1, t2)

    class Addcmul2(Module):
        def forward(self, *args):
            return torch.addcmul(args[0], 0.5, args[1], args[2])

    input_data = torch.rand([1, 3]).float()
    verify_model(Addcmul1().float().eval(), input_data=input_data)
    t1 = torch.rand([3, 1]).float()
    t2 = torch.rand([1, 3]).float()
    verify_model(Addcmul2().float().eval(), input_data=[input_data, t1, t2])
@tvm.testing.uses_gpu
def test_forward_true_divide():
    """test_forward_true_divide"""
    # aten::true_divide only exists from torch 1.5 onwards.
    if package_version.parse(torch.__version__) < package_version.parse("1.5.0"):
        return
    torch.set_grad_enabled(False)

    class TrueDivide(Module):
        def forward(self, *args):
            return torch.true_divide(args[0], args[1])

    dividend = torch.rand([5, 3]).float()
    # Offset keeps divisors away from zero.
    divisor_tensor = torch.rand([5, 3]).float() + 0.5
    divisor_scalar = torch.tensor(1.0, dtype=torch.float32)
    verify_model(
        TrueDivide().float().eval(), input_data=[dividend, divisor_tensor], atol=1e-4, rtol=1e-4
    )
    verify_model(
        TrueDivide().float().eval(), input_data=[dividend, divisor_scalar], atol=1e-4, rtol=1e-4
    )
@tvm.testing.uses_gpu
def test_forward_is_floating_point():
    """test_forward_is_floating_point"""
    torch.set_grad_enabled(False)

    class IsFloatingPoint(Module):
        def forward(self, arg):
            return torch.is_floating_point(arg)

    targets = _get_default_vm_targets()
    # Expected True for the float dtypes, False for the integer dtypes.
    verify_script_model(IsFloatingPoint(), [(1, 1)], targets, idtype=torch.float64)
    verify_script_model(IsFloatingPoint(), [(1, 1)], targets, idtype=torch.float32)
    verify_script_model(IsFloatingPoint(), [(1, 1)], targets, idtype=torch.float16)
    verify_script_model(IsFloatingPoint(), [(1, 1)], targets, idtype=torch.int64)
    verify_script_model(IsFloatingPoint(), [(1, 1)], targets, idtype=torch.int32)
    verify_script_model(IsFloatingPoint(), [(1, 1)], targets, idtype=torch.int16)
    verify_script_model(IsFloatingPoint(), [(1, 1)], targets, idtype=torch.int8)
    verify_script_model(IsFloatingPoint(), [(1, 1)], targets, idtype=torch.uint8)
@tvm.testing.uses_gpu
def test_forward_traced_function():
    """Verify that a plain Python function (not a Module) can be traced."""

    def fn(t1, t2):
        return t1 + t2

    lhs, rhs = torch.randn(3, 4), torch.randn(3, 4)
    verify_model(fn, input_data=[lhs, rhs])
@tvm.testing.uses_gpu
def test_forward_dtypes():
    """test_forward_dtypes"""

    def fn(t1, t2):
        return 2.5 * t1 + t2

    # Mixed scalar/tensor arithmetic across integer and double dtypes.
    for dt in [torch.int32, torch.int64, torch.double]:
        tensor1 = torch.randn(3, 4).to(dtype=dt)
        tensor2 = torch.randn(3, 4).to(dtype=dt)
        verify_model(fn, input_data=[tensor1, tensor2])

    class ModuleWithIntParameters(Module):
        def __init__(self, arr):
            super().__init__()
            # Integer (int64) parameter — exercises non-float weights.
            self.param = torch.nn.Parameter(torch.LongTensor(arr), requires_grad=False)

        def forward(self, x):
            return x.long() + self.param

    shape = (10, 10)
    param = torch.ones(shape, dtype=torch.long)
    inp = torch.ones(shape, dtype=torch.int)
    verify_model(ModuleWithIntParameters(param), input_data=inp)
@tvm.testing.uses_gpu
def test_weight_names():
    # Parameter names recovered by the frontend must match the traced
    # module's own named_parameters().
    tm = torch.jit.trace(torch.nn.Linear(3, 4), [torch.randn(2, 3)])
    _, params = relay.frontend.from_pytorch(tm, [("input", (2, 3))])
    assert set(params.keys()) == set(n for n, _ in tm.named_parameters())
@tvm.testing.uses_gpu
def test_duplicate_weight_use():
    """test_duplicate_weight_use"""

    class Test(Module):
        def __init__(self):
            super().__init__()
            self.lin = torch.nn.Linear(5, 3)

        def forward(self, x):
            # The same weight tensor is consumed twice: once by the linear
            # layer and once directly in a matmul.
            x = self.lin(x)
            x = x @ self.lin.weight
            return x

    verify_model(Test(), input_data=[torch.randn(5, 5)])
@tvm.testing.uses_gpu
def test_forward_matmul():
    """test_forward_matmul"""
    torch.set_grad_enabled(False)

    class MatMul1(Module):
        def forward(self, *args):
            return torch.matmul(args[0], args[1])

    # matrix x vector
    tensor1 = torch.randn(3, 4)
    tensor2 = torch.randn(4)
    verify_model(MatMul1().float().eval(), input_data=[tensor1, tensor2])
    # vector x matrix
    tensor1 = torch.randn(4)
    tensor2 = torch.randn(4, 3)
    verify_model(MatMul1().float().eval(), input_data=[tensor1, tensor2])
    # matrix x matrix: expected to lower to nn.dense
    tensor1 = torch.randn(10, 4)
    tensor2 = torch.randn(4, 10)
    verify_model(MatMul1().float().eval(), input_data=[tensor1, tensor2], expected_ops=["nn.dense"])
    # batched x batched: expected to lower to nn.batch_matmul
    tensor1 = torch.randn(10, 3, 4)
    tensor2 = torch.randn(10, 4, 5)
    verify_model(
        MatMul1().float().eval(), input_data=[tensor1, tensor2], expected_ops=["nn.batch_matmul"]
    )
    # batched x 2-D and 2-D x batched: both expected to lower to nn.dense
    tensor1 = torch.randn(10, 3, 4)
    tensor2 = torch.randn(4, 5)
    verify_model(MatMul1().float().eval(), input_data=[tensor1, tensor2], expected_ops=["nn.dense"])
    tensor1 = torch.randn(10, 4)
    tensor2 = torch.randn(3, 4, 5)
    verify_model(MatMul1().float().eval(), input_data=[tensor1, tensor2], expected_ops=["nn.dense"])
    # 4-D batched matmul
    tensor1 = torch.randn(1, 12, 14, 64)
    tensor2 = torch.randn(1, 12, 64, 14)
    verify_model(MatMul1().float().eval(), input_data=[tensor1, tensor2])
def test_forward_index():
    """test_forward_index"""
    torch.set_grad_enabled(False)
    input_shape = [3, 4, 5, 6]

    class Index0(Module):
        def forward(self, x):
            # Mixed advanced (list) and basic (slice, int) indexing.
            return x[[0, 1], [0, 2], :2, 4]

    input_data = torch.rand(input_shape).float()
    verify_model(Index0().eval(), input_data=input_data)

    class Index1(Module):
        def forward(self, x):
            # Advanced indexing on every dimension.
            return x[[0], [1, 2, 3, 0], [3, 1, 2, 2], [4, 2, 1, 0]]

    input_data = torch.rand(input_shape).float()
    verify_model(Index1().eval(), input_data=input_data)

    def test_fn_bool_mask():
        return lambda data, mask: data[0, mask]

    data = torch.tensor([[1, 2, 3], [4, 5, 6]])
    mask = torch.tensor([True, True, False])
    verify_trace_model(test_fn_bool_mask(), [data, mask], ["llvm", "cuda"])
def test_logsumexp():
    """test_logsumexp"""

    class Logsumexp(Module):
        def __init__(self, dim, keepdim=False):
            super().__init__()
            self.dim = dim
            self.keepdim = keepdim

        def forward(self, x):
            return torch.logsumexp(x, self.dim, self.keepdim)

    input_shape = (100, 100)
    input_data = torch.rand(input_shape)
    verify_model(Logsumexp(0), input_data=input_data)
    verify_model(Logsumexp(0, keepdim=True), input_data=input_data)
    # Also exercise the float64 path.
    verify_model(Logsumexp(1, keepdim=True), input_data=input_data.double())
def test_stack():
    """test_stack"""

    class Stack(torch.nn.Module):
        def __init__(self, axis=0):
            super().__init__()
            self.axis = axis

        def forward(self, x):
            return torch.stack((x, x), dim=self.axis)

    inp = torch.randn(8, 8, 8)
    # Default, negative, last-valid and most-negative axes.
    verify_model(Stack(), input_data=inp)
    verify_model(Stack(axis=-1), input_data=inp)
    verify_model(Stack(axis=3), input_data=inp)
    verify_model(Stack(axis=-4), input_data=inp)
def test_stack_dynamic():
    """test_stack_dynamic"""

    class Stack(torch.nn.Module):
        def forward(self, x):
            # Build the list in a loop over a runtime size, so the stacked
            # length is only known when the script executes.
            tensor_list = []
            for i in range(x.size(0)):
                tensor_list += [x[i]]
            return torch.stack(tensor_list, dim=0)

    verify_script_model(Stack(), [(8, 8, 8)], _get_default_vm_targets())
def test_forward_unbind():
    """test_forward_unbind"""

    class Unbind(torch.nn.Module):
        def __init__(self, axis=0):
            super().__init__()
            self.axis = axis

        def forward(self, x):
            return torch.unbind(x, self.axis)

    inp = torch.randn(8, 8, 8)
    verify_model(Unbind(0), input_data=inp)
    verify_model(Unbind(1), input_data=inp)
    verify_model(Unbind(2), input_data=inp)
def test_forward_nonzero():
    """test_forward_nonzero"""

    class Nonzero(Module):
        def __init__(self, as_tuple=False):
            super().__init__()
            self.as_tuple = as_tuple

        def forward(self, data):
            return torch.nonzero(data, as_tuple=self.as_tuple)

    # Mixture of zero, positive and negative entries.
    inp = torch.Tensor(np.array([[0, 1, 0], [2, 0, 9], [-1, -1, 0]]).astype("float32"))
    verify_trace_model(Nonzero(), [inp], ["llvm"])
def test_forward_scatter():
    """test_forward_scatter"""

    # Factories returning traceable lambdas so `dim` is baked in at trace time.
    def test_fn_scatter(dim):
        return lambda data, index, src: torch.scatter(data, dim=dim, index=index, src=src)

    def test_fn_scatter_add(dim):
        return lambda data, index, src: torch.scatter_add(data, dim=dim, index=index, src=src)

    # Scatter along dim 0 with a full index matrix.
    in_data = torch.zeros(3, 5)
    in_index = torch.tensor([[0, 1, 2, 0, 0], [2, 0, 0, 1, 2]])
    in_src = torch.rand(2, 5)
    targets = ["llvm", "cuda"]
    verify_trace_model(test_fn_scatter(0), [in_data, in_index, in_src], targets)
    verify_trace_model(test_fn_scatter_add(0), [in_data, in_index, in_src], targets)
    # Scatter along dim 1 with a single-column index.
    in_data = torch.zeros(2, 4)
    in_index = torch.tensor([[2], [3]])
    in_src = torch.rand(2, 1)
    verify_trace_model(test_fn_scatter(1), [in_data, in_index, in_src], targets)
    verify_trace_model(test_fn_scatter_add(1), [in_data, in_index, in_src], targets)
def test_forward_index_put():
    """test_forward_index_put"""

    def test_fn_index_put2():
        return lambda data, xidx, yidx, values: torch.index_put(
            data, indices=[xidx, yidx], values=values
        )

    def test_fn_index_put3a():
        # accumulate=True adds into existing values instead of overwriting.
        return lambda data, xidx, yidx, zidx, values: torch.index_put(
            data, indices=[xidx, yidx, zidx], values=values, accumulate=True
        )

    shape = (3, 5)
    in_data = torch.zeros(shape)
    xidx = torch.tensor([0, 1, 2, 2])
    yidx = torch.tensor([0, 1, 3, 4])
    values = torch.tensor([2.0, 4.0, 7.0, 9.0])
    targets = ["llvm", "cuda"]
    verify_trace_model(test_fn_index_put2(), [in_data, xidx, yidx, values], targets)
    shape = (3, 5, 3)
    in_data = torch.zeros(shape)
    xidx = torch.tensor([0, 1, 2, 2, 0])
    yidx = torch.tensor([0, 1, 3, 4, 0])
    zidx = torch.tensor([0, 1, 1, 2, 0])
    values = torch.tensor([2.0, 4.0, 7.0, 9.0, 1.0])
    verify_trace_model(test_fn_index_put3a(), [in_data, xidx, yidx, zidx, values], targets)
def test_numel():
    """test_numel"""

    class Numel(Module):
        def forward(self, data):
            # Wrap the scalar element count in a tensor so it becomes a
            # graph output.
            return torch.tensor(torch.numel(data))

    targets = _get_default_vm_targets()
    verify_script_model(Numel(), [(1,)], targets)
    verify_script_model(Numel(), [(3, 5)], targets)
    verify_script_model(Numel(), [(3, 5, 8)], targets)
def test_empty():
    """Test for aten::empty"""

    def test_func():
        # torch.empty returns uninitialized memory, so only the output
        # shape can be verified.
        return torch.empty([1, 3, 10, 10])

    verify_model_with_input(test_func, [], assert_shape_only=True)
def test_empty_like():
    """Test for aten::empty_like"""

    def test_func(data):
        # Output values are uninitialized; only shape/dtype are meaningful.
        return torch.empty_like(data)

    verify_model_with_input(test_func, [torch.rand([1, 3, 10, 10]).float()], assert_shape_only=True)
@tvm.testing.uses_gpu
def test_new_empty():
    """test_new_empty"""
    torch.set_grad_enabled(False)
    input_shape = [1, 3, 10, 10]

    def test_func(input_tensor):
        # new_empty inherits dtype/device from input_tensor.
        return input_tensor.new_empty([3, 10, 10])

    verify_model_with_input(test_func, [torch.rand(input_shape).float()], assert_shape_only=True)

    def test_func1(input_tensor):
        # Explicit dtype override.
        return input_tensor.new_empty([3, 10, 10], dtype=torch.int32)

    verify_model_with_input(test_func1, [torch.rand(input_shape).float()], assert_shape_only=True)
def test_randn():
    """Test for aten::randn"""

    def test_func():
        # Shape given as a list.
        return torch.randn([1, 3, 10, 10])

    verify_model_with_input(test_func, [], assert_shape_only=True)

    def test_func1():
        # Shape given as varargs.
        return torch.randn(1, 3, 10, 10)

    verify_model_with_input(test_func1, [], assert_shape_only=True)
def test_forward_pretrained_bert_base_uncased():
    """
    Refer the bert example given in https://pypi.org/project/pytorch-pretrained-bert

    .. code-block:: bash

        pip install pytorch_pretrained_bert==0.6.2 --user
    """
    try:
        from pytorch_pretrained_bert import BertForMaskedLM, BertTokenizer
    except ImportError:
        print("Torch pretrained bert package must be installed to run this script.")
        return

    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    text = "[CLS] Who was Jim Henson ? [SEP] Jim Henson was a puppeteer [SEP]"
    tokenized_text = tokenizer.tokenize(text)
    # Mask the token the model should predict ("henson").
    masked_index = 8
    tokenized_text[masked_index] = "[MASK]"
    assert tokenized_text == [
        "[CLS]",
        "who",
        "was",
        "jim",
        "henson",
        "?",
        "[SEP]",
        "jim",
        "[MASK]",
        "was",
        "a",
        "puppet",
        "##eer",
        "[SEP]",
    ]
    indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
    # Sentence-A / sentence-B segment ids.
    segments_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]
    tokens_tensor = torch.tensor([indexed_tokens])
    segments_tensors = torch.tensor([segments_ids])
    model = BertForMaskedLM.from_pretrained("bert-base-uncased")
    model.eval()
    with torch.no_grad():
        torch_preds = model(tokens_tensor, segments_tensors)
    scripted_model = torch.jit.trace(model, (tokens_tensor, segments_tensors)).eval()
    input_1 = "input_ids"
    input_2 = "input.2"
    shape_list = [(input_1, list(tokens_tensor.shape)), (input_2, list(segments_tensors.shape))]
    mod, params = relay.frontend.from_pytorch(scripted_model, shape_list)
    target = "llvm"
    with tvm.transform.PassContext(opt_level=3):
        relay_graph, relay_lib, relay_params = relay.build(mod, target=target, params=params)
    dev = tvm.device(target, 0)
    relay_model = graph_executor.create(relay_graph, relay_lib, dev)
    relay_model.set_input(**relay_params)
    relay_model.set_input(input_1, tokens_tensor)
    relay_model.set_input(input_2, segments_tensors)
    relay_model.run()
    compiled_output = relay_model.get_output(0).numpy()
    tvm.testing.assert_allclose(torch_preds, compiled_output, rtol=1e-3, atol=1e-3)
    torch_pred_idx = torch.argmax(torch_preds[0, masked_index]).item()
    torch_pred_token = tokenizer.convert_ids_to_tokens([torch_pred_idx])[0]
    tvm_pred_idx = compiled_output[0, masked_index].argmax()
    tvm_pred_token = tokenizer.convert_ids_to_tokens([tvm_pred_idx])[0]
    assert torch_pred_idx == tvm_pred_idx
    assert torch_pred_token == tvm_pred_token
    # Fixed: the first print previously interpolated torch_pred_idx twice
    # instead of printing the predicted token.
    print(f"Torch top-1 id: {torch_pred_idx}, token: {torch_pred_token}")
    print(f"TVM   top-1 id: {tvm_pred_idx}, token: {tvm_pred_token}")
@pytest.mark.skipif(
    platform.machine() == "aarch64",
    reason="Currently failing on AArch64",
)
def test_convert_torch_script_with_input_types():
    """test_convert_torch_script_with_input_types"""

    def model_fn(x, y):
        # x is cast first, so the add runs in int32 regardless of x's dtype.
        x = x.to(dtype=torch.int32)
        y = x + y
        return y

    ishape = (4, 5)
    input_x = torch.rand(ishape, dtype=torch.float32)
    input_y = torch.randint(low=0, high=100, size=ishape, dtype=torch.int32)
    inputs = [input_x, input_y]
    # Round-trip through torch.jit.save/load to exercise conversion of a
    # module loaded from disk.
    script_module = torch.jit.trace(model_fn, inputs)
    fname = "tmp.pt"
    torch.jit.save(script_module, fname)
    loaded = torch.jit.load(fname)
    os.remove(fname)
    verify_model(loaded.eval(), input_data=inputs)

    def expected(x_shape, y_shape):
        # Reference Relay graph: cast x to int32, then add.
        x = relay.var("x", shape=x_shape, dtype="float32")
        y = relay.var("y", shape=y_shape, dtype="int32")
        args = [x, y]
        x1 = relay.cast(x, "int32")
        y1 = relay.add(x1, y)
        mod = tvm.IRModule.from_expr(relay.Function(args, y1))
        return mod["main"]

    # Generic "float"/"int" dtype strings should be accepted by the frontend.
    input_infos = [("input0", (ishape, "float")), ("input1", (ishape, "int"))]
    mod, _ = relay.frontend.from_pytorch(loaded, input_infos)
    expected_mod = expected(ishape, ishape)
    assert tvm.ir.structural_equal(expected_mod, mod["main"], map_free_vars=True)
def test_bincount():
    """test_bincount"""

    def test_fn(x, weights=None):
        return torch.bincount(x, weights=weights)

    inp = torch.randint(0, 100, (10000,), dtype=torch.int64)
    weights = torch.linspace(0, 100, steps=10000)
    targets = ["llvm", "cuda"]
    # Unweighted and weighted counting.
    verify_trace_model(test_fn, [inp], targets)
    verify_trace_model(test_fn, [inp, weights], targets)
def test_hard_swish():
    """test_hard_swish"""
    # 1-D, 2-D and 3-D inputs, each against the functional and in-place
    # variants of Hardswish.
    for shape in [(8,), (8, 10), (1, 1, 10)]:
        data = torch.rand(shape).float()
        verify_model(torch.nn.Hardswish().eval(), input_data=data)
        verify_model(torch.nn.Hardswish(inplace=True).eval(), input_data=data)
def test_hard_sigmoid():
    """test_hard_sigmoid"""
    # 1-D, 2-D and 3-D inputs, each against the functional and in-place
    # variants of Hardsigmoid.
    for shape in [(8,), (8, 10), (1, 1, 10)]:
        data = torch.rand(shape).float()
        verify_model(torch.nn.Hardsigmoid().eval(), input_data=data)
        verify_model(torch.nn.Hardsigmoid(inplace=True).eval(), input_data=data)
def test_cumsum():
    """test_cumsum"""

    def test_fn(dim, dtype=None):
        return lambda x: torch.cumsum(x, dim=dim, dtype=dtype)

    # Integer inputs, with and without an explicit output dtype.
    inp = torch.randint(0, 100, (10000,), dtype=torch.int32)
    verify_model(test_fn(0), [inp])
    verify_model(test_fn(0), [inp.to(torch.int64)])
    verify_model(test_fn(0, dtype=torch.int64), [inp.to(torch.int64)])
    # Float input over both axes.
    inp = torch.randn((100, 100), dtype=torch.float32)
    verify_model(test_fn(dim=0, dtype=torch.float64), [inp])
    verify_model(test_fn(dim=1), [inp])
    # Boolean input accumulated into int32.
    inp = torch.randn((100, 100), dtype=torch.float32) > 0.5
    verify_model(test_fn(dim=0, dtype=torch.int32), [inp])
def test_masked_fill():
    """test_masked_fill"""

    def test_fn(x, mask):
        return torch.masked_fill(x, mask, 0.0)

    inp = torch.randn(100, 100)
    verify_model(test_fn, [inp, inp > 0.5])
    # float64 input with the same boolean mask.
    verify_model(test_fn, [inp.to(torch.float64), inp > 0.5])
def test_transformer():
    """test_transformer"""
    model = torch.nn.Transformer(d_model=256, nhead=8, num_encoder_layers=6, num_decoder_layers=6)
    model = model.eval()
    # (seq_len, batch, d_model) source and target sequences.
    src = torch.rand((10, 32, 256))
    tgt = torch.rand((20, 32, 256))
    verify_model(model.eval(), input_data=[src, tgt])
def test_argsort():
    """test_argsort"""

    def test_fn(dim, descending):
        return lambda x: torch.argsort(x, dim=dim, descending=descending)

    # 1-D input: only dim 0 is valid; try both sort orders.
    vec = torch.randn(100)
    for order in (True, False):
        verify_model(test_fn(0, order), [vec])
    # 2-D input: both axes, both sort orders.
    mat = torch.randn(100, 100)
    for axis in (0, 1):
        for order in (True, False):
            verify_model(test_fn(axis, order), [mat])
def test_sort():
    """test_sort"""

    def test_fn(dim, descending):
        return lambda x: torch.sort(x, dim=dim, descending=descending)

    # 1-D input with positive and negative dim spellings.
    vec = torch.randn(100)
    for dim, descending in [(0, True), (-1, False)]:
        verify_model(test_fn(dim, descending), [vec])
    # 2-D input over both axes, positive and negative dims.
    mat = torch.randn(100, 100)
    for dim, descending in [(0, True), (-2, False), (1, True), (-1, False)]:
        verify_model(test_fn(dim, descending), [mat])
def test_logical_and():
    """test_logical_and"""

    def test_fn(x, y):
        return torch.logical_and(x, y)

    # Non-zero int8 values are treated as True.
    int_lhs = torch.tensor([0, 1, 10, 0], dtype=torch.int8)
    int_rhs = torch.tensor([4, 0, 1, 0], dtype=torch.int8)
    verify_model(test_fn, [int_lhs, int_rhs])
    # Native boolean tensors.
    bool_lhs = torch.tensor([True, False, True])
    bool_rhs = torch.tensor([True, False, False])
    verify_model(test_fn, [bool_lhs, bool_rhs])
def test_masked_select():
    """test_masked_select"""

    def test_fn(x, mask):
        return torch.masked_select(x, mask)

    # 1-D, 2-D and 3-D inputs; the mask keeps roughly the top third of
    # standard-normal values.
    for shape in [(10,), (3, 4), (16, 32, 64)]:
        data = torch.randn(*shape)
        verify_trace_model(test_fn, [data, data.ge(0.5)], ["llvm", "cuda"])
def test_unique():
    """test_unique"""

    def test_fn(is_sorted, return_inverse, return_counts):
        return lambda x: torch.unique(x, is_sorted, return_inverse, return_counts)

    in_data = torch.randint(0, 20, (10,), dtype=torch.int32)
    targets = ["llvm", "cuda"]
    verify_trace_model(test_fn(True, True, True), [in_data], targets)
    verify_trace_model(test_fn(True, False, True), [in_data], targets)
    verify_trace_model(test_fn(True, True, False), [in_data], targets)
    # NOTE(review): repeats (True, False, True); possibly intended to be
    # (True, False, False) — confirm against upstream history.
    verify_trace_model(test_fn(True, False, True), [in_data], targets)
    in_data = torch.randint(0, 20, (20,), dtype=torch.int64)
    verify_trace_model(test_fn(True, True, True), [in_data], targets)
    verify_trace_model(test_fn(True, False, True), [in_data], targets)
    verify_trace_model(test_fn(True, True, False), [in_data], targets)
    verify_trace_model(test_fn(True, False, True), [in_data], targets)
def test_forward_nll_loss():
    """test_forward_nll_loss"""
    torch.set_grad_enabled(False)
    N, C = 10, 3
    predictions = torch.rand((N, C)).float()
    targets = torch.randint(0, 3, (N,))
    weights = torch.tensor([1, 2, 3]).float()
    # Default, weighted, ignore-index and both non-default reductions.
    verify_model(torch.nn.NLLLoss().eval(), input_data=[predictions, targets])
    verify_model(torch.nn.NLLLoss(weight=weights).eval(), input_data=[predictions, targets])
    verify_model(torch.nn.NLLLoss(ignore_index=1).eval(), input_data=[predictions, targets])
    verify_model(torch.nn.NLLLoss(reduction="sum").eval(), input_data=[predictions, targets])
    verify_model(torch.nn.NLLLoss(reduction="none").eval(), input_data=[predictions, targets])
    # Multidimensional (N, C, d1, d2) case.
    d1, d2 = 2, 3
    predictions = torch.rand((N, C, d1, d2)).float()
    targets = torch.randint(0, 3, (N, d1, d2))
    verify_model(torch.nn.NLLLoss().eval(), input_data=[predictions, targets])
    verify_model(torch.nn.NLLLoss(weight=weights).eval(), input_data=[predictions, targets])
    verify_model(torch.nn.NLLLoss(ignore_index=1).eval(), input_data=[predictions, targets])
    verify_model(torch.nn.NLLLoss(reduction="sum").eval(), input_data=[predictions, targets])
    verify_model(torch.nn.NLLLoss(reduction="none").eval(), input_data=[predictions, targets])
def test_cross_entropy_loss():
    """test_cross_entropy_loss"""
    torch.set_grad_enabled(False)
    N, C = 10, 3
    # Class-index targets.
    predictions = torch.rand((N, C)).float()
    targets = torch.randint(0, 3, (N,))
    weights = torch.tensor([1, 2, 3]).float()
    verify_model(torch.nn.CrossEntropyLoss().eval(), input_data=[predictions, targets])
    verify_model(
        torch.nn.CrossEntropyLoss(weight=weights).eval(), input_data=[predictions, targets]
    )
    # Probability (soft) targets.
    predictions = torch.randn(N, C).float()
    targets = torch.randn(N, C)
    verify_model(torch.nn.CrossEntropyLoss().eval(), input_data=[predictions, targets])
def test_forward_l1_loss():
    """test_forward_l1_loss"""
    torch.set_grad_enabled(False)
    batch, classes = 10, 3
    # 2-D and 4-D inputs, each with mean (default), sum and none reductions.
    for shape in [(batch, classes), (batch, classes, 2, 3)]:
        predictions = torch.rand(shape).float()
        targets = torch.rand(shape).float()
        for loss in (
            torch.nn.L1Loss(),
            torch.nn.L1Loss(reduction="sum"),
            torch.nn.L1Loss(reduction="none"),
        ):
            verify_model(loss.eval(), input_data=[predictions, targets])
def test_forward_mse_loss():
    """test_forward_mse_loss"""
    torch.set_grad_enabled(False)
    N, C = 10, 3
    predictions = torch.rand((N, C)).float()
    targets = torch.rand((N, C)).float()
    verify_model(torch.nn.MSELoss().eval(), input_data=[predictions, targets])
    verify_model(torch.nn.MSELoss(reduction="sum").eval(), input_data=[predictions, targets])
    verify_model(torch.nn.MSELoss(reduction="none").eval(), input_data=[predictions, targets])
    # Multidimensional (N, C, d1, d2) case.
    d1, d2 = 2, 3
    predictions = torch.rand((N, C, d1, d2)).float()
    targets = torch.rand((N, C, d1, d2)).float()
    verify_model(torch.nn.MSELoss().eval(), input_data=[predictions, targets])
    verify_model(torch.nn.MSELoss(reduction="sum").eval(), input_data=[predictions, targets])
    verify_model(torch.nn.MSELoss(reduction="none").eval(), input_data=[predictions, targets])
@tvm.testing.uses_gpu
def test_forward_flip():
    """Test for aten::flip"""
    torch.set_grad_enabled(False)

    class Flip(Module):
        def __init__(self, axis=0):
            super().__init__()
            self.axis = axis

        def forward(self, x):
            return x.flip([self.axis])

    input_t = torch.randn(2, 3, 4)
    # Every positive axis plus a negative one.
    verify_model(Flip(axis=0), input_data=input_t)
    verify_model(Flip(axis=1), input_data=input_t)
    verify_model(Flip(axis=2), input_data=input_t)
    verify_model(Flip(axis=-1), input_data=input_t)
def test_annotate_span():
    """test_annotate_span"""
    model = torchvision.models.resnet18().eval()
    inp = torch.randn([1, 3, 224, 224])
    trace = torch.jit.trace(model, inp).eval()
    mod, _ = relay.frontend.from_pytorch(
        trace, [("input", inp.shape)], use_parser_friendly_name=True
    )
    # Only checks that span annotation runs without raising.
    relay.transform.AnnotateSpans()(mod)
@tvm.testing.uses_gpu
def test_all_any():
    """test_all_any"""

    def test_fn(f, dim=None, keepdim=False):
        return lambda x: f(x, dim=dim, keepdim=keepdim)

    def test_fn_no_arg(f):
        return lambda x: f(x)

    # Same cases for both torch.all and torch.any: bool and uint8 inputs,
    # with a dim argument, keepdim, and the no-argument full reduction.
    for f in [torch.all, torch.any]:
        verify_model(test_fn(f, 0), [torch.rand(1, 2).bool()])
        verify_model(test_fn(f, 0), [torch.arange(0, 3).to(torch.uint8)])
        verify_model(test_fn(f, 1), [torch.rand(4, 2).bool()])
        verify_model(test_fn(f, 0, keepdim=True), [torch.rand(4, 2).bool()])
        verify_model(test_fn_no_arg(f), [torch.rand(1, 2).bool()])
        verify_model(test_fn_no_arg(f), [torch.arange(0, 3).to(torch.uint8)])
@tvm.testing.uses_gpu
def test_searchsorted():
    """test_searchsorted"""

    def test_fn(out_int32=False, right=False):
        return lambda x, y: torch.searchsorted(x, y, out_int32=out_int32, right=right)

    sorted_sequence = torch.tensor([[1, 3, 5, 7, 9], [2, 4, 6, 8, 10]])
    values = torch.tensor([[3, 6, 9], [3, 6, 9]])
    verify_model(test_fn(), [sorted_sequence, values])
    verify_model(test_fn(out_int32=True), [sorted_sequence[0], values[0]])
    verify_model(test_fn(right=True), [sorted_sequence, values])
    # 1-D sorted sequence broadcast against 2-D values, and a 0-d value.
    sorted_sequence_1d = torch.tensor([1, 3, 5, 7, 9])
    values = torch.tensor([[3, 6, 9], [4, 2, 7]])
    verify_model(test_fn(), [sorted_sequence_1d, values])
    verify_model(test_fn(), [sorted_sequence_1d, torch.tensor(6)])
@tvm.testing.uses_gpu
def test_bucketize():
    """test_bucketize"""

    def test_fn(out_int32=False, right=False):
        return lambda x, y: torch.bucketize(x, y, out_int32=out_int32, right=right)

    boundaries = torch.tensor([1, 3, 5, 7, 9])
    values = torch.tensor([3, 6, 9])
    # Default flags, then int32 output with right-closed intervals.
    verify_model(test_fn(), [values, boundaries])
    verify_model(test_fn(out_int32=True, right=True), [values, boundaries])
@tvm.testing.uses_gpu
def test_roll():
    """Test for aten::roll"""

    def test_fn(shifts, dims):
        return lambda x: torch.roll(x, shifts, dims)

    data = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8]).view(4, 2)
    # Positive and negative single-axis shifts, plus a multi-axis roll.
    for shifts, dims in [(1, 0), (-1, 0), ((2, 1), (0, 1))]:
        verify_model(test_fn(shifts, dims), [data])
@tvm.testing.uses_gpu
def test_einsum():
    """test_einsum"""

    def test_fn(equation):
        return lambda *x: torch.einsum(equation, *x)

    mat_a = torch.ones([2, 3])
    mat_b = torch.ones([3, 4])
    mat_c = torch.ones([4, 5])
    # Implicit-output two-operand form, then explicit three-operand chain.
    verify_model(test_fn("ij,jk"), [mat_a, mat_b])
    verify_model(test_fn("ij,jk,km->im"), [mat_a, mat_b, mat_c])
def test_stft():
    """test_stft"""

    def test_fn(n_fft, hop_length, win_length, center, pad_mode, normalized, onesided):
        return lambda input, window=None: torch.stft(
            input=input,
            n_fft=n_fft,
            hop_length=hop_length,
            win_length=win_length,
            window=window,
            center=center,
            pad_mode=pad_mode,
            normalized=normalized,
            onesided=onesided,
        )

    input_t = torch.rand([1, 12]).float()
    window = torch.tensor([2, 3, 4], dtype=torch.int32)
    targets = ["llvm", "cuda"]
    # Sweep the center/pad_mode/normalized/onesided flags.
    verify_trace_model(test_fn(3, 3, 3, False, "constant", False, True), [input_t, window], targets)
    verify_trace_model(test_fn(3, 3, 3, True, "constant", False, True), [input_t, window], targets)
    verify_trace_model(test_fn(3, 3, 3, False, "reflect", False, True), [input_t, window], targets)
    verify_trace_model(test_fn(3, 3, 3, True, "reflect", False, True), [input_t, window], targets)
    verify_trace_model(test_fn(3, 3, 3, True, "reflect", True, True), [input_t, window], targets)
    verify_trace_model(test_fn(3, 3, 3, True, "reflect", False, False), [input_t, window], targets)
    # Batched input.
    input_t = torch.rand([2, 12]).float()
    window = torch.tensor([2, 3, 4], dtype=torch.int32)
    verify_trace_model(test_fn(3, 3, 3, False, "reflect", False, True), [input_t, window], targets)
    window = torch.tensor([1, 3], dtype=torch.int32)
    verify_trace_model(test_fn(2, 1, 2, False, "reflect", False, True), [input_t, window], targets)
    # No explicit window.
    verify_trace_model(test_fn(2, 1, 2, False, "reflect", False, True), [input_t], targets)
@tvm.testing.uses_gpu
def test_dot():
    """Test for aten::dot"""

    def dot_self(vec):
        # Dot product of a 1-D tensor with itself.
        return vec.dot(vec)

    verify_model(dot_self, [torch.randn([4])])
@tvm.testing.uses_gpu
def test_mv():
    """Test for aten::mv"""

    def matvec(mat, vec):
        return mat.mv(vec)

    # Square and non-square matrix/vector shapes.
    for rows, cols in ((4, 4), (2, 2), (3, 8)):
        verify_model(matvec, [torch.randn(rows, cols), torch.randn(cols)])
def test_grid_sample():
    """test_grid_sample"""

    class Grid_sample(Module):
        """Wrapper module that pins grid_sample's mode/padding/align options."""

        def __init__(self, method, padding_mode, align_corners):
            super().__init__()
            self._method = method
            self._padding_mode = padding_mode
            self._align_corners = align_corners

        def forward(self, x, y):
            return torch.nn.functional.grid_sample(
                input=x,
                grid=y,
                mode=self._method,
                padding_mode=self._padding_mode,
                align_corners=self._align_corners,
            )

    data_2D = torch.rand([4, 4, 8, 8]).float()
    grid_2D = torch.rand([4, 16, 16, 2]).float()
    data_3D = torch.rand([4, 4, 4, 4, 4]).float()
    grid_3D = torch.rand([4, 8, 8, 8, 3]).float()

    # Exercise the full cross product of sampling options.
    for method in ["nearest", "bilinear", "bicubic"]:
        for padding in ["zeros", "border", "reflection"]:
            for align in [True, False]:
                model = Grid_sample(method, padding, align)
                verify_model(model, input_data=[data_2D, grid_2D])
                # Volumetric (5-D) grid_sample has no bicubic mode.
                if method != "bicubic":
                    verify_model(model, input_data=[data_3D, grid_3D])
def test_list_tuple():
    """test compilation error for a Python list followed by a prim::TupleConstruct."""

    class List_tuple(Module):
        """List_tuple"""

        def forward(self, x):
            """Build a list of tuples, then collapse each tuple into a sum."""
            merged = []
            mask_list = []
            for _ in range(3):
                w0 = torch.sigmoid(x)
                merged.append((w0, w0))
                mask_list.append(x)
            # Replace each (a, b) tuple with a + b in place.
            for idx in range(3):
                merged[idx] = merged[idx][0] + merged[idx][1]
            return mask_list[2], merged

    x = torch.rand([4, 4, 16, 32]).float()
    # strict=False lets trace accept the list-typed output.
    script_module = torch.jit.trace(List_tuple(), x, strict=False).eval()
    relay.frontend.from_pytorch(script_module, [("x", x.shape)])
@tvm.testing.uses_gpu
def test_binary_bitwise():
    """Test for binary bitwise"""

    def test_ior(x, y):
        return x.__ior__(y)

    def test_iand(x, y):
        return x.__iand__(y)

    def test_ixor(x, y):
        return x.__ixor__(y)

    lhs = torch.tensor([7, 49, 16, 1, 2, 3], dtype=torch.uint8)
    rhs = torch.tensor([39, 128, 99, 228, 63, 17], dtype=torch.uint8)
    for bitwise_fn in (test_ior, test_iand, test_ixor):
        verify_model(bitwise_fn, [lhs, rhs])
@tvm.testing.uses_gpu
def test_shift():
    """Test for aten::__lshift__, aten::__rshift__"""

    def test_lshift(x, y):
        return x << y

    def test_rshift(x, y):
        return x >> y

    values = torch.tensor([39, 128, 99, 228, 63, 17], dtype=torch.int32)
    amounts = torch.tensor([3, 2, 7, 4, 5, 9], dtype=torch.int32)
    for shift_fn in (test_lshift, test_rshift):
        verify_model(shift_fn, [values, amounts])
@tvm.testing.uses_gpu
def test_mod():
    """Test for aten::fmod"""

    def test_fmod(x, y):
        return torch.fmod(x, y)

    def test_remainder(x, y):
        return torch.remainder(x, y)

    # fmod and remainder differ in sign handling, so cover negative operands.
    cases = [
        ([-3.0, -2, -1, 1, 2, 3], 2),
        ([1, 2, 3, 4, 5], -1.5),
    ]
    for test_fn in [test_fmod, test_remainder]:
        for lhs, rhs in cases:
            verify_model(test_fn, [torch.tensor(lhs), torch.tensor(rhs)])
def test_softmax_fuse():
    """test_softmax_fuse"""

    # Conv + softmax graph; the nchwc_post_op flag adds extra elementwise ops
    # after the softmax so layout-transformed (NCHWc) fusion paths get covered.
    class Model(torch.nn.Module):
        """Pytorch model module"""

        def __init__(self, nchwc_post_op=False) -> None:
            super().__init__()
            self.conv = torch.nn.Conv2d(3, 3, (1, 1), 1)
            self.nchwc_post_op = nchwc_post_op

        @torch.no_grad()
        def forward(self, x):
            """forward"""
            t0a = self.conv(x)
            t0b = torch.floor(x)
            t2b = torch.softmax(t0a, dim=2)
            if self.nchwc_post_op:
                t3a = t0a - t0b
                t4a = t2b - t0b
                t6a = t3a + t4a
                return t6a
            return t2b + 1

    sh = [3, 3, 10, 1]
    inp = torch.ones(*sh, dtype=torch.float32)
    # Run both model variants and compare relay output against PyTorch.
    for model in [Model(nchwc_post_op=False).eval(), Model(nchwc_post_op=True).eval()]:
        output_torch = model(inp).numpy()
        mod, params = relay.frontend.from_pytorch(torch.jit.trace(model, inp), [("inp0", sh)])
        # opt_level=4 enables the aggressive fusion passes under test.
        with tvm.transform.PassContext(opt_level=4):
            out = (
                relay.create_executor("graph", mod, params=params)
                .evaluate()(inp0=inp.numpy())
                .numpy()
            )
        tvm.testing.assert_allclose(out, output_torch, rtol=1e-5, atol=1e-5)
@tvm.testing.uses_gpu
def test_lerp():
    """test_lerp"""

    def lerp_fn(x, y, w):
        return torch.lerp(x, y, w)

    shape = [16]
    x = torch.rand(shape).float()
    y = torch.rand(shape).float()
    w = torch.rand(shape).float()
    verify_model(lerp_fn, [x, y, w])
    # Also cover a 0-d (scalar tensor) weight.
    verify_model(lerp_fn, [x, y, w[0]])
def test_trilu():
    """Test for aten::triu / aten::tril with various diagonals."""

    def make_trilu(op, diagonal):
        return lambda inp: op(inp, diagonal)

    for op in (torch.triu, torch.tril):
        # Main diagonal, above it, and below it.
        verify_model(make_trilu(op, 0), [torch.rand(size=[3, 3])])
        verify_model(make_trilu(op, 1), [torch.rand(size=[6, 6])])
        verify_model(make_trilu(op, -2), [torch.rand(size=[6, 6])])
def test_multinomial():
    """Test for aten::multinomial; sampling is random so only shape/compile is checked."""

    def make_multinomial(num_samples):
        return lambda inp: torch.multinomial(inp, num_samples=num_samples, replacement=True)

    # check_correctness=False because the draws are nondeterministic.
    verify_model(
        make_multinomial(2),
        [torch.rand(size=[3]).float()],
        cpu_only=True,
        check_correctness=False,
    )
    verify_model(
        make_multinomial(1),
        [torch.rand(size=[4, 5]).float()],
        cpu_only=True,
        check_correctness=False,
    )
# Allow running this test file as a script; tvm.testing.main dispatches pytest.
if __name__ == "__main__":
    tvm.testing.main()
""" Tests on fx-quantized torch model conversion """ |
import torch |
import torchvision |
import numpy as np
from torch.quantization |
import get_default_qconfig
from torch.quantization.quantize_fx |
import prepare_fx, convert_fx
from torchvision.models.efficientnet |
import efficientnet_b4
from torchvision.models.resnet |
import resnet50
from tvm |
import relay
def quantize(model):
    """Return an fx-graph-mode quantized version of ``model`` (fbgemm qconfig)."""
    qconfig_dict = {"": get_default_qconfig("fbgemm")}
    prepared = prepare_fx(model, qconfig_dict)
    return convert_fx(prepared)
def quantize_and_build(model, in_size):
    """Quantize ``model``, trace it, import into relay, and assert qnn ops survive."""
    input_name = "inp"
    inp = torch.rand(1, 3, in_size, in_size)
    qmodel = quantize(model)

    with torch.no_grad():
        script_module = torch.jit.trace(qmodel, inp)

    mod, _ = relay.frontend.from_pytorch(script_module, [(input_name, inp.shape)])
    mod = relay.transform.InferType()(mod)

    # The quantized convolutions must be imported as qnn.conv2d, not fp32 conv.
    assert "qnn.conv2d" in mod.astext(show_meta_data=False)
def test_ssd_vgg():
    """Quantize and import SSD300-VGG16's backbone + detection head."""

    class TraceWrapper(torch.nn.Module):
        """Expose only the traceable tensor outputs of the detection model."""

        def __init__(self, model):
            super().__init__()
            self.model = model

        def forward(self, inp):
            features = self.model.backbone(inp)
            features = list(features.values())
            out = self.model.head(features)
            # Return plain tensors so torch.jit.trace can handle the outputs.
            return out["bbox_regression"], out["cls_logits"]

    model_func = torchvision.models.detection.ssd300_vgg16
    wrapped = TraceWrapper(model_func(num_classes=50, pretrained_backbone=True))
    quantize_and_build(wrapped.eval(), 300)
def test_deeplab_v3():
    """Quantize and import DeepLabV3 (MobileNetV3-Large backbone)."""

    class TraceWrapper(torch.nn.Module):
        """Unwrap the model's dict output to a single traceable tensor."""

        def __init__(self, model):
            super().__init__()
            self.model = model

        def forward(self, inp):
            return self.model(inp)["out"]

    deeplabv3 = torchvision.models.segmentation.deeplabv3_mobilenet_v3_large(pretrained=True)
    model = TraceWrapper(deeplabv3.eval()).eval()
    quantize_and_build(model, 300)
def test_imagenet():
    """Quantize and import a pair of pretrained ImageNet classifiers."""
    for model_func in (resnet50, efficientnet_b4):
        model = model_func(pretrained=True).eval()
        quantize_and_build(model, 224)
""" Tests on torch lstm model conversion """ |
import numpy as np |
import torch |
import torch.nn as nn
from torch.nn |
import Parameter |
import torch.jit as jit
from typing |
import List, Tuple
from torch |
import Tensor |
import tvm |
import tvm.testing
from tvm |
import relay
from tvm.relay.frontend.pytorch |
import from_pytorch
from tvm.relay.prelude |
import Prelude
from tvm.runtime.container |
import ADT, tuple_object |
class LayerNormLSTMCell(jit.ScriptModule):
    """TorchScript LSTM cell with LayerNorm on the gate pre-activations.

    No explicit bias parameters: each LayerNorm carries its own learnable
    shift, which plays that role.
    """

    def __init__(self, input_size, hidden_size):
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        # One stacked weight matrix for all four gates (hence 4 * hidden_size).
        self.weight_ih = Parameter(torch.randn(4 * hidden_size, input_size))
        self.weight_hh = Parameter(torch.randn(4 * hidden_size, hidden_size))
        ln = nn.LayerNorm
        self.layernorm_i = ln(4 * hidden_size)
        self.layernorm_h = ln(4 * hidden_size)
        self.layernorm_c = ln(hidden_size)

    @jit.script_method
    def forward(self, input, state):
        """One LSTM step: returns (output, (h, c)) for the next timestep."""
        hx, cx = state
        igates = self.layernorm_i(torch.mm(input, self.weight_ih.t()))
        hgates = self.layernorm_h(torch.mm(hx, self.weight_hh.t()))
        gates = igates + hgates
        # Split into the four gates (input, forget, cell, output).
        ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
        ingate = torch.sigmoid(ingate)
        forgetgate = torch.sigmoid(forgetgate)
        cellgate = torch.tanh(cellgate)
        outgate = torch.sigmoid(outgate)
        # Cell state is also layer-normalized before producing the output.
        cy = self.layernorm_c((forgetgate * cx) + (ingate * cellgate))
        hy = outgate * torch.tanh(cy)
        return hy, (hy, cy)
class LSTMLayer(jit.ScriptModule):
    """Unrolls a cell over the time dimension (dim 0) of the input."""

    def __init__(self, cell, *cell_args):
        super().__init__()
        self.cell = cell(*cell_args)

    @jit.script_method
    def forward(self, input, state):
        """Returns (stacked per-step outputs, final state)."""
        outputs = []
        for i in range(input.size(0)):
            out, state = self.cell(input[i], state)
            outputs += [out]
        return torch.stack(outputs), state
class ReverseLSTMLayer(jit.ScriptModule):
    """Like LSTMLayer but walks the sequence back-to-front.

    Outputs are prepended so the returned stack is still in forward
    (time-ascending) order.
    """

    def __init__(self, cell, *cell_args):
        super(ReverseLSTMLayer, self).__init__()
        self.cell = cell(*cell_args)

    @jit.script_method
    def forward(self, inputs, state):
        """Returns (stacked per-step outputs in forward order, final state)."""
        outputs = jit.annotate(List[Tensor], [])
        seq_len = inputs.size(0)
        for i in range(seq_len):
            out, state = self.cell(inputs[seq_len - i - 1], state)
            outputs = [out] + outputs
        return torch.stack(outputs), state
class BidirLSTMLayer(jit.ScriptModule):
    """Runs a forward and a reverse LSTM layer over the same input.

    ``states`` must provide one (h, c) pair per direction; the two
    directions' outputs are concatenated along dim 0.
    """

    __constants__ = ["directions"]

    def __init__(self, cell, *cell_args):
        super(BidirLSTMLayer, self).__init__()
        self.directions = nn.ModuleList(
            [
                LSTMLayer(cell, *cell_args),
                ReverseLSTMLayer(cell, *cell_args),
            ]
        )

    @jit.script_method
    def forward(self, input, states):
        """Returns (concatenated outputs, per-direction final states)."""
        outputs = jit.annotate(List[Tensor], [])
        output_states = jit.annotate(List[Tuple[Tensor, Tensor]], [])
        for (i, direction) in enumerate(self.directions):
            state = states[i]
            out, out_state = direction(input, state)
            outputs += [out]
            output_states += [out_state]
        return torch.cat(outputs, 0), output_states
def init_stacked_lstm(num_layers, layer, first_layer_args, other_layer_args):
    """Build a ModuleList of ``num_layers`` layers.

    The first layer may take different constructor args (e.g. a different
    input size) than the remaining layers.
    """
    first = layer(*first_layer_args)
    rest = [layer(*other_layer_args) for _ in range(num_layers - 1)]
    return nn.ModuleList([first] + rest)
class StackedLSTM(jit.ScriptModule):
    """Chains several LSTM layers; layer i consumes layer i-1's output.

    ``states`` must contain one (h, c) pair per layer.
    """

    __constants__ = ["layers"]

    def __init__(self, num_layers, layer, first_layer_args, other_layer_args):
        super().__init__()
        self.layers = init_stacked_lstm(num_layers, layer, first_layer_args, other_layer_args)

    @jit.script_method
    def forward(self, input, states):
        """Returns (top layer output, per-layer final states)."""
        output_states = jit.annotate(List[Tuple[Tensor, Tensor]], [])
        output = input
        for (i, rnn_layer) in enumerate(self.layers):
            state = states[i]
            output, out_state = rnn_layer(output, state)
            output_states += [out_state]
        return output, output_states
class StackedBidirLSTM(jit.ScriptModule):
    """Stack of bidirectional layers; states are nested per-layer, per-direction."""

    __constants__ = ["layers"]

    def __init__(self, num_layers, layer, first_layer_args, other_layer_args):
        super(StackedBidirLSTM, self).__init__()
        self.layers = init_stacked_lstm(num_layers, layer, first_layer_args, other_layer_args)

    @jit.script_method
    def forward(self, input, states):
        """Returns (top layer output, nested per-layer/per-direction final states)."""
        output_states = jit.annotate(List[List[Tuple[Tensor, Tensor]]], [])
        output = input
        for (i, rnn_layer) in enumerate(self.layers):
            state = states[i]
            output, out_state = rnn_layer(output, state)
            output_states += [out_state]
        return output, output_states
def lstm(input_size, hidden_size):
    """Single LayerNorm-LSTM layer factory."""
    cell_args = (input_size, hidden_size)
    return LSTMLayer(LayerNormLSTMCell, *cell_args)
def stacked_lstm(input_size, hidden_size, num_layers):
    """Stacked LayerNorm-LSTM factory; later layers take hidden_size inputs."""
    first_args = [LayerNormLSTMCell, input_size, hidden_size]
    other_args = [LayerNormLSTMCell, hidden_size, hidden_size]
    return StackedLSTM(num_layers, LSTMLayer, first_layer_args=first_args, other_layer_args=other_args)
def bidir_lstm(input_size, hidden_size):
    """Single bidirectional LayerNorm-LSTM layer factory."""
    cell_args = (input_size, hidden_size)
    return BidirLSTMLayer(LayerNormLSTMCell, *cell_args)
def stacked_bidir_lstm(input_size, hidden_size, num_layers):
    """Stacked bidirectional LayerNorm-LSTM factory."""
    first_args = [LayerNormLSTMCell, input_size, hidden_size]
    other_args = [LayerNormLSTMCell, hidden_size, hidden_size]
    return StackedBidirLSTM(
        num_layers, BidirLSTMLayer, first_layer_args=first_args, other_layer_args=other_args
    )
def vmobj_to_list(o, dtype="float32"):
    """Flatten a TVM runtime object (NDArray or arbitrarily nested ADT) to a list."""
    if isinstance(o, tvm.nd.NDArray):
        return [o]
    if isinstance(o, tvm.runtime.container.ADT):
        flat = []
        for field in o:
            flat.extend(vmobj_to_list(field, dtype))
        return flat
    raise RuntimeError("Unknown object type: %s" % type(o))
def assert_equal(tvm_result, torch_result):
    """Recursively compare TVM outputs against PyTorch reference outputs.

    Tuples/lists are compared element-wise; tensors via assert_allclose.
    (The original source had the identifier ``pt_res`` split across two
    lines, which was a syntax error; rejoined here.)
    """
    if isinstance(torch_result, (tuple, list)):
        assert isinstance(tvm_result, list)
        for tvm_res, pt_res in zip(tvm_result, torch_result):
            assert_equal(tvm_res, pt_res)
    elif isinstance(torch_result, torch.Tensor):
        tvm.testing.assert_allclose(tvm_result.numpy(), torch_result.numpy(), rtol=1e-4, atol=1e-4)
def run_and_compare(mod, params, pt_result, target, device):
    """Run ``mod`` on the relay VM and compare against the PyTorch result.

    ``params`` supplies all named VM inputs; nested PyTorch outputs are
    flattened so both sides compare as flat lists.
    """
    exec_res = relay.create_executor("vm", mod=mod, device=device, target=target).evaluate()(
        **params
    )

    def flatten(nested):
        # Flatten arbitrarily nested tuples/lists of tensors into one list.
        res = []
        for r in nested:
            if isinstance(r, torch.Tensor):
                res.append(r)
            else:
                res.extend(flatten(r))
        return res

    # ADT results come from models returning tuples/lists; flatten both sides.
    if isinstance(exec_res, tvm.runtime.container.ADT):
        assert not isinstance(pt_result, torch.Tensor)
        tvm_res = vmobj_to_list(exec_res)
        torch_res = flatten(pt_result)
    else:
        tvm_res = exec_res
        torch_res = pt_result
    assert_equal(tvm_res, torch_res)
def convert_list_to_vmobj(py_lst):
    """Convert a nested Python list of ndarrays/tuples into a relay ADT List.

    The list is built back-to-front by consing onto nil, preserving order.
    """

    def wrap_nd_array(arr):
        return tvm.nd.array(arr, device=tvm.cpu(0))

    mod = tvm.IRModule()
    # Prelude(mod) registers the List ADT into mod as a side effect.
    Prelude(mod)
    # Discard the type itself; only the constructors are needed.  (Was bound
    # to the name ``list``, shadowing the builtin.)
    _, cons, nil = mod.get_type("List")
    adt_lst = ADT(nil.tag, [])
    for elem in reversed(py_lst):
        if isinstance(elem, np.ndarray):
            vmobj = wrap_nd_array(elem)
        elif isinstance(elem, tuple):
            vmobj = tuple_object([wrap_nd_array(e) for e in elem])
        elif isinstance(elem, list):
            vmobj = convert_list_to_vmobj(elem)
        adt_lst = ADT(cons.tag, [vmobj, adt_lst])
    return adt_lst
@tvm.testing.uses_gpu
def test_custom_lstm():
    """Script the custom LSTM variants, import via from_pytorch, and compare
    VM results against PyTorch on every enabled target."""
    input_name = "input"
    states_name = "states"
    seq_len = 5
    batch = 2
    input_size = 3
    hidden_size = 4
    num_layers = 3
    state_tensor_shape = (batch, hidden_size)
    torch.manual_seed(1)
    inp = torch.randn(seq_len, batch, input_size)
    # Shape spec for a single-layer model: one (h, c) state pair.
    input_shapes = [
        (input_name, (seq_len, batch, input_size)),
        (states_name, (state_tensor_shape, state_tensor_shape)),
    ]
    # Shape spec for stacked models: a list of per-layer state pairs.
    input_shapes_stacked = [
        (input_name, (seq_len, batch, input_size)),
        (
            states_name,
            [(state_tensor_shape, state_tensor_shape), (state_tensor_shape, state_tensor_shape)],
        ),
    ]
    # NOTE(review): input_shapes_stacked_bidir and stacked_bidir_states are
    # built but no "stacked_bidir" entry appears in `models` below — possibly
    # a disabled case; confirm before removing.
    input_shapes_stacked_bidir = [
        (input_name, (seq_len, batch, input_size)),
        (
            states_name,
            [
                [(state_tensor_shape, state_tensor_shape) for _ in range(2)]
                for _ in range(num_layers)
            ],
        ),
    ]
    states = [
        (torch.randn(state_tensor_shape), torch.randn(state_tensor_shape))
        for _ in range(num_layers)
    ]
    bidir_states = [
        (torch.randn(state_tensor_shape), torch.randn(state_tensor_shape)) for _ in range(2)
    ]
    stacked_bidir_states = [
        [(torch.randn(state_tensor_shape), torch.randn(state_tensor_shape)) for _ in range(2)]
        for _ in range(num_layers)
    ]
    models = [
        ("lstm", lstm(input_size, hidden_size).eval(), states[0], input_shapes),
        (
            "stacked",
            stacked_lstm(input_size, hidden_size, num_layers).eval(),
            states,
            input_shapes_stacked,
        ),
        ("bidir", bidir_lstm(input_size, hidden_size).eval(), bidir_states, input_shapes_stacked),
    ]
    for (name, raw_model, states, input_shapes) in models:
        script_module = torch.jit.script(raw_model)
        mod, params = from_pytorch(script_module, input_shapes)
        with torch.no_grad():
            pt_result = raw_model(inp.clone(), states)
        params[input_name] = inp.numpy()
        # Mirror the nesting structure of `states` in numpy, case by case.
        if isinstance(states, tuple):
            states_np = tuple(st.numpy() for st in states)
        elif isinstance(states, list) and isinstance(states[0], torch.Tensor):
            states_np = [st.numpy() for st in states]
        elif isinstance(states, list) and isinstance(states[0], tuple):
            states_np = [tuple(st.numpy() for st in states[i]) for i in range(len(states))]
        elif isinstance(states, list) and isinstance(states[0], list):
            states_np = [
                [tuple(st.numpy() for st in states) for states in states[layer]]
                for layer in range(num_layers)
            ]
        else:
            assert False
        # Lists must be converted to relay ADT objects before feeding the VM.
        if isinstance(states_np, list):
            params[states_name] = convert_list_to_vmobj(states_np)
        else:
            params[states_name] = states_np
        for tgt, dev in tvm.testing.enabled_targets():
            print("Running %s on target %s" % (name, tgt))
            run_and_compare(mod, params, pt_result, target=tgt, device=dev)
"""Test torch vision fasterrcnn and maskrcnn models""" |
import numpy as np |
import cv2 |
import torch |
import torchvision |
import tvm |
import tvm.testing
from tvm |
import relay
from tvm.runtime.vm |
import VirtualMachine
from tvm.relay.frontend.pytorch_utils |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.