import (
    rewrite_nms_to_batched_nms,
    rewrite_batched_nms_with_max_out_size,
    rewrite_scatter_to_gather,
)
from tvm.contrib.download import download

in_size = 300


def process_image(img):
    img = cv2.imread(img).astype("float32")
    img = cv2.resize(img, (in_size, in_size))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = torch.from_numpy(img / 255.0).permute(2, 0, 1).float()
    img = torch.unsqueeze(img, axis=0)
    return img


def do_trace(model, inp, in_size=in_size):
    model_trace = torch.jit.trace(model, inp)
    model_trace.eval()
    return model_trace


def dict_to_tuple(out_dict):
    if "masks" in out_dict.keys():
        return out_dict["boxes"], out_dict["scores"], out_dict["labels"], out_dict["masks"]
    return out_dict["boxes"], out_dict["scores"], out_dict["labels"]
class TraceWrapper(torch.nn.Module):
    def __init__(self, model):
        super().__init__()
        self.model = model

    def forward(self, inp):
        out = self.model(inp)
        return dict_to_tuple(out[0])


def generate_jit_model(index):
    model_funcs = [
        torchvision.models.detection.fasterrcnn_resnet50_fpn,
        torchvision.models.detection.maskrcnn_resnet50_fpn,
    ]
    model_func = model_funcs[index]
    model = TraceWrapper(model_func(pretrained=True, rpn_pre_nms_top_n_test=1000))
    model.eval()
    inp = torch.Tensor(np.random.uniform(0.0, 250.0, size=(1, 3, in_size, in_size)))

    with torch.no_grad():
        out = model(inp)
        script_module = do_trace(model, inp)
        script_out = script_module(inp)
        assert len(out[0]) > 0 and len(script_out[0]) > 0
        return script_module


def test_detection_models():
    img = "test_street_small.jpg"
    img_url = "https:"  # truncated URL in the source
    download(img_url, img)

    input_shape = (1, 3, in_size, in_size)
    input_name = "input0"
    shape_list = [(input_name, input_shape)]

    scripted_model = generate_jit_model(1)
    mod, params = relay.frontend.from_pytorch(scripted_model, shape_list)
    data = process_image(img)
    data_np = data.detach().numpy()

    with torch.no_grad():
        pt_res = scripted_model(data)

    def compile_and_run_vm(mod, params, data_np, target):
        with tvm.transform.PassContext(opt_level=3):
            vm_exec = relay.vm.compile(mod, target=target, params=params)
        dev = tvm.device(target, 0)
        vm = VirtualMachine(vm_exec, dev)
        vm.set_input("main", **{input_name: data_np})
        return vm.run()

    for target in ["llvm"]:
        tvm_res = compile_and_run_vm(mod, params, data_np, target)

        # Bounding boxes
        tvm.testing.assert_allclose(
            pt_res[0].cpu().numpy(), tvm_res[0].numpy(), rtol=1e-5, atol=1e-5
        )
        # Scores
        tvm.testing.assert_allclose(
            pt_res[1].cpu().numpy(), tvm_res[1].numpy(), rtol=1e-5, atol=1e-5
        )
        # Class labels
        np.testing.assert_equal(pt_res[2].cpu().numpy(), tvm_res[2].numpy())

        score_threshold = 0.9
        print("Num boxes:", pt_res[0].cpu().numpy().shape[0])
        print("Num valid boxes:", np.sum(pt_res[1].cpu().numpy() >= score_threshold))

    before = mod["main"]
    mod = rewrite_nms_to_batched_nms(mod)
    after = mod["main"]
    assert not tvm.ir.structural_equal(after, before)

    before = mod["main"]
    mod = rewrite_scatter_to_gather(mod, 4)
    after = mod["main"]
    assert not tvm.ir.structural_equal(after, before)

    # The rewritten module must produce the same results as the original one
    tvm_res_after_rewrite = compile_and_run_vm(mod, params, data_np, "llvm")
    for res1, res2 in zip(tvm_res, tvm_res_after_rewrite):
        tvm.testing.assert_allclose(res1.numpy(), res2.numpy())
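# Hedged sketch (not part of the test above): rewrite_batched_nms_with_max_out_size
# is imported alongside the other rewrites but not exercised here; it could be
# checked with the same before/after pattern. This assumes it takes the module
# as its only argument.
def _check_max_out_size_rewrite(mod):
    before = mod["main"]
    mod = rewrite_batched_nms_with_max_out_size(mod)
    after = mod["main"]
    assert not tvm.ir.structural_equal(after, before)
    return mod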
import torch
import tvm
import tvm.testing
import onnx
import io
import sys

from tvm import relay
from tvm.contrib import graph_executor
from torch import nn

lstm_feature_size = 16
lstm_hidden_size = 32
lstm_projection_size = 20
gru_feature_size = 8
gru_hidden_size = 16
num_layers = 2
seqs_length = 2
batch_size = 2
rnn_feature_size = 8
rnn_hidden_size = 16
class RNN_Model(nn.Module): """ It is base class for RNN layer classes. It contains some common fields and methods for child classes. """ def __init__( self, ): super().__init__() self.model = None def forward(self, input, hidden_init=None): """ Computes the output tensor after input inference along RNN layer. :param input: batch of data as a tensor of shape (seqs_length, batch_size, feature_size) or (batch_size, seqs_length, feature_size) if self.batch_first = True :param hidden_init: initial hidden state(s) of the RNN as a tensor(s) of shape (num_layers, batch_size, hidden_size). Will default to a tensor of zeros if None. :return: the output tensor of shape (batch_size, hidden_size) """ if self.model is None: raise NotImplementedError("self.model must be defined in subclasses!") out, _ = self.model(input, hidden_init) return out def gen_rnd_weights(self): """ Generate random weigths for the model """ if self.model is None: raise NotImplementedError("self.model must be defined in subclasses!") with torch.no_grad(): for weight_group in self.model.all_weights: for weight in weight_group: weight.data = torch.rand(weight.shape) def get_dummy_inputs(self): raise NotImplementedError("subclasses must override get_dummy_inputs()!") def get_input_names(self): raise NotImplementedError("subclasses must override get_input_names()!") def get_shape_desc(self, frontend_type): raise NotImplementedError("subclasses must override get_shape_desc(frontend_type)!") def get_tvm_inputs(self, dtype): raise NotImplementedError("subclasses must override get_tvm_inputs(dtype)!")
class RNN_Model_Impl(RNN_Model): def __init__( self, seq_len=seqs_length, batch_size=batch_size, feature_size=rnn_feature_size, hidden_size=rnn_hidden_size, batch_first=False, layer_num=1, bidirectional=False, use_bias=True, rnd_weights_init=False, nonlinearity="tanh", dropout=0.0, ): super().__init__() self.shape = [seq_len, batch_size, feature_size] if batch_first: self.shape = [batch_size, seq_len, feature_size] layers_num = 2 * layer_num if bidirectional else layer_num self.h0_shape = [layers_num, batch_size, hidden_size] self.dummy_inputs = (torch.rand(self.shape), torch.zeros(self.h0_shape)) self.model = nn.RNN( input_size=feature_size, hidden_size=hidden_size, num_layers=layer_num, nonlinearity=nonlinearity, bias=use_bias, batch_first=batch_first, dropout=dropout, bidirectional=bidirectional, ) if rnd_weights_init: self.gen_rnd_weights() def gen_rnd_weights(self): super().gen_rnd_weights() def get_dummy_inputs(self): return self.dummy_inputs def get_input_names(self): return ["input", "h0"] def get_shape_desc(self, frontend_type): shape_desc = None if frontend_type == "pt": shape_desc = [("input", self.shape)] elif frontend_type == "onnx": shape_desc = { "input": self.shape, "h0": self.h0_shape, } return shape_desc def get_tvm_inputs(self, dtype): return { "input": tvm.nd.array(self.dummy_inputs[0].numpy().astype(dtype)), "h0": tvm.nd.array(self.dummy_inputs[1].numpy().astype(dtype)), }
class GRU_Model(RNN_Model): def __init__( self, seq_len=seqs_length, batch_size=batch_size, feature_size=gru_feature_size, hidden_size=gru_hidden_size, batch_first=False, layer_num=1, bidirectional=False, use_bias=True, rnd_weights_init=False, ): super().__init__() self.shape = [seq_len, batch_size, feature_size] if batch_first: self.shape = [batch_size, seq_len, feature_size] layers_num = 2 * layer_num if bidirectional else layer_num self.h0_shape = [layers_num, batch_size, hidden_size] self.dummy_inputs = (torch.rand(self.shape), torch.zeros(self.h0_shape)) self.model = nn.GRU( input_size=feature_size, hidden_size=hidden_size, num_layers=layer_num, bidirectional=bidirectional, batch_first=batch_first, bias=use_bias, ) if rnd_weights_init: self.gen_rnd_weights() def gen_rnd_weights(self): """ Generate random weigths for the model with biases For first uni- and bidirectional weights group: Wi (3*hidden_size, feature_size) Wh (3*hidden_size, hidden_size) Bi (3*hidden_size) Bh (3*hidden_size) For other weights group: Wi (3*hidden_size, hidden_size) Wh (3*hidden_size, hidden_size) Bi (3*hidden_size) Bh (3*hidden_size) For generation of random weigths for the model without biases the Bi and Bh weights are skipped """ super().gen_rnd_weights() def get_dummy_inputs(self): return self.dummy_inputs def get_input_names(self): return ["input", "h0"] def get_shape_desc(self, frontend_type): shape_desc = None if frontend_type == "pt": shape_desc = [("input", self.shape)] elif frontend_type == "onnx": shape_desc = {
"input": self.shape, "h0": self.h0_shape, } return shape_desc def get_tvm_inputs(self, dtype): return { "input": tvm.nd.array(self.dummy_inputs[0].numpy().astype(dtype)), "h0": tvm.nd.array(self.dummy_inputs[1].numpy().astype(dtype)), } def check_torch_version_for_proj_in_lstm(): """ proj_size parameter is supported in torch.nn.LSTM layer started from 1.8.0 torch version """ me = False version = torch.__version__ major, minor, micro = version.split(".") if int(major) > 1: me = True elif int(major) == 1: if int(minor) >= 8: me = True return me
class LSTM_Model(RNN_Model): def __init__( self, seq_len=seqs_length, batch_size=batch_size, feature_size=lstm_feature_size, hidden_size=lstm_hidden_size, batch_first=False, layer_num=1, bidirectional=False, proj_size=0, use_bias=True, rnd_weights_init=False, ): super().__init__() self.shape = [seq_len, batch_size, feature_size] if batch_first: self.shape = [batch_size, seq_len, feature_size] layers_num = 2 * layer_num if bidirectional else layer_num self.h0_shape = [layers_num, batch_size, hidden_size] if proj_size > 0: self.h0_shape = [layers_num, batch_size, proj_size] self.c0_shape = [layers_num, batch_size, hidden_size] self.dummy_inputs = ( torch.rand(self.shape), (torch.zeros(self.h0_shape), torch.zeros(self.c0_shape)), ) if check_torch_version_for_proj_in_lstm(): self.model = nn.LSTM( input_size=lstm_feature_size, hidden_size=lstm_hidden_size, num_layers=layer_num, bidirectional=bidirectional, proj_size=proj_size, batch_first=batch_first, bias=use_bias, ) else: if proj_size > 0: print( "WARNING: projection is not supported for torch version less than 1.8.0! ", "LSTM was constructed without projection!", ) self.model = nn.LSTM( input_size=lstm_feature_size, hidden_size=lstm_hidden_size, num_layers=layer_num, bidirectional=bidirectional, batch_first=batch_first, bias=use_bias, ) if rnd_weights_init: self.gen_rnd_weights() def gen_rnd_weights(self): """ Generate random
weigths for the model with biases Without projection: For first weights group: Wi (4*lstm_hidden_size, lstm_feature_size) Wh (4*lstm_hidden_size, lstm_hidden_size) Bi (4*lstm_hidden_size) Bh (4*lstm_hidden_size) For first bidirectional weights group: Wi (4*lstm_hidden_size, lstm_feature_size) Wh (4*lstm_hidden_size, lstm_hidden_size) Bi (4*lstm_hidden_size) Bh (4*lstm_hidden_size) For other weights group: Wi (4*lstm_hidden_size, lstm_hidden_size) Wh (4*lstm_hidden_size, lstm_hidden_size) Bi (4*lstm_hidden_size) Bh (4*lstm_hidden_size) With projection: For first weights group: Wi (4*lstm_hidden_size, lstm_feature_size) Wh (4*lstm_hidden_size, proj_size) Bi (4*lstm_hidden_size) Bh (4*lstm_hidden_size) P (proj_size, lstm_hidden_size) For first bidirectional weights group: Wi (4*lstm_hidden_size, lstm_feature_size) Wh (4*lstm_hidden_size, proj_size) Bi (4*lstm_hidden_size) Bh (4*lstm_hidden_size) P (proj_size, lstm_hidden_size) For other weights group: Wi (4*lstm_hidden_size, proj_size * num_directions) Wh (4*lstm_hidden_size, proj_size) Bi (4*lstm_hidden_size) Bh (4*lstm_hidden_size) P (proj_size, lstm_hidden_size) For generation of random weigths for the model without biases Bi and Bh are skipped """ super().gen_rnd_weights() def get_dummy_inputs(self): return self.dummy_inputs def get_input_names(self): return ["input", "h0", "c0"] def get_shape_desc(self, frontend_type): shape_desc = None if frontend_type == "pt": sh
ape_desc = [("input", self.shape)] elif frontend_type == "onnx": shape_desc = { "input": self.shape, "h0": self.h0_shape, "c0": self.c0_shape, } return shape_desc def get_tvm_inputs(self, dtype): return { "input": tvm.nd.array(self.dummy_inputs[0].numpy().astype(dtype)), "h0": tvm.nd.array(self.dummy_inputs[1][0].numpy().astype(dtype)), "c0": tvm.nd.array(self.dummy_inputs[1][1].numpy().astype(dtype)), } def compare(input, gold_data, rtol=1e-5, atol=1e-5): tvm.testing.assert_allclose(input, gold_data, rtol=rtol, atol=atol) def check_rnn(rnn_type, rnn_mod, target=tvm.target.Target("llvm -mcpu=core-avx2"), dev=tvm.cpu(0)): def get_model( rnn_type, rnn_mod, args, ): if "b" in rnn_mod: args["bidirectional"] = True if "s" in rnn_mod: args["layer_num"] = num_layers if "tanh" in rnn_mod: args["nonlinearity"] = "tanh" if "relu" in rnn_mod: args["nonlinearity"] = "relu" if rnn_type == "GRU": RNN_Model_selector = GRU_Model elif rnn_type == "LSTM": RNN_Model_selector = LSTM_Model if "p" in rnn_mod: args["proj_size"] = lstm_projection_size elif rnn_type == "RNN": RNN_Model_selector = RNN_Model_Impl return RNN_Model_selector(**args) def get_onnx_model(model): onnx_io = io.BytesIO() with torch.no_grad(): input_names = model.get_input_names() inputs = model.get_dummy_inputs() torch.onnx.export(model, inputs, onnx_io, input_names=input_names) onnx_io.seek(0, 0) return onnx.load_model(onnx_io) model = None dtype = "float32" device = torch.device("cpu") for batch_first in (True, False): for use_bias in (True, False): for rnd_weights in [True
]: model_inputs = { "batch_first": batch_first, "use_bias": use_bias, "rnd_weights_init": rnd_weights, } model = get_model(rnn_type, rnn_mod, model_inputs) model.to(device) model.eval() dummy_inputs = model.get_dummy_inputs() golden_output = model.forward(dummy_inputs[0].to(device)).detach().cpu().numpy() tvm_output = None for format in ["pt"]: shape_desc = model.get_shape_desc(format) if format == "pt": traced_script_module = torch.jit.trace(model, dummy_inputs[0]).eval() mod, params = relay.frontend.from_pytorch(traced_script_module, shape_desc) elif format == "onnx": try: onnx_model = get_onnx_model(model) except: print( "WARNING: torch.onnx.export does not support conversion LSTM with projection " "from pytorch! TODO: waiting for the support and correct test after that." ) continue mod, params = relay.frontend.from_onnx(onnx_model, shape_desc) with tvm.transform.PassContext(opt_level=3): lib = relay.build(mod, target=target, params=params) m = graph_executor.GraphModule(lib["default"](dev)) tvm_inputs = model.get_tvm_inputs(dtype) m.set_input(**tvm_inputs) m.run() tvm_output = m.get_output(0).numpy()
compare(tvm_output, golden_output) @tvm.testing.uses_gpu def test_rnns(): for target, dev in tvm.testing.enabled_targets(): for mod_type in ["uni", "s", "b", "sb"]: check_rnn("GRU", mod_type, target, dev) for mod_type in ["uni", "s", "b", "sb"]: check_rnn("LSTM", mod_type, target, dev) for mod_type in ["uni", "s", "b", "sb", "tanh", "relu"]: check_rnn("RNN", mod_type, target, dev) if __name__ == "__main__": test_rnns()
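# Hedged usage sketch: the conversion path that check_rnn() exercises above,
# spelled out for a single unidirectional GRU configuration. The target, device
# and dtype below are illustrative assumptions, not part of the test.
def _example_gru_conversion():
    model = GRU_Model(rnd_weights_init=True)
    model.eval()
    dummy_inputs = model.get_dummy_inputs()
    traced = torch.jit.trace(model, dummy_inputs[0]).eval()
    mod, params = relay.frontend.from_pytorch(traced, model.get_shape_desc("pt"))
    with tvm.transform.PassContext(opt_level=3):
        lib = relay.build(mod, target="llvm", params=params)
    runtime = graph_executor.GraphModule(lib["default"](tvm.cpu(0)))
    runtime.set_input(**model.get_tvm_inputs("float32"))
    runtime.run()
    return runtime.get_output(0).numpy()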
""" BatchNorm without given mean and variance given testcases ==================== This is a test script to test fused_batch_norm operators in TensorFlow frontend when mean and variance are not given. """
import tvm
import tvm.testing
import numpy as np

try:
    import tensorflow.compat.v1 as tf

    tf.disable_v2_behavior()
except ImportError:
    import tensorflow as tf

from tvm import relay
from tensorflow.python.framework import graph_util


def verify_fused_batch_norm(shape):
    g = tf.Graph()
    with g.as_default():
        input_tensor = tf.placeholder(tf.float32, shape=shape, name="input")
        alpha = tf.constant(
            np.random.rand(
                shape[-1],
            ),
            dtype=tf.float32,
            name="alpha",
        )
        beta = tf.constant(
            np.random.rand(
                shape[-1],
            ),
            dtype=tf.float32,
            name="beta",
        )
        bn = tf.nn.fused_batch_norm(x=input_tensor, offset=beta, scale=alpha, name="bn")
        out = tf.identity(bn[0], name="output")
    data = np.random.rand(*shape)
    with tf.Session(graph=out.graph) as sess:
        sess.run([tf.global_variables_initializer()])
        tf_out = sess.run(out, feed_dict={input_tensor: data})
        constant_graph = graph_util.convert_variables_to_constants(sess, sess.graph_def, ["output"])

    for device in ["llvm"]:
        dev = tvm.device(device, 0)
        if not tvm.testing.device_enabled(device):
            print("Skip because %s is not enabled" % device)
            continue
        mod, params = relay.frontend.from_tensorflow(constant_graph, outputs=["output"])
        with tvm.transform.PassContext(opt_level=3):
            graph, lib, params = relay.build(mod, target=device, params=params)
        from tvm.contrib import graph_executor

        m = graph_executor.create(graph, lib, dev)
        m.set_input(**params)
        m.set_input("input", data)
        m.run()
        tvm_out = m.get_output(0)
        tvm.testing.assert_allclose(
            tvm_out.numpy(), tf_out.astype(tvm_out.dtype), atol=1e-3, rtol=1e-3
        )


def test_fused_batch_norm():
    verify_fused_batch_norm(shape=(1, 12, 12, 32))
    verify_fused_batch_norm(shape=(1, 24, 24, 64))
    verify_fused_batch_norm(shape=(1, 64, 64, 128))
    verify_fused_batch_norm(shape=(8, 12, 12, 32))
    verify_fused_batch_norm(shape=(16, 12, 12, 32))
    verify_fused_batch_norm(shape=(32, 12, 12, 32))


if __name__ == "__main__":
    test_fused_batch_norm()
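# Reference sketch (illustrative, not part of the test above): when mean and
# variance are not supplied, fused_batch_norm runs in training mode and
# normalizes with statistics computed from the batch itself over the N/H/W axes
# (NHWC layout), roughly:
#
#   y = alpha * (x - mean(x)) / sqrt(var(x) + epsilon) + beta
#
# A rough NumPy equivalent, assuming TensorFlow's default epsilon of 1e-3:
def _reference_fused_batch_norm(x, alpha, beta, epsilon=1e-3):
    mean = x.mean(axis=(0, 1, 2), keepdims=True)
    var = x.var(axis=(0, 1, 2), keepdims=True)
    return alpha * (x - mean) / np.sqrt(var + epsilon) + beta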
"""Unit tests for converting TensorFlow control flow op to Relay."""
import pytest

try:
    import tensorflow.compat.v1 as tf

    tf.disable_v2_behavior()
except ImportError:
    import tensorflow as tf

from tensorflow.python.ops import control_flow_ops
import numpy as np
from tvm import nd
from tvm import relay
from tvm.relay.frontend.tensorflow
import from_tensorflow def check_equal(graph, tf_out, input_map=None): mod, params = from_tensorflow(graph.as_graph_def(add_shapes=True)) if input_map is not None: params.update(input_map) relay_out = relay.create_executor("vm", mod=mod).evaluate()(**params) if isinstance(relay_out, nd.NDArray): np.testing.assert_allclose(tf_out, relay_out.numpy()) else: if not isinstance(tf_out, (list, tuple)): tf_out = [tf_out] for x, y in zip(tf_out, [r.numpy() for r in relay_out]): np.testing.assert_allclose(x, y) def test_vanilla_loop(): graph = tf.Graph() with graph.as_default(): i = tf.constant(0, name="while/constant") def c(i): return tf.less(i, 10) def b(i): return tf.add(i, 1) r = tf.while_loop(c, b, [i]) with tf.Session() as sess: tf_out = sess.run(r) check_equal(graph, tf_out) def test_callnode_loop_vars(): graph = tf.Graph() with graph.as_default(): i = tf.add(tf.constant(0), 1) def c(i): return tf.less(i, 10) def b(i): return tf.add(i, 1) r = tf.while_loop(c, b, [i]) with tf.Session() as sess: tf_out = sess.run(r) check_equal(graph, tf_out) def test_loop_2_vars(): graph = tf.Graph() with graph.as_default(): i0 = tf.constant(0) j0 = tf.ones([2, 2]) def c(i, j): return i < 10 def b(i, j): return [tf.add(i, 1), j] i1, i2 = tf.while_loop(c, b, loop_vars=[i0, j0]) i1 += tf.constant(1337) with tf.Session() as sess: tf_out = sess.run(i1) check_equal(graph, tf_out) def test_loop_3_vars(): graph = tf.Graph() with graph.as_default(): i0 = tf.constant(1) j0 = tf.constant(2) k0 = tf.constant(4) def c(i, j, k): return i < 10 def b(i, j, k): return [i + 1, j * k, k + i]
r = tf.while_loop(c, b, loop_vars=[i0, j0, k0]) with tf.Session() as sess: tf_out = sess.run(r) check_equal(graph, tf_out) def test_loop_conditions(): graph = tf.Graph() with graph.as_default(): i = tf.constant(1) j = tf.constant(1) k = tf.constant(5) def c(i, j, k): return tf.equal( tf.not_equal(tf.less(i + j, 10), tf.less(j * k, 100)), tf.greater_equal(k, i + j) ) def b(i, j, k): return [i + j, j + k, k + 1] r = tf.while_loop(c, b, loop_vars=[i, j, k]) with tf.Session() as sess: tf_out = sess.run(r) check_equal(graph, tf_out) @pytest.mark.skip def test_loop_bodies(): graph = tf.Graph() with graph.as_default(): def body(x): a = tf.constant(np.array([[5, 6], [7, 8]]), dtype=tf.int32) b = tf.constant(np.array([[1, 2], [3, 4]]), dtype=tf.int32) c = a + b return tf.nn.relu(x + c) def condition(x): return tf.reduce_sum(x) < 100 x = tf.constant(0, shape=[2, 2]) r = tf.while_loop(condition, body, [x]) with tf.Session() as sess: tf_out = sess.run(r) check_equal(graph, tf_out) def test_nested_loop(): graph = tf.Graph() with graph.as_default(): def body(x): def nest_body(c): return tf.multiply(c, 2) def cd(c): return tf.less(c, 10) c = tf.constant(2) res = tf.while_loop(cd, nest_body, loop_vars=[c]) return tf.nn.relu(x + res) def condition(x): return tf.greater(x, 100) x = tf.constant(3) r = tf.while_loop(condition, body, loop_vars=[x]) with tf.Session() as sess: tf_out = sess.run(r) check_equal(graph, tf_out) def test_vanilla_cond(): graph = tf.Graph() with graph.as_default(): i = tf.constant(1) j = tf.constant(4) def f
1(): return tf.multiply(1, 17) def f2(): return tf.add(4, 23) r = tf.cond(tf.less(i, j), f1, f2) with tf.Session(graph=graph) as sess: tf_out = sess.run(r) check_equal(graph, tf_out) def test_multiple_cond_vars(): graph = tf.Graph() with graph.as_default(): x1 = tf.constant(7) x2 = tf.constant(12) z = tf.constant(20) r = tf.cond(tf.less(tf.add(x1, x2), 10), lambda: tf.add(10, 2), lambda: tf.square(5)) with tf.Session() as sess: tf_out = sess.run(r) check_equal(graph, tf_out) def test_cond_fn_parameters(): graph = tf.Graph() with graph.as_default(): def fn1(x, y): return tf.multiply(5, 6) def fn2(x, y): return tf.add(3, 4) i = tf.constant(1) j = tf.constant(2) k = tf.constant(3) r = tf.cond(tf.less(i, j), lambda: fn1(i, k), lambda: fn2(j, k)) with tf.Session() as sess: tf_out = sess.run(r, feed_dict={i: 1, j: 2, k: 3}) check_equal(graph, tf_out) def test_nested_cond(): graph = tf.Graph() with graph.as_default(): def fn1(a, b): def nest_fn1(): return tf.add(1, 2) def nest_fn2(): return tf.subtract(10, 5) res = tf.cond(tf.less(1, 2), nest_fn1, nest_fn2) return tf.multiply(tf.add(87, res), 10) def fn2(a, b): return tf.add(10, 10) x = tf.constant(5) y = tf.constant(6) z = tf.constant(7) pred = tf.less(x, y) r = tf.cond(pred, lambda: fn1(x, y), lambda: fn2(y, z)) with tf.Session() as sess: tf_out = sess.run(r, feed_dict={x: 1, y: 2, z: 3, pred: True}) check_equal(graph, tf_out) def test_loop_in_cond(): graph = tf.Graph() with graph.as_default(): def fn1(a, b): i = tf.constant(0) def cd(i): return tf.less(i, 10) def bd(i):
return tf.add(i, 1) res = tf.while_loop(cd, bd, [i]) return tf.multiply(tf.add(20, res), 10) def fn2(a, b): return tf.add(10, 20) x = tf.constant(7) y = tf.constant(20) z = tf.constant(10) pred = tf.less(x, y) r = tf.cond(pred, lambda: fn1(x, y), lambda: fn2(y, z)) with tf.Session() as sess: tf_out = sess.run(r, feed_dict={x: 1, y: 2, z: 3, pred: True}) check_equal(graph, tf_out) def test_cond_in_loop(): graph = tf.Graph() with graph.as_default(): def body(x): x = tf.constant(7) z = tf.constant(20) res = tf.cond(tf.less(x, 10), lambda: tf.add(10, 20), lambda: tf.square(10)) return tf.multiply(res, x) x = tf.constant(21) def condition(x): return tf.less(x, 100) r = tf.while_loop(condition, body, loop_vars=[x]) with tf.Session() as sess: tf_out = sess.run(r) check_equal(graph, tf_out) def test_vanilla_loop_bound(): graph = tf.Graph() with graph.as_default(): dshape = (2, 10) dtype = "float32" dname = "data" np_data = np.random.uniform(size=dshape).astype(dtype) data = tf.placeholder(shape=dshape, dtype=dtype, name=dname) x = tf.slice(data, [1, 4], [1, 4]) outer = x + 5.0 def body(x, y): res = tf.cond(tf.less(y, 10), lambda: tf.add(10.0, 20.0), lambda: tf.square(10.0)) z = tf.constant(7) res = tf.cond(tf.less(z, 10), lambda: res * 5, lambda: res + 10) return tf.multiply(res, x * outer), y + 1 y = tf.constant(0) def condition(x, y): return tf.less(y, 20) r = tf.while_loop(condition, body, loop_vars=[x, y]) with tf.Session() as sess: tf_out = sess.run(r, feed_dict={"%s:0" % dname: np_data}) check_equal(graph, tf_out, {dname: np_data}) def test_nested_loop_bound(): graph = tf.Gra
ph() with graph.as_default(): dshape = (2, 10) dtype = "float32" dname = "data" np_data = np.random.uniform(size=dshape).astype(dtype) data = tf.placeholder(shape=dshape, dtype=dtype, name=dname) x = tf.slice(data, [1, 4], [1, 4]) outer = x + 5.0 def body(x, y): res = tf.cond(tf.less(y, 10), lambda: tf.add(10.0, 20.0), lambda: tf.square(10.0)) def nested_body(nx, ny): return nx + 1, res + 2.0 def nested_cond(nx, ny): return tf.less(nx, 15) nx = tf.constant(0) ny = tf.constant(0.0) nested_res = tf.while_loop(nested_cond, nested_body, loop_vars=[nx, ny]) res = res + nested_res[1] z = tf.constant(7) res = tf.cond(tf.less(z, 10), lambda: res * 5, lambda: res + 10) return tf.multiply(res, x * outer), y + 1 y = tf.constant(0) def condition(x, y): return tf.less(y, 20) r = tf.while_loop(condition, body, loop_vars=[x, y]) with tf.Session() as sess: tf_out = sess.run(r, feed_dict={"%s:0" % dname: np_data}) check_equal(graph, tf_out, {dname: np_data}) def test_switch(): graph = tf.Graph() with graph.as_default(): data_np = np.random.uniform(0, 5, size=(2, 4, 5, 1)).astype("float32") dname = "data" flag_name = "flag" data = tf.placeholder(shape=data_np.shape, dtype=data_np.dtype, name=dname) split = tf.split(data, 2, axis=0) flag = tf.placeholder(shape={}, dtype=tf.bool, name=flag_name) output_false, output_true = control_flow_ops.switch(split[1], flag) with tf.Session() as sess: tf_out = sess.run(output_false, feed_dict={data.name: data_np, flag.name: False}) check_equal(graph, tf_out, {dname: data_np, flag_name: False}) def test_loop_tuple_input(): graph = tf.Graph() with graph.as_default(): data_np = np.random.uniform(0, 5, size=
(2, 4, 5, 1)).astype("float32") dname = "data" data = tf.placeholder(shape=data_np.shape, dtype=data_np.dtype, name=dname) split = tf.split(data, 2, axis=0) def body(x, y): return x + 2, y + 1 start = tf.constant(0) def condition(x, y): return tf.less(y, 20) r = tf.while_loop(condition, body, loop_vars=[split[1], start]) with tf.Session() as sess: tf_out = sess.run(r, feed_dict={data.name: data_np}) check_equal(graph, tf_out, {dname: data_np}) if __name__ == "__main__": test_vanilla_loop() test_loop_2_vars() test_loop_3_vars() test_loop_conditions() test_callnode_loop_vars() test_vanilla_cond() test_multiple_cond_vars() test_cond_fn_parameters() test_nested_loop() test_nested_cond() test_loop_in_cond() test_cond_in_loop() test_vanilla_loop_bound() test_nested_loop_bound() test_switch() test_loop_tuple_input()
"""Unit tests for converting TensorFlow debugging ops to Relay.""" try:
import tensorflow.compat.v1 as tf tf.disable_v2_behavior() except ImportError:
import tensorflow as tf
import numpy as np from tvm
import relay from tvm.relay.frontend.tensorflow
import from_tensorflow def run_relay(graph, shape_dict=None, *vars): mod, params = from_tensorflow(graph.as_graph_def(add_shapes=True), shape=shape_dict) return relay.create_executor("debug", mod=mod).evaluate()(*vars) def test_assert_true(): g = tf.Graph() shape = (1, 2) with g.as_default(): x = tf.placeholder(tf.float32, shape=shape, name="input") assert_op = tf.Assert(tf.reduce_all(tf.less_equal(x, x)), ["it failed"]) with tf.Session() as sess: x_value = np.random.rand(*shape) assert sess.run(assert_op, feed_dict={x: x_value}) is None np.testing.assert_allclose(0, run_relay(g, {"input": shape}).numpy()) def test_assert_true_var_capture(): g = tf.Graph() with g.as_default(): x = tf.placeholder(tf.float32, shape=()) assert_op = tf.Assert(tf.less_equal(x, x), ["it failed", x]) with tf.Session() as sess: x_value = np.random.rand() assert sess.run(assert_op, feed_dict={x: x_value}) is None np.testing.assert_allclose(True, run_relay(g, None, x_value).numpy()) def test_assert_false(): g = tf.Graph() with g.as_default(): assert_op = tf.Assert(tf.constant(False), ["it failed"]) with tf.Session() as sess: try: print(sess.run(assert_op)) assert False except tf.errors.InvalidArgumentError as e: assert "it failed" in e.message np.testing.assert_allclose(0, run_relay(g).numpy()) if __name__ == "__main__": test_assert_true() test_assert_true_var_capture() test_assert_false()
""" Tensorflow testcases ==================== This article is a test script to test tensorflow operator with Relay. """ from __future__
import print_function from distutils.version
import LooseVersion
import threading
import platform
import os.path from packaging
import version as package_version
import numpy as np
import pytest from PIL
import Image from tvm
import relay from tvm.runtime.vm
import VirtualMachine from tvm.relay.frontend.tensorflow
import from_tensorflow from tvm.contrib
import graph_executor from tvm.contrib
import utils
import tvm
import tvm.relay.testing.tf as tf_testing
import tvm.testing from tensorflow.python.framework
import constant_op from tensorflow.python.framework
import graph_util from tensorflow.python.ops
import nn_ops from tensorflow.python.ops
import nn from tensorflow.python.ops
import array_ops from tensorflow.python.ops
import math_ops from tensorflow.python.ops
import variable_scope from tensorflow.python.ops
import variables from tensorflow.python.ops
import init_ops from tensorflow.python.framework
import function from tensorflow.python.framework
import ops from tensorflow.python.framework
import dtypes from tensorflow.python.ops
import gen_functional_ops from tensorflow.python.client
import device_lib try:
import tensorflow.compat.v1 as tf tf.disable_v2_behavior() except ImportError:
import tensorflow as tf gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5) gpu_sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) gpu_sess.close() def convert_to_list(x): if not isinstance(x, list): x = [x] return x tf_dtypes = { "float32": tf.float32, "float16": tf.float16, "float64": tf.float64, "int32": tf.int32, "uint8": tf.uint8, "int8": tf.int8, "int16": tf.int16, "uint16": tf.uint16, "int64": tf.int64, } def vmobj_to_list(o): """Converts TVM objects returned by VM execution to Python List.""" if isinstance(o, tvm.nd.NDArray): return [o.numpy()] elif isinstance(o, tvm.runtime.container.ADT): result = [] for f in o: result.extend(vmobj_to_list(f)) return result elif isinstance(o, tvm.relay.backend.interpreter.ConstructorValue): if o.constructor.name_hint == "Cons": tl = vmobj_to_list(o.fields[1]) hd = vmobj_to_list(o.fields[0]) hd.extend(tl) return hd elif o.constructor.name_hint == "Nil": return [] elif "tensor_nil" in o.constructor.name_hint: return [0] elif "tensor" in o.constructor.name_hint: return [o.fields[0].numpy()] else: raise RuntimeError(f"Unknown object type: {o.constructor.name_hint}") else: raise RuntimeError(f"Unknown object type: {type(o)}") def run_tvm_graph( graph_def, input_data, input_node, num_output=1, target="llvm", out_names=None, opt_level=3, mode="graph_executor", cuda_layout="NCHW", layout=None, disabled_pass=None, ignore_in_shape=False, serialize=False, convert_config=None, ): """Generic function to compile on relay and execute on tvm""" input_data = convert_to_list(input_data) input_node = convert_to_list(input_node) if target == "cuda": layout = cuda_layout target_host = None if ignore_in_sha
pe: shape_dict = None else: shape_dict = { e: i.shape if hasattr(i, "shape") else () for e, i in zip(input_node, input_data) } mod, params = relay.frontend.from_tensorflow( graph_def, layout=layout, shape=shape_dict, outputs=out_names, convert_config=convert_config, ) dev = tvm.device(target, 0) if mode == "debug": inputs = [] for param in mod["main"].params: found = False for i, n in enumerate(input_node): if n == param.name_hint: found = True inputs.append(tvm.nd.array(input_data[i])) break if not found: inputs.append(tvm.nd.array(params[param.name_hint])) result = relay.create_executor(mode, mod=mod, device=tvm.cpu(), target="llvm").evaluate()( *inputs ) return vmobj_to_list(result) elif mode == "vm": with tvm.transform.PassContext(opt_level=opt_level, disabled_pass=disabled_pass): mod = relay.transform.InferType()(mod) vm_exec = relay.vm.compile(mod, target="llvm", params=params) if serialize: code, lib = vm_exec.save() vm_exec = tvm.runtime.vm.Executable.load_exec(code, lib) vm = VirtualMachine(vm_exec, tvm.cpu()) inputs = {} for e, i in zip(input_node, input_data): inputs[e] = tvm.nd.array(i) result = vm.invoke("main", **inputs) return vmobj_to_list(result) else: with tvm.transform.PassContext(opt_level=opt_level, disabled_pass=disabled_pass): target = tvm.target.Target(target, target_host) graph, lib, params = relay.build(mod, target=target, params=params) m = graph_executor.create(graph, lib, dev) for e, i in zip(input_node, input_data): if e != "": m.set_input(e, tvm.nd.array(i)) m.set_input(**params)
m.run() assert out_names is None or num_output == len( out_names ), f"out_names: {out_names} num_output: {num_output}" tvm_output_list = [m.get_output(i).numpy() for i in range(num_output)] return tvm_output_list def run_tf_graph(sess, input_data, input_node, output_node): """Generic function to execute tensorflow""" input_data = convert_to_list(input_data) input_node = convert_to_list(input_node) output_node = convert_to_list(output_node) tensor = [sess.graph.get_tensor_by_name(output_name) for output_name in output_node] input_dict = {e: input_data[i] for i, e in enumerate(input_node)} if len(input_node) == 1 and input_node[0] == "": output_data = sess.run(tensor) else: output_data = sess.run(tensor, input_dict) return output_data def compare_tf_with_tvm( in_data, in_name, out_name, init_global_variables=False, no_gpu=False, opt_level=3, mode="graph_executor", cuda_layout="NCHW", add_shapes_to_graph_def=True, targets=None, ignore_in_shape=False, convert_config=None, ): """Generic function to generate and compare tensorflow and TVM output""" def name_without_num(name): return name.split(":")[0] if ":" in name else name out_name = convert_to_list(out_name) out_node = [name_without_num(name) for name in out_name] in_data = convert_to_list(in_data) in_name = convert_to_list(in_name) in_node = [name_without_num(name) for name in in_name] with tf.Session() as sess: if init_global_variables: sess.run(variables.global_variables_initializer()) final_graph_def = ( tf_testing.AddShapesToGraphDef(sess, out_node) if add_shapes_to_graph_def else tf.get_default_graph().as_graph_def() ) tf_output = run_tf_graph(sess, in_data, in_name, out_name) devices = targets if targets else ["llvm", "cuda"] for device in devices:
_ = tvm.device(device, 0) if not tvm.testing.device_enabled(device): print(f"Skip because {device} is not enabled") continue if no_gpu and device == "cuda": continue if "cublas" in device and not tvm.get_global_func("tvm.contrib.cublas.matmul", True): print(f"Skip because cublas is not enabled: {device}") continue tvm_output = run_tvm_graph( final_graph_def, in_data, in_node, target=device, out_names=out_name, num_output=len(out_name), opt_level=opt_level, mode=mode, cuda_layout=cuda_layout, ignore_in_shape=ignore_in_shape, convert_config=convert_config, ) for i, tf_out in enumerate(tf_output): if not isinstance(tf_out, np.ndarray): assert len(tvm_output[i].shape) == 0 tvm.testing.assert_allclose(tf_out, tvm_output[i], atol=1e-5, rtol=1e-5) sess.close() def is_gpu_available(): """Verify gpu is available""" local_device_protos = device_lib.list_local_devices() gpu_list = [x.name for x in local_device_protos if x.device_type == "GPU"] if gpu_list: print("Tensorflow GPU:", gpu_list) return True else: return False def _test_pooling_iteration(input_shape, **kwargs): """One iteration of pool operation with given shapes and attributes""" x = -np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) - 1 with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=input_shape, dtype="float32") nn_ops.pool(in_data, **kwargs) if kwargs["pooling_type"] == "MAX": out_name = "max_pool:0" else: out_name = "avg_pool:0" compare_tf_with_tvm(x, "Placeholder:0
", out_name) def _test_pooling(input_shape, **kwargs): _test_pooling_iteration(input_shape, **kwargs) if is_gpu_available(): if len(input_shape) == 4: input_shape = [input_shape[ii] for ii in (0, 3, 1, 2)] if isinstance(kwargs["padding"], list): kwargs["padding"] = [kwargs["padding"][ii] for ii in (0, 3, 1, 2)] kwargs["data_format"] = "NCHW" _test_pooling_iteration(input_shape, **kwargs) def _test_pooling_dynamic(input_shape, np_shape, **kwargs): """Pooling with dynamic height and width dimensions.""" x = -np.arange(np.prod(np_shape), dtype=np.float32).reshape(np_shape) - 1 with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=input_shape, dtype="float32") nn_ops.pool(in_data, **kwargs) if kwargs["pooling_type"] == "MAX": out_name = "max_pool:0" else: out_name = "avg_pool:0" compare_tf_with_tvm(x, "Placeholder:0", out_name, mode="vm", ignore_in_shape=True) @tvm.testing.uses_gpu def test_forward_pooling(): """Pooling""" for pool_type in ["AVG", "MAX"]: _test_pooling( input_shape=[1, 3, 32, 32, 32], window_shape=[2, 2, 2], padding="VALID", pooling_type=pool_type, dilation_rate=[1, 1, 1], strides=[2, 2, 2], ) _test_pooling( input_shape=[1, 3, 32, 32, 32], window_shape=[1, 1, 1], padding="SAME", pooling_type=pool_type, dilation_rate=[1, 1, 1], strides=[1, 1, 1], ) _test_pooling( input_shape=[1, 3, 32, 32, 32], window_shape=[2, 2, 2], padding="SAME", pooling_type=pool_type, dilation_rate=[1, 1, 1], strides=[2, 2, 2], ) _test_pooling_dynamic( input_shape=[1, None, None, 3], np_shape=[1, 32, 32, 3], window_shape=[2,
2], padding="SAME", pooling_type=pool_type, dilation_rate=[1, 1], strides=[1, 1], ) if is_gpu_available(): _test_pooling( input_shape=[1, 3, 32, 32, 32], window_shape=[1, 1, 1], padding="SAME", pooling_type=pool_type, dilation_rate=[1, 1, 1], strides=[1, 1, 1], data_format="NCDHW", ) _test_pooling( input_shape=[1, 3, 32, 32, 32], window_shape=[2, 2, 2], padding="VALID", pooling_type=pool_type, dilation_rate=[1, 1, 1], strides=[2, 2, 2], data_format="NCDHW", ) _test_pooling( input_shape=[2, 9, 10, 2], window_shape=[1, 1], padding="SAME", pooling_type=pool_type, dilation_rate=[1, 1], strides=[1, 1], ) _test_pooling( input_shape=[2, 10, 9, 2], window_shape=[1, 1], padding="SAME", pooling_type=pool_type, dilation_rate=[1, 1], strides=[1, 1], ) _test_pooling( input_shape=[2, 9, 10, 2], window_shape=[2, 1], padding="SAME", pooling_type=pool_type, dilation_rate=[1, 1], strides=[1, 1], ) _test_pooling( input_shape=[2, 10, 9, 2], window_shape=[2, 3], padding="SAME", pooling_type=pool_type, dilation_rate=[1, 1], strides=[2, 1], ) _test_pooling( input_shape=[1, 1, 2, 1], window_shape=[1, 1], padding="VALID", pooling_type=pool_type, dilation_rate=[1, 2], ) _test_pooling( input_shape=[1, 2, 1], window_shape=[1]
, padding="VALID", pooling_type=pool_type, dilation_rate=[2], ) if package_version.parse(tf.VERSION) >= package_version.parse("2.4.1"): _test_pooling( input_shape=[2, 9, 10, 2], window_shape=[4, 4], padding=[[0, 0], [0, 1], [2, 3], [0, 0]], pooling_type="MAX", dilation_rate=[1, 1], strides=[1, 1], ) def _test_convolution( opname, tensor_in_sizes, filter_in_sizes, dilations, strides, padding, data_format, deconv_output_shape=None, add_shapes_to_graph_def=True, ): """One iteration of convolution with given shapes and attributes""" deconv_output_shape = deconv_output_shape or [] total_size_1 = np.prod(tensor_in_sizes) total_size_2 = np.prod(filter_in_sizes) data_array = [f * 1.0 for f in range(1, total_size_1 + 1)] filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)] with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype="float32") in_filter = constant_op.constant(filter_array, shape=filter_in_sizes, dtype="float32") if data_format == "NHWC": strides = [1] + strides + [1] dilations = [1] + dilations + [1] else: strides = [1, 1] + strides dilations = [1, 1] + dilations if opname == "conv": nn_ops.conv2d( in_data, in_filter, strides=strides, dilations=dilations, padding=padding, data_format=data_format, ) compare_tf_with_tvm( np.reshape(data_array, tensor_in_sizes).astype("float32"), "Placeholder:0", "Conv2D:0", add_shapes_to_graph_def=add_shapes_to_graph_def, ) elif opname == "conv_transpose": nn_ops.conv2d_transpose( in_data,
in_filter, output_shape=deconv_output_shape, strides=strides, padding=padding, data_format=data_format, ) compare_tf_with_tvm( np.reshape(data_array, tensor_in_sizes).astype("float32"), "Placeholder:0", "conv2d_transpose:0", add_shapes_to_graph_def=add_shapes_to_graph_def, ) else: nn_ops.depthwise_conv2d_native( in_data, in_filter, strides=strides, dilations=dilations, padding=padding, data_format=data_format, ) compare_tf_with_tvm( np.reshape(data_array, tensor_in_sizes).astype("float32"), "Placeholder:0", "DepthwiseConv2dNative:0", add_shapes_to_graph_def=add_shapes_to_graph_def, ) @pytest.mark.skip(reason="See https: @tvm.testing.uses_gpu def test_forward_convolution(): """Convolution""" if is_gpu_available(): _test_convolution("conv", [4, 176, 8, 8], [1, 1, 176, 32], [1, 1], [1, 1], "SAME", "NCHW") _test_convolution("conv", [4, 19, 17, 17], [3, 3, 19, 19], [1, 1], [2, 2], "VALID", "NCHW") _test_convolution("conv", [4, 124, 17, 17], [1, 1, 124, 19], [1, 1], [1, 1], "SAME", "NCHW") _test_convolution("conv", [4, 12, 17, 17], [3, 3, 12, 32], [1, 1], [2, 2], "VALID", "NCHW") _test_convolution( "depthwise", [4, 176, 8, 8], [1, 1, 176, 1], [1, 1], [1, 1], "SAME", "NCHW" ) _test_convolution( "depthwise", [4, 19, 17, 17], [3, 3, 19, 1], [1, 1], [2, 2], "VALID", "NCHW" ) _test_convolution( "depthwise", [4, 124, 17, 17], [1, 1, 124, 1], [1, 1], [1, 1], "SAME", "NCHW" ) _test_convolution( "depthwise", [4, 12, 17, 17], [3, 3, 12, 1], [1, 1], [2, 2], "VALID", "NCHW" ) _test
_convolution( "depthwise", [4, 12, 17, 17], [3, 3, 12, 2], [1, 1], [2, 2], "VALID", "NCHW" ) _test_convolution( "conv_transpose", [4, 32, 8, 8], [1, 1, 176, 32], [1, 1], [1, 1], "SAME", "NCHW", [4, 176, 8, 8], ) _test_convolution( "conv_transpose", [4, 32, 8, 8], [2, 2, 176, 32], [1, 1], [1, 1], "SAME", "NCHW", [4, 176, 8, 8], ) _test_convolution( "conv_transpose", [4, 32, 8, 8], [2, 2, 176, 32], [1, 1], [2, 2], "SAME", "NCHW", [4, 176, 15, 15], ) _test_convolution( "conv_transpose", [4, 32, 8, 8], [3, 3, 176, 32], [1, 1], [1, 1], "SAME", "NCHW", [4, 176, 8, 8], ) _test_convolution( "conv_transpose", [4, 32, 8, 8], [3, 3, 176, 32], [1, 1], [2, 2], "SAME", "NCHW", [4, 176, 15, 15], ) _test_convolution( "conv_transpose", [4, 32, 8, 8], [3, 3, 176, 32], [1, 1], [2, 2], "SAME", "NCHW", [4, 176, 16, 16], ) _test_convolution( "conv_transpose", [4, 19, 8, 8], [3, 3, 19, 19], [1, 1], [2, 2], "VALID", "NCHW", [4, 19, 17, 17], ) _test_convolution( "conv_transpose", [4, 19, 17, 17], [1, 1, 124, 19], [1, 1], [1, 1], "SAME", "NCHW", [4, 124, 17, 17], ) _test_convolution( "conv_transpose", [4, 19,
17, 17], [3, 3, 124, 19], [1, 1], [1, 1], "SAME", "NCHW", [4, 124, 17, 17], ) _test_convolution( "conv_transpose", [4, 32, 8, 8], [3, 3, 12, 32], [1, 1], [2, 2], "VALID", "NCHW", [4, 12, 17, 17], ) _test_convolution( "conv_transpose", [4, 19, 8, 8], [2, 2, 19, 19], [1, 1], [2, 2], "VALID", "NCHW", [4, 19, 16, 16], ) _test_convolution( "conv_transpose", [4, 32, 8, 8], [2, 2, 12, 32], [1, 1], [2, 2], "VALID", "NCHW", [4, 12, 16, 16], ) _test_convolution( "conv_transpose", [1, 19, 8, 8], [1, 1, 1, 19], [1, 1], [1, 1], "VALID", "NCHW", [1, 1, 8, 8], ) _test_convolution("conv", [4, 8, 8, 176], [1, 1, 176, 32], [1, 1], [1, 1], "SAME", "NHWC") _test_convolution("conv", [4, 17, 17, 19], [3, 3, 19, 19], [1, 1], [2, 2], "VALID", "NHWC") _test_convolution("conv", [4, 17, 17, 124], [1, 1, 124, 19], [1, 1], [1, 1], "SAME", "NHWC") _test_convolution("conv", [4, 17, 17, 12], [3, 3, 12, 32], [1, 1], [2, 2], "VALID", "NHWC") _test_convolution( "conv", [4, 17, 17, 12], [3, 3, 12, 32], [1, 1], [2, 2], "VALID", "NHWC", add_shapes_to_graph_def=False, ) _test_convolution("depthwise", [4, 8, 8, 176], [1, 1, 176, 1], [1, 1], [1, 1], "SAME", "NHWC") _test_convolution("depthwise", [4, 17, 17, 19], [3, 3, 19, 1], [1, 1], [2, 2], "VALID", "NHWC") _test_convolution("depthwise", [4, 17, 17, 124], [1, 1, 124, 1], [1, 1], [1, 1], "SAME", "NHWC") _test_convolution("depthwise", [4, 17, 17, 12], [3, 3, 12, 1
], [1, 1], [2, 2], "VALID", "NHWC") _test_convolution("depthwise", [4, 17, 17, 12], [3, 3, 12, 2], [1, 1], [2, 2], "VALID", "NHWC") _test_convolution( "depthwise", [4, 17, 17, 12], [3, 3, 12, 2], [1, 1], [2, 2], "VALID", "NHWC", add_shapes_to_graph_def=False, ) _test_convolution( "conv_transpose", [4, 8, 8, 32], [1, 1, 176, 32], [1, 1], [1, 1], "SAME", "NHWC", [4, 8, 8, 176], ) _test_convolution( "conv_transpose", [4, 8, 8, 32], [2, 2, 176, 32], [1, 1], [1, 1], "SAME", "NHWC", [4, 8, 8, 176], ) _test_convolution( "conv_transpose", [4, 8, 8, 32], [2, 2, 176, 32], [1, 1], [2, 2], "SAME", "NHWC", [4, 15, 15, 176], ) _test_convolution( "conv_transpose", [4, 8, 8, 32], [3, 3, 176, 32], [1, 1], [1, 1], "SAME", "NHWC", [4, 8, 8, 176], ) _test_convolution( "conv_transpose", [4, 8, 8, 32], [3, 3, 176, 32], [1, 1], [2, 2], "SAME", "NHWC", [4, 15, 15, 176], ) _test_convolution( "conv_transpose", [4, 8, 8, 32], [3, 3, 176, 32], [1, 1], [2, 2], "SAME", "NHWC", [4, 16, 16, 176], ) _test_convolution( "conv_transpose", [4, 8, 8, 19], [3, 3, 19, 19], [1, 1], [2, 2], "VALID", "NHWC", [4, 17, 17, 19], ) _test_convolution( "conv_transpose", [4, 17, 17, 19], [1, 1, 124, 19], [1, 1], [1, 1], "SAME", "NHWC", [4, 17, 17, 124], ) _test_convolution( "conv_transpose", [4, 17, 17, 19], [3, 3, 124, 19], [1, 1], [1, 1], "SAME", "NHWC",
[4, 17, 17, 124], ) _test_convolution( "conv_transpose", [4, 8, 8, 32], [3, 3, 12, 32], [1, 1], [2, 2], "VALID", "NHWC", [4, 17, 17, 12], ) _test_convolution( "conv_transpose", [4, 8, 8, 19], [2, 2, 19, 19], [1, 1], [2, 2], "VALID", "NHWC", [4, 16, 16, 19], ) _test_convolution( "conv_transpose", [4, 8, 8, 32], [2, 2, 12, 32], [1, 1], [2, 2], "VALID", "NHWC", [4, 16, 16, 12], ) _test_convolution( "conv_transpose", [1, 8, 8, 19], [1, 1, 1, 19], [1, 1], [1, 1], "VALID", "NHWC", [1, 8, 8, 1], ) _test_convolution( "conv_transpose", [4, 8, 8, 32], [1, 1, 176, 32], [1, 1], [1, 1], "SAME", "NHWC", [4, 8, 8, 176], add_shapes_to_graph_def=False, ) if package_version.parse(tf.VERSION) >= package_version.parse("2.4.1"): _test_convolution( "conv", [4, 8, 8, 16], [1, 1, 16, 32], [1, 1], [1, 1], [[0, 0], [2, 3], [0, 1], [0, 0]], "NHWC", ) _test_convolution( "depthwise", [4, 8, 8, 16], [1, 1, 16, 1], [1, 1], [1, 1], [[0, 0], [2, 3], [0, 1], [0, 0]], "NHWC", ) _test_convolution( "conv_transpose", [4, 8, 8, 32], [3, 3, 176, 32], [1, 1], [2, 2], [[0, 0], [1, 0], [1, 0], [0, 0]], "NHWC", [4, 16, 16, 176], ) def _test_convolution3d( opname, tensor_in_sizes, filter_in_sizes, dilations, strides, padding, data_format, deconv_output_shape=None, add_shapes_to_graph_def=True, ): """One iteration of
3D convolution with given shapes and attributes""" deconv_output_shape = deconv_output_shape or [] total_size_1 = np.prod(tensor_in_sizes) total_size_2 = np.prod(filter_in_sizes) data_array = [f * 1.0 for f in range(1, total_size_1 + 1)] filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)] with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype="float32") in_filter = constant_op.constant(filter_array, shape=filter_in_sizes, dtype="float32") if data_format == "NDHWC": strides = [1] + strides + [1] dilations = [1] + dilations + [1] else: strides = [1, 1] + strides dilations = [1, 1] + dilations if opname == "conv": nn_ops.conv3d( in_data, in_filter, strides=strides, dilations=dilations, padding=padding, data_format=data_format, ) compare_tf_with_tvm( np.reshape(data_array, tensor_in_sizes).astype("float32"), "Placeholder:0", "Conv3D:0", cuda_layout="NCDHW", add_shapes_to_graph_def=add_shapes_to_graph_def, ) @tvm.testing.uses_gpu def test_forward_convolution3d(): """Convolution3d""" if is_gpu_available(): _test_convolution3d( "conv", [4, 176, 8, 8, 8], [1, 1, 1, 176, 32], [1, 1, 1], [1, 1, 1], "SAME", "NCDHW" ) _test_convolution3d( "conv", [4, 19, 17, 17, 17], [3, 3, 3, 19, 19], [1, 1, 1], [2, 2, 2], "VALID", "NCDHW" ) _test_convolution3d( "conv", [4, 124, 17, 17, 17], [1, 1, 1, 124, 19], [1, 1, 1], [1, 1, 1], "SAME", "NCDHW" ) _test_convolution3d( "conv", [4, 12, 17, 17, 17], [3, 3, 3, 12, 32], [1, 1, 1], [2, 2, 2], "VALID", "NCDHW" ) _test_convolution3d( "conv", [4, 8, 8, 8, 176], [1, 1, 1, 176, 3
2], [1, 1, 1], [1, 1, 1], "SAME", "NDHWC" ) _test_convolution3d( "conv", [4, 17, 17, 17, 19], [3, 3, 3, 19, 19], [1, 1, 1], [2, 2, 2], "VALID", "NDHWC" ) _test_convolution3d( "conv", [4, 17, 17, 17, 124], [1, 1, 1, 124, 19], [1, 1, 1], [1, 1, 1], "SAME", "NDHWC" ) _test_convolution3d( "conv", [4, 17, 17, 17, 12], [3, 3, 3, 12, 32], [1, 1, 1], [2, 2, 2], "VALID", "NDHWC" ) _test_convolution3d( "conv", [4, 17, 17, 17, 12], [3, 3, 3, 12, 32], [1, 1, 1], [2, 2, 2], "VALID", "NDHWC", add_shapes_to_graph_def=False, ) def _test_convolution3d_transpose( data_shape, filter_shape, strides, padding, output_shape, data_format="NCDHW", add_shapes_to_graph_def=True, ): """One iteration of 3D convolution transpose with given shapes and attributes""" dtype = "float32" data_array = np.random.uniform(size=data_shape).astype(dtype) filter_array = np.random.uniform(size=filter_shape).astype(dtype) if data_format == "NDHWC": strides = [1] + strides + [1] else: strides = [1, 1] + strides with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data_shape, dtype=dtype) in_filter = constant_op.constant(filter_array, shape=filter_shape, dtype=dtype) nn_ops.conv3d_transpose( in_data, in_filter, output_shape=output_shape, strides=strides, padding=padding, data_format=data_format, ) compare_tf_with_tvm( data_array, "Placeholder:0", "conv3d_transpose:0", cuda_layout="NDHWC", add_shapes_to_graph_def=add_shapes_to_graph_def, ) @tvm.testing.uses_gpu def test_forward_convolution3d_transpose(): """Convolution3d transpose""" if is_gpu_available(): _test_convolution3d_transpose( data_shape=[1, 10, 8, 8, 8], filt
er_shape=[1, 1, 1, 6, 10], strides=[1, 1, 1], padding="VALID", output_shape=[1, 6, 8, 8, 8], ) _test_convolution3d_transpose( data_shape=[4, 9, 8, 8, 8], filter_shape=[1, 1, 1, 6, 9], strides=[1, 1, 1], padding="VALID", output_shape=[4, 6, 8, 8, 8], ) _test_convolution3d_transpose( data_shape=[1, 3, 8, 8, 8], filter_shape=[1, 1, 1, 6, 3], strides=[2, 2, 2], padding="SAME", output_shape=[1, 6, 15, 15, 15], ) _test_convolution3d_transpose( data_shape=[1, 16, 8, 8, 8], filter_shape=[3, 3, 3, 6, 16], strides=[3, 3, 3], padding="VALID", output_shape=[1, 6, 24, 24, 24], ) _test_convolution3d_transpose( data_shape=[1, 8, 8, 8, 10], filter_shape=[1, 1, 1, 6, 10], strides=[1, 1, 1], padding="VALID", output_shape=[1, 8, 8, 8, 6], data_format="NDHWC", ) _test_convolution3d_transpose( data_shape=[4, 8, 8, 8, 9], filter_shape=[1, 1, 1, 6, 9], strides=[1, 1, 1], padding="VALID", output_shape=[4, 8, 8, 8, 6], data_format="NDHWC", ) _test_convolution3d_transpose( data_shape=[1, 8, 8, 8, 3], filter_shape=[1, 1, 1, 6, 3], strides=[2, 2, 2], padding="SAME", output_shape=[1, 15, 15, 15, 6], data_format="NDHWC", ) _test_convolution3d_transpose( data_shape=[1, 8, 8, 8, 16], filter_shape=[3, 3, 3, 6, 16], strides=[3, 3, 3], padding="VALID", output_shape=[1, 24, 24, 24, 6], data_format="NDHWC", ) _test_convolution3d_transpose( data_shape=[1, 8, 8, 8, 16], filter_shape=[3, 3, 3, 6, 16], strides=[3, 3, 3], padding="VALID", output_shape=[1, 24, 24, 24, 6], data_format="NDHWC", add_
shapes_to_graph_def=False, ) def _test_biasadd(tensor_in_sizes, data_format): """One iteration of biasadd with given shapes and attributes""" total_size_1 = 1 for s in tensor_in_sizes: total_size_1 *= s tensor_bias_sizes = [tensor_in_sizes[1]] if data_format == "NCHW" else [tensor_in_sizes[3]] total_size_2 = tensor_bias_sizes[0] data_array = [f * 1.0 for f in range(1, total_size_1 + 1)] bias_array = [f * 1.0 for f in range(1, total_size_2 + 1)] with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype="float32") in_bias = constant_op.constant(bias_array, shape=tensor_bias_sizes, dtype="float32") nn_ops.bias_add(in_data, in_bias, data_format=data_format) compare_tf_with_tvm( np.reshape(data_array, tensor_in_sizes).astype("float32"), "Placeholder:0", "BiasAdd:0" ) @tvm.testing.uses_gpu def test_forward_biasadd(): """Bias add""" if is_gpu_available(): _test_biasadd([4, 176, 8, 8], "NCHW") _test_biasadd([1, 100, 1, 1], "NCHW") _test_biasadd([4, 19, 17, 17], "NCHW") _test_biasadd([4, 124, 3, 3], "NCHW") _test_biasadd([4, 8, 8, 176], "NHWC") _test_biasadd([1, 1, 1, 100], "NHWC") _test_biasadd([4, 17, 17, 19], "NHWC") _test_biasadd([4, 3, 3, 124], "NHWC") def _test_forward_where(input_shape): with tf.Graph().as_default(): dtype = tf.float32 t = tf.constant( np.random.choice([0, 1, -2, 3, -1, 0.1, -0.2], size=input_shape).astype(dtype.name) ) out = tf.where(t) compare_tf_with_tvm([], [], out.name, mode="debug") compare_tf_with_tvm([], [], out.name, mode="vm") def test_forward_argwhere(): _test_forward_where((5,)) _test_forward_where((5, 5)) _test_forward_where((5, 5, 5)) _test_forward_where((5, 5, 5, 5)) _test_forward_where((5, 5, 5, 5, 5)) def _test_space_to_batch_nd(input_shape, block_shape, paddings, dtype="int32"):