def _test_reshape(data, out_shape, wrap_shape, quantized=False):
    """One iteration of reshape operation with given data and out shape"""
    if quantized:
        with tf.Graph().as_default():
            in_data = array_ops.placeholder(shape=data.shape, dtype="float32", name="in")
            inq_data = tf.quantization.fake_quant_with_min_max_args(
                in_data, min=-100, max=100, name="inq_0"
            )
            input_range = {"inq_0": (-100, 100)}

            out_shape = out_shape if not wrap_shape else np.array(out_shape, dtype=np.int32)
            in_shape = (
                out_shape
                if not wrap_shape
                else array_ops.placeholder(
                    shape=out_shape.shape, dtype=out_shape.dtype, name="Newshape"
                )
            )

            out = array_ops.reshape(inq_data, in_shape)
            out = tf.quantization.fake_quant_with_min_max_args(out, min=-200, max=200, name="out")
            compare_tflite_with_tvm(
                [data, out_shape] if wrap_shape else [data],
                ["inq_0:0", "Newshape:0"] if wrap_shape else ["inq_0:0"],
                [inq_data, in_shape] if wrap_shape else [inq_data],
                [out],
                quantized=True,
                input_range=input_range,
                mode="vm",
            )
    else:
        with tf.Graph().as_default():
            in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)

            out_shape = out_shape if not wrap_shape else np.array(out_shape, dtype=np.int32)
            in_shape = (
                out_shape
                if not wrap_shape
                else array_ops.placeholder(
                    shape=out_shape.shape, dtype=out_shape.dtype, name="Newshape"
                )
            )

            out = array_ops.reshape(in_data, in_shape)
            compare_tflite_with_tvm(
                [data, out_shape] if wrap_shape else [data],
                ["Placeholder:0", "Newshape:0"] if wrap_shape else ["Placeholder:0"],
                [in_data, in_shape] if wrap_shape else [in_data],
                [out],
                mode="vm",
            )


def test_forward_reshape():
    for wrap in [True, False]:
        _test_reshape(np.arange(6.0, dtype=np.float32), [2, 3], wrap)
        _test_reshape(np.arange(6), [-1, 2], wrap)
        _test_reshape(np.arange(6), [3, -1], wrap)
        _test_reshape(np.arange(6), [-1], wrap)
    _test_reshape(np.arange(6, dtype=np.uint8), [2, 3], False, True)
    _test_reshape(np.arange(6, dtype=np.uint8), [-1, 2], False, True)


def _test_resize(
    tf_resize_op, images_data, size_data, align_corners, half_pixel_centers, quantized=False
):
    """One iteration of Resize"""
    with tf.Graph().as_default():
        images_tensor = array_ops.placeholder(shape=images_data.shape, dtype="float32", name="in")
        size = ops.convert_to_tensor(size_data, dtype=size_data.dtype)

        if quantized:
            images_tensor_q = tf.quantization.fake_quant_with_min_max_args(
                images_tensor, min=-3, max=2, name="in"
            )
            input_range = {"in": (-3, 2)}
            out_tensor = tf_resize_op(
                images=images_tensor_q,
                size=size,
                align_corners=align_corners,
                half_pixel_centers=half_pixel_centers,
            )
            out_tensor = tf.quantization.fake_quant_with_min_max_args(
                out_tensor, min=-3, max=2, name="out_tensor"
            )
            compare_tflite_with_tvm(
                [images_data],
                ["in:0"],
                [images_tensor],
                [out_tensor],
                quantized=True,
                input_range=input_range,
            )
        else:
            out_tensor = tf_resize_op(
                images=images_tensor,
                size=size,
                align_corners=align_corners,
                half_pixel_centers=half_pixel_centers,
            )
            compare_tflite_with_tvm([images_data], ["in:0"], [images_tensor], [out_tensor])
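# Bilinear resize is checked in float32 and in quantized uint8 form across the
# supported align_corners / half_pixel_centers combinations; nearest-neighbor
# resize is only exercised when the installed TFLite schema defines
# RESIZE_NEAREST_NEIGHBOR.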
def test_all_resize():
    """Resize"""
    images_data = np.random.uniform(0, 255, (1, 16, 16, 3))
    images_data_float32 = images_data.astype(np.float32)
    images_data_uint8 = images_data.astype(np.uint8)
    size_data = np.array([8, 8]).astype("int32")
    _test_resize(
        tf.image.resize_bilinear,
        images_data_float32,
        size_data,
        align_corners=False,
        half_pixel_centers=False,
        quantized=False,
    )
    _test_resize(
        tf.image.resize_bilinear,
        images_data_float32,
        size_data,
        align_corners=True,
        half_pixel_centers=False,
        quantized=False,
    )
    _test_resize(
        tf.image.resize_bilinear,
        images_data_uint8,
        size_data,
        align_corners=False,
        half_pixel_centers=False,
        quantized=True,
    )
    _test_resize(
        tf.image.resize_bilinear,
        images_data_uint8,
        size_data,
        align_corners=True,
        half_pixel_centers=False,
        quantized=True,
    )
    _test_resize(
        tf.image.resize_bilinear,
        images_data_uint8,
        size_data,
        align_corners=False,
        half_pixel_centers=True,
        quantized=True,
    )
    if "RESIZE_NEAREST_NEIGHBOR" in dir(BuiltinOperator()):
        _test_resize(
            tf.image.resize_nearest_neighbor,
            images_data_float32,
            size_data,
            align_corners=False,
            half_pixel_centers=False,
        )
        _test_resize(
            tf.image.resize_nearest_neighbor,
            images_data_float32,
            size_data,
            align_corners=True,
            half_pixel_centers=False,
        )
        _test_resize(
            tf.image.resize_nearest_neighbor,
            images_data_float32,
            size_data,
            align_corners=False,
            half_pixel_centers=True,
        )


def _test_range(start, limit, delta):
    if package_version.parse(tf.VERSION) >= package_version.parse("1.14.0"):
        tf.reset_default_graph()
        with tf.Graph().as_default():
            start_scalar, limit_scalar, delta_scalar = (
                tf.placeholder(dtype=start.dtype, shape=(), name="start"),
                tf.placeholder(dtype=limit.dtype, shape=(), name="limit"),
                tf.placeholder(dtype=delta.dtype, shape=(), name="delta"),
            )

            out = tf.range(start_scalar, limit_scalar, delta_scalar, name="range")

            compare_tflite_with_tvm(
                [start, limit, delta],
                ["start", "limit", "delta"],
                [start_scalar, limit_scalar, delta_scalar],
                [out],
                mode="vm",
                quantized=False,
            )


def _test_range_default():
    if package_version.parse(tf.VERSION) >= package_version.parse("1.14.0"):
        tf.reset_default_graph()
        with tf.Graph().as_default():
            inputs = [
                tf.placeholder(dtype=tf.int32, shape=(), name="p1"),
                tf.placeholder(dtype=tf.int32, shape=(), name="p2"),
            ]
            outputs = [
                tf.range(start=inputs[0], limit=inputs[1]),
                tf.range(start=inputs[1]),
            ]

            compare_tflite_with_tvm(
                [np.int32(1), np.int32(18)], ["p1", "p2"], inputs, outputs, mode="vm"
            )


def test_forward_range():
    _test_range(np.int32(1), np.int32(18), np.int32(3))
    _test_range(np.int32(1), np.int32(18), np.float32(3.1))
    _test_range(np.float32(1.0), np.int32(18), np.int32(3.1))
    _test_range_default()


def _test_shape(dtype):
    if package_version.parse(tf.VERSION) >= package_version.parse("1.14.0"):
        tf.reset_default_graph()
        with tf.Graph().as_default():
            data = np.array([1, 18, 3], dtype=np.int32)
            start = tf.placeholder(dtype=tf.int32, shape=[], name="start")
            limit = tf.placeholder(dtype=tf.int32, shape=[], name="limit")
            delta = tf.placeholder(dtype=tf.int32, shape=[], name="delta")
            tf_range = tf.range(start, limit, delta, tf.int32, name="range")
            out = tf.shape(tf_range, out_type=dtype)
            out = tf.add(out, tf.constant([1], dtype=dtype))
            compare_tflite_with_tvm(
                list(np.nditer(data)),
                ["start", "limit", "delta"],
                [start, limit, delta],
                [out],
                mode="vm",
            )


def test_forward_shape():
    _test_shape(tf.int32)
    _test_shape(tf.int64)


def _test_concatenation(data, axis):
    """One iteration of concatenation"""
    assert len(data) >= 1
    with tf.Graph().as_default():
        in_data = [
            array_ops.placeholder(shape=tensor.shape, dtype=tensor.dtype, name=f"in_{idx}")
            for idx, tensor in enumerate(data)
        ]
        out = array_ops.concat(in_data, axis)
        name = [f"in_{idx}:0" for idx in range(len(data))]

        compare_tflite_with_tvm(data, name, in_data, [out])


def test_forward_concatenation():
    _test_concatenation([np.arange(6).reshape((1, 2, 1, 3)), np.arange(6).reshape((1, 2, 1, 3))], 1)
    _test_concatenation([np.arange(6).reshape((3, 2)), np.arange(6).reshape((3, 2))], 1)
    _test_concatenation(
        [
            np.arange(6).reshape((2, 1, 1, 3)),
            np.arange(6).reshape((2, 1, 1, 3)),
            np.arange(6).reshape((2, 1, 1, 3)),
        ],
        1,
    )


def _test_unary_elemwise(math_op, data, quantized, quant_range=(-6, 6), int_quant_dtype=tf.int8):
    """One iteration of unary elemwise"""
    if quantized:
        with tf.Graph().as_default():
            quant_min, quant_max = quant_range
            in_data = array_ops.placeholder(shape=data.shape, dtype="float32", name="in_0")
            inq_data = tf.quantization.fake_quant_with_min_max_args(
                in_data, min=quant_min, max=quant_max, name="inq_0"
            )
            input_range = {"inq_0": (quant_min, quant_max)}
            out = math_op(inq_data)
            out = tf.quantization.fake_quant_with_min_max_args(
                out, min=quant_min, max=quant_max, name="out"
            )
            compare_tflite_with_tvm(
                data,
                "inq_0:0",
                [inq_data],
                [out],
                quantized=True,
                input_range=input_range,
                experimental_new_converter=True,
                int_quant_dtype=int_quant_dtype,
            )
    else:
        with tf.Graph().as_default():
            in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype, name="in")
            out = math_op(in_data)
            compare_tflite_with_tvm(data, ["in:0"], [in_data], [out])
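# Builds a fully integer-quantized TFLite model for a single unary op via the
# TFLiteConverter post-training quantization path; the representative dataset
# feeds random values in [0, 2) shifted down by `offset` during calibration.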
def _unary_elewise_create_model(math_op, data, offset=0, int_quant_dtype=tf.int8):
    class Model(tf.Module):
        @tf.function
        def tf_function(self, x):
            op = math_op(x)
            return op

    if int_quant_dtype in (tf.int8, tf.uint8):
        _ = "int8"
    elif int_quant_dtype in (tf.int16, tf.uint16):
        _ = "int16"
    else:
        raise Exception(f"Unsupported dtype '{int_quant_dtype}' for unary elementwise test.")

    model = Model()

    export_dir = tempfile.gettempdir() + "/tf_model"
    tf.saved_model.save(
        model,
        export_dir,
        signatures=model.tf_function.get_concrete_function(
            tf.TensorSpec(data.shape, tf.float32, name="input")
        ),
    )

    def representative_dataset():
        for _ in range(100):
            tmp_data = np.random.rand(*tuple(data.shape))
            yield [tmp_data.astype(np.float32) * 2 - offset]

    converter = tf.lite.TFLiteConverter.from_saved_model(export_dir)
    converter.optimizations = [tf.lite.Optimize.DEFAULT]
    converter.representative_dataset = representative_dataset
    if int_quant_dtype in (tf.int16, tf.uint16):
        converter.target_spec.supported_ops = [
            tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8
        ]
    else:
        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
    converter.inference_input_type = int_quant_dtype
    converter.inference_output_type = int_quant_dtype

    tflite_model = converter.convert()
    return tflite_model


def _test_abs(data, quantized, int_quant_dtype=tf.int8):
    """One iteration of abs"""
    if quantized:
        tflite_model_quant = _unary_elewise_create_model(
            tf.math.abs, data, offset=1, int_quant_dtype=int_quant_dtype
        )
        tflite_output = run_tflite_graph(tflite_model_quant, data)

        if tf.__version__ < LooseVersion("2.6.1"):
            in_node = ["serving_default_input_int8"]
        elif tf.__version__ < LooseVersion("2.9"):
            in_node = (
                ["serving_default_input_int16"] if int_quant_dtype == tf.int16 else ["tfl.quantize"]
            )
        else:
            in_node = "serving_default_input"

        tvm_output = run_tvm_graph(tflite_model_quant, data, in_node)
        tvm.testing.assert_allclose(
            np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]), rtol=1e-5, atol=1e-2
        )
    else:
        return _test_unary_elemwise(math_ops.abs, data, quantized, int_quant_dtype=int_quant_dtype)


def _test_rsqrt(data, quantized, int_quant_dtype=tf.int8):
    """One iteration of rsqrt"""
    if tf.__version__ < LooseVersion("2.6.1") or not quantized:
        return _test_unary_elemwise(
            math_ops.rsqrt, data, quantized, quant_range=[1, 6], int_quant_dtype=int_quant_dtype
        )
    else:
        tflite_model_quant = _unary_elewise_create_model(
            tf.math.rsqrt, data, int_quant_dtype=int_quant_dtype
        )
        tflite_output = run_tflite_graph(tflite_model_quant, data)
        if tf.__version__ < LooseVersion("2.9"):
            in_node = ["tfl.quantize"]
        else:
            in_node = "serving_default_input"

        tvm_output = run_tvm_graph(tflite_model_quant, data, in_node)
        tvm.testing.assert_allclose(
            np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]), rtol=1e-5, atol=1e-2
        )


def _test_ceil(data, quantized, int_quant_dtype=tf.int8):
    """One iteration of ceil"""
    return _test_unary_elemwise(math_ops.ceil, data, quantized, int_quant_dtype=int_quant_dtype)


def _test_floor(data, quantized, int_quant_dtype=tf.int8):
    """One iteration of floor"""
    return _test_unary_elemwise(math_ops.floor, data, quantized, int_quant_dtype=int_quant_dtype)


def _test_round(data, quantized, int_quant_dtype=tf.int8):
    """One iteration of round"""
    return _test_unary_elemwise(math_ops.round, data, quantized, int_quant_dtype=int_quant_dtype)


def _test_exp(data, quantized, int_quant_dtype=tf.int8):
    """One iteration of exp"""
    return _test_unary_elemwise(math_ops.exp, data, quantized, int_quant_dtype=int_quant_dtype)


def _test_log(data, quantized, int_quant_dtype=tf.int8):
    """One iteration of log"""
    return _test_unary_elemwise(
        math_ops.log, data, quantized, quant_range=[1, 6], int_quant_dtype=int_quant_dtype
    )


def _test_sin(data, quantized, int_quant_dtype=tf.int8):
    """One iteration of sin"""
    return _test_unary_elemwise(math_ops.sin, data, quantized, int_quant_dtype=int_quant_dtype)


def _test_cos(data, quantized, int_quant_dtype=tf.int8):
    """One iteration of cos"""
    if quantized:
        tflite_model_quant = _unary_elewise_create_model(
            tf.math.cos, data, int_quant_dtype=int_quant_dtype
        )
        tflite_output = run_tflite_graph(tflite_model_quant, data)
        if tf.__version__ < LooseVersion("2.9"):
            in_node = ["tfl.quantize"]
        else:
            in_node = "serving_default_input"

        tvm_output = run_tvm_graph(tflite_model_quant, data, in_node)
        tvm.testing.assert_allclose(
            np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]), rtol=1e-5, atol=1e-2
        )
    else:
        return _test_unary_elemwise(math_ops.cos, data, quantized)


def _test_tan(data, quantized, int_quant_dtype=tf.int8):
    """One iteration of tan"""
    return _test_unary_elemwise(math_ops.tan, data, quantized, int_quant_dtype=int_quant_dtype)


def _test_square(data, quantized, int_quant_dtype=tf.int8):
    """One iteration of square"""
    return _test_unary_elemwise(math_ops.square, data, quantized, int_quant_dtype=int_quant_dtype)


def _test_neg(data, quantized, int_quant_dtype=tf.int8):
    """One iteration of neg"""
    return _test_unary_elemwise(math_ops.neg, data, quantized, int_quant_dtype=int_quant_dtype)


def _test_sqrt(data, quantized, int_quant_dtype=tf.int8):
    """One iteration of sqrt"""
    return _test_unary_elemwise(
        math_ops.sqrt, data, quantized, quant_range=[1, 6], int_quant_dtype=int_quant_dtype
    )


def _test_elu(data, quantized, int_quant_dtype=tf.int8):
    """One iteration of elu"""
    return _test_unary_elemwise(nn_ops.elu, data, quantized, int_quant_dtype=int_quant_dtype)


def _test_forward_unary_elemwise(test_op, int_quant_dtype=None, quantized=True, negative=True):
    in_data, inq_data = [], []

    np_dtype = int_quant_dtype.as_numpy_dtype if int_quant_dtype else np.uint8

    if quantized:
        inq_data.append(np.arange(1, 240, 40, dtype=np_dtype))
        inq_data.append(np.arange(1, 240, 40, dtype=np_dtype).reshape((2, 1, 3)))
        if int_quant_dtype == np.int8:
            inq_data.append(np.arange(-128, 127, 45, dtype=np.int8))

        for data in inq_data:
            test_op(data, quantized=True, int_quant_dtype=int_quant_dtype)

    if negative:
        in_data.append(np.arange(-2.0, 4.0, dtype=np.float32))
        in_data.append(np.arange(-2.0, 4.0, dtype=np.float32).reshape((2, 1, 3)))
    else:
        in_data.append(np.arange(1.0, 7.0, dtype=np.float32))
        in_data.append(np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 3)))

    for data in in_data:
        test_op(data, quantized=False, int_quant_dtype=int_quant_dtype)
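# Version gating below: quantized rsqrt uses uint8 before TF 2.6.1 and int8 after;
# ceil, cos, round and elu all require TF >= 1.14, and quantized cos additionally
# requires TF >= 2.6.1.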
def test_all_unary_elemwise():
    """All unary elemwise"""
    _test_forward_unary_elemwise(_test_abs, int_quant_dtype=tf.int8)
    _test_forward_unary_elemwise(_test_abs, int_quant_dtype=tf.int16)
    _test_forward_unary_elemwise(_test_floor)
    _test_forward_unary_elemwise(_test_exp)
    _test_forward_unary_elemwise(_test_log, negative=False)
    _test_forward_unary_elemwise(_test_square)
    _test_forward_unary_elemwise(_test_sin)
    _test_forward_unary_elemwise(_test_neg)
    _test_forward_unary_elemwise(_test_sqrt, negative=False)
    if tf.__version__ < LooseVersion("2.6.1"):
        _test_forward_unary_elemwise(_test_rsqrt, negative=False, int_quant_dtype=tf.uint8)
    else:
        _test_forward_unary_elemwise(_test_rsqrt, negative=False, int_quant_dtype=tf.int8)
    if package_version.parse(tf.VERSION) >= package_version.parse("1.14.0"):
        _test_forward_unary_elemwise(_test_ceil)
        if tf.__version__ < LooseVersion("2.6.1"):
            _test_forward_unary_elemwise(_test_cos, quantized=False)
        else:
            _test_forward_unary_elemwise(_test_cos, int_quant_dtype=tf.int8)
        _test_forward_unary_elemwise(_test_round)
        _test_forward_unary_elemwise(_test_elu, quantized=False)


def _test_elemwise(
    math_op,
    data,
    fused_activation_function=None,
    quantized=False,
    qnn_op=None,
    same_qnn_params=False,
    comparison_op=False,
):
    """One iteration of elemwise"""
    assert len(data) == 2

    def __test_elemwise(in_data):
        assert len(in_data) == 2
        if quantized:
            out_min, out_max = _test_elemwise_qnn_out_range(qnn_op)
            inq0_min, inq0_max = (-100, 100)
            inq1_min, inq1_max = (-50, 50)

            if same_qnn_params:
                inq0_min, inq0_max = (out_min, out_max)
                inq1_min, inq1_max = (out_min, out_max)

            inq_data = [
                tf.quantization.fake_quant_with_min_max_args(
                    in_data[0], min=out_min, max=out_max, name="inq_0"
                )
                if in_data[0] is not None
                else tf.quantization.fake_quant_with_min_max_args(
                    data[0], min=out_min, max=out_max, name="const_tensor0"
                ),
                tf.quantization.fake_quant_with_min_max_args(
                    in_data[1], min=out_min, max=out_max, name="inq_1"
                )
                if in_data[1] is not None
                else tf.quantization.fake_quant_with_min_max_args(
                    data[1], min=out_min, max=out_max, name="const_tensor1"
                ),
            ]

            input_range = {
                x[1][0]: x[1][1]
                for x in zip(
                    in_data, (("inq_0", (inq0_min, inq0_max)), ("inq_1", (inq1_min, inq1_max)))
                )
                if x[0] is not None
            }

            if comparison_op:
                out = math_op(inq_data[0], inq_data[1])
                out = with_fused_activation_function(out, fused_activation_function)

                compare_tflite_with_tvm(
                    [x[1] for x in zip(in_data, data) if x[0] is not None],
                    [x + ":0" for x in input_range.keys()],
                    [x[1] for x in zip(in_data, inq_data) if x[0] is not None],
                    [out],
                    quantized=True,
                    input_range=input_range,
                    experimental_new_converter=same_qnn_params,
                )
            else:
                out = math_op(inq_data[0], inq_data[1])
                out = with_fused_activation_function(out, fused_activation_function)
                out = tf.quantization.fake_quant_with_min_max_args(
                    out, min=out_min, max=out_max, name="out"
                )

                compare_tflite_with_tvm(
                    [x[1] for x in zip(in_data, data) if x[0] is not None],
                    [x + ":0" for x in input_range.keys()],
                    [x[1] for x in zip(in_data, inq_data) if x[0] is not None],
                    [out],
                    quantized=True,
                    input_range=input_range,
                    experimental_new_converter=same_qnn_params,
                )
        else:
            out = math_op(
                in_data[0]
                if in_data[0] is not None
                else ops.convert_to_tensor(data[0], dtype=data[0].dtype),
                in_data[1]
                if in_data[1] is not None
                else ops.convert_to_tensor(data[1], dtype=data[1].dtype),
            )
            out = with_fused_activation_function(out, fused_activation_function)
            compare_tflite_with_tvm(
                [x[1] for x in zip(in_data, data) if x[0] is not None],
                [x[1] for x in zip(in_data, ("in_0:0", "in_1:0")) if x[0] is not None],
                [x for x in in_data if x is not None],
                [out],
            )
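    # Run each case three ways: both operands as placeholders, then each operand
    # in turn folded into the graph as a constant.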
    with tf.Graph().as_default():
        __test_elemwise(
            in_data=[
                array_ops.placeholder(shape=data[0].shape, dtype="float32", name="in_0"),
                array_ops.placeholder(shape=data[1].shape, dtype="float32", name="in_1"),
            ]
        )
    with tf.Graph().as_default():
        __test_elemwise(
            in_data=[array_ops.placeholder(shape=data[0].shape, dtype="float32", name="in_0"), None]
        )
    with tf.Graph().as_default():
        __test_elemwise(
            in_data=[None, array_ops.placeholder(shape=data[1].shape, dtype="float32", name="in_1")]
        )


def _test_add(data, fused_activation_function=None, quantized=False, qnn_op=None):
    """One iteration of add"""
    return _test_elemwise(math_ops.add, data, fused_activation_function, quantized, qnn_op)


def _test_sub(data, fused_activation_function=None, quantized=False, qnn_op=None):
    """One iteration of subtract"""
    return _test_elemwise(math_ops.subtract, data, fused_activation_function, quantized, qnn_op)


def _test_mul(data, fused_activation_function=None, quantized=False, qnn_op=None):
    """One iteration of mul"""
    return _test_elemwise(math_ops.multiply, data, fused_activation_function, quantized, qnn_op)


def _test_div(data, fused_activation_function=None):
    """One iteration of divide"""
    return _test_elemwise(math_ops.divide, data, fused_activation_function)


def _test_pow(data):
    """One iteration of power"""
    return _test_elemwise(math_ops.pow, data)


def _test_maximum(data, fused_activation_function=None, quantized=False, qnn_op=None):
    """One iteration of maximum"""
    return _test_elemwise(
        math_ops.maximum, data, fused_activation_function, quantized, qnn_op, same_qnn_params=True
    )


def _test_minimum(data, fused_activation_function=None, quantized=False, qnn_op=None):
    """One iteration of minimum"""
    return _test_elemwise(
        math_ops.minimum, data, fused_activation_function, quantized, qnn_op, same_qnn_params=True
    )


def _test_greater(data, fused_activation_function=None, quantized=False, qnn_op=None):
    """One iteration of greater"""
    return _test_elemwise(
        math_ops.greater,
        data,
        fused_activation_function,
        quantized,
        qnn_op,
        same_qnn_params=True,
        comparison_op=True,
    )


def _test_greater_equal(data):
    """One iteration of greater_equal"""
    return _test_elemwise(math_ops.greater_equal, data)


def _test_less(data):
    """One iteration of less"""
    return _test_elemwise(math_ops.less, data)


def _test_less_equal(data):
    """One iteration of less_equal"""
    return _test_elemwise(math_ops.less_equal, data)


def _test_equal(data, fused_activation_function=None, quantized=False, qnn_op=None):
    """One iteration of equal"""
    return _test_elemwise(
        math_ops.equal,
        data,
        fused_activation_function,
        quantized,
        qnn_op,
        same_qnn_params=True,
        comparison_op=True,
    )


def _test_not_equal(data):
    """One iteration of not_equal"""
    return _test_elemwise(math_ops.not_equal, data)


def _test_squared_difference(data):
    """One iteration of squared difference"""
    return _test_elemwise(math_ops.squared_difference, data)


def _test_floor_divide(data):
    """One iteration of floor_div"""
    return _test_elemwise(math_ops.floordiv, data)


def _test_floor_mod(data):
    """One iteration of floor_mod"""
    return _test_elemwise(math_ops.floormod, data)


def _test_forward_elemwise(testop):
    """Elewise"""
    testop(
        [
            np.arange(6.0, dtype=np.float32).reshape((2, 1, 1, 3)),
            np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 1, 3)),
        ]
    )
    testop(
        [
            np.arange(6.0, dtype=np.float32).reshape((2, 1, 3)),
            np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 3)),
        ]
    )
    testop(
        [
            np.arange(3.0, dtype=np.float32).reshape((1, 3)),
            np.arange(1.0, 4.0, dtype=np.float32).reshape((1, 3)),
        ]
    )


def _test_forward_elemwise_quantized(testop):
    testop(
        [
            np.array(np.random.uniform(0, 255, (3, 6)), dtype=np.uint8),
            np.array(np.random.uniform(0, 255, (3, 6)), dtype=np.uint8),
        ],
        quantized=True,
        qnn_op=testop,
    )


def _test_elemwise_qnn_out_range(qnn_op):
    qnn_out_range = {
        _test_add: (-150, 150),
        _test_sub: (-150, 150),
        _test_mul: (-5e3, 5e3),
        _test_maximum: (-112, 111),
        _test_minimum: (-128, 127),
        _test_equal: (-150, 150),
        _test_greater: (-150, 150),
    }

    return qnn_out_range[qnn_op]
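# The fp32 output range for each quantized run is looked up per-op in
# _test_elemwise_qnn_out_range; div and pow are exercised in float mode only.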
def test_all_elemwise():
    """All_elewise"""
    _test_forward_elemwise(_test_add)
    _test_forward_elemwise_quantized(_test_add)
    _test_forward_elemwise(partial(_test_add, fused_activation_function="RELU"))
    _test_forward_elemwise(_test_sub)
    _test_forward_elemwise_quantized(_test_sub)
    _test_forward_elemwise(partial(_test_sub, fused_activation_function="RELU"))
    _test_forward_elemwise(partial(_test_sub, fused_activation_function="RELU6"))
    _test_forward_elemwise(_test_mul)
    _test_forward_elemwise_quantized(_test_mul)
    _test_forward_elemwise(partial(_test_mul, fused_activation_function="RELU"))
    _test_forward_elemwise(partial(_test_mul, fused_activation_function="RELU6"))
    _test_forward_elemwise(_test_div)
    _test_forward_elemwise(partial(_test_div, fused_activation_function="RELU"))
    _test_forward_elemwise(partial(_test_div, fused_activation_function="RELU6"))
    _test_forward_elemwise(_test_pow)
    _test_forward_elemwise(_test_maximum)
    _test_forward_elemwise_quantized(_test_maximum)
    _test_forward_elemwise(_test_minimum)
    _test_forward_elemwise_quantized(_test_minimum)
    _test_forward_elemwise(_test_greater)
    _test_forward_elemwise_quantized(_test_greater)
    _test_forward_elemwise(_test_squared_difference)
    _test_forward_elemwise(_test_greater_equal)
    _test_forward_elemwise(_test_less)
    _test_forward_elemwise(_test_less_equal)
    _test_forward_elemwise(_test_equal)
    _test_forward_elemwise_quantized(_test_equal)
    _test_forward_elemwise(_test_not_equal)
    if package_version.parse(tf.VERSION) >= package_version.parse("1.14.0"):
        _test_forward_elemwise(_test_floor_divide)
        _test_forward_elemwise(_test_floor_mod)


def _test_forward_add_n(inputs):
    tf.reset_default_graph()
    with tf.Graph().as_default():
        temp = []
        for each in inputs:
            temp.append(tf.placeholder(shape=each.shape, dtype=each.dtype))
        output = tf.add_n(temp)
        compare_tflite_with_tvm(
            list(inputs),
            [each.name for each in temp],
            list(temp),
            [output],
        )


def test_forward_add_n():
    """Add n"""
    if package_version.parse(tf.VERSION) >= package_version.parse("1.14.0"):
        x = np.random.randint(1, 100, size=(3, 3, 3), dtype=np.int32)
        y = np.random.randint(1, 100, size=(3, 3, 3), dtype=np.int32)
        z_1 = np.random.randint(1, 100, size=(3, 3, 3), dtype=np.int32)
        x_1, x_2, z_2 = x.astype(np.float32), y.astype(np.float32), z_1.astype(np.float32)

        in0 = x
        in1 = [x, y]
        in2 = (x, y, z_1)
        in3 = x_1
        in4 = [x_1, x_2]
        in5 = (x_1, x_2, z_2)
        _test_forward_add_n(in0)
        _test_forward_add_n(in1)
        _test_forward_add_n(in2)
        _test_forward_add_n(in3)
        _test_forward_add_n(in4)
        _test_forward_add_n(in5)


def _test_logical_binary(logical_bin_op, data):
    with tf.Graph().as_default():
        in_data = [
            array_ops.placeholder(shape=data[0].shape, dtype="bool", name="in_0"),
            array_ops.placeholder(shape=data[1].shape, dtype="bool", name="in_1"),
        ]
        if logical_bin_op is math_ops.logical_not:
            out = math_ops.logical_or(in_data[0], in_data[1], name="out1")
            out = logical_bin_op(out, name="out")
        else:
            out = logical_bin_op(in_data[0], in_data[1], name="out")
        compare_tflite_with_tvm(data, ["in_0:0", "in_1:0"], in_data, [out])


def _test_forward_logical_and(data):
    """One iteration of logical and"""
    return _test_logical_binary(math_ops.logical_and, data)


def _test_forward_logical_or(data):
    """One iteration of logical or"""
    return _test_logical_binary(math_ops.logical_or, data)


def _test_forward_logical_not(data):
    """One iteration of logical not"""
    return _test_logical_binary(math_ops.logical_not, data)


def test_all_logical():
    data = [
        np.random.choice(a=[False, True], size=(2, 3, 4)).astype("bool"),
        np.random.choice(a=[False, True], size=(2, 3, 4)).astype("bool"),
    ]
    if package_version.parse(tf.VERSION) >= package_version.parse("1.15.0"):
        _test_forward_logical_and(data)
        _test_forward_logical_or(data)
        _test_forward_logical_not(data)


def _test_zeros_like(data):
    """One iteration of ZEROS LIKE"""
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        out = gen_array_ops.zeros_like(in_data)
        compare_tflite_with_tvm(data, "Placeholder:0", [in_data], [out])


def test_forward_zeros_like():
    """ZEROS LIKE"""
    _test_zeros_like(np.arange(6.0, dtype=np.float32).reshape((1, 6)))


def _test_fill(dims, value_data, value_dtype):
    """Use the fill op to create a tensor of value_data with constant dims."""
    value_data = np.array(value_data, dtype=value_dtype)
    if package_version.parse(tf.VERSION) >= package_version.parse("1.14.0"):
        with tf.Graph().as_default():
            value = array_ops.placeholder(dtype=value_dtype, name="value", shape=[])
            out = tf.fill(dims, value)
            compare_tflite_with_tvm([value_data], ["value"], [value], [out])

    with tf.Graph().as_default():
        input1 = array_ops.placeholder(dtype=value_dtype, name="input1", shape=dims)
        out = tf.fill(dims, value_data)
        out1 = tf.add(out, input1)
        input1_data = np.random.uniform(0, 5, size=dims).astype(value_dtype)
        compare_tflite_with_tvm([input1_data], ["input1"], [input1], [out1])


def test_forward_fill():
    """Test FILL op"""
    _test_fill((1, 2, 2, 4), 5, "int32")
    _test_fill((1, 2, 2, 4), 5, "float32")
    _test_fill((5,), 5, "int32")


def _test_reduce(math_op, data, keep_dims=None):
    """One iteration of reduce"""
    assert len(data) == 2

    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=data[0].shape, dtype=data[0].dtype, name="in")
        out = math_op(in_data, data[1], keep_dims)
        compare_tflite_with_tvm([data[0]], ["in:0"], [in_data], [out])


def _test_reduce_quantize(math_op, data, keep_dims=None):
    """One iteration of reduce"""
    assert len(data) == 2

    with tf.Graph().as_default():
        in_data = [array_ops.placeholder(shape=data[0].shape, dtype="float32", name="in")]
        inq_data = [
            tf.quantization.fake_quant_with_min_max_args(
                in_data[0], min=-100, max=100, name="inq_0"
            )
        ]
        input_range = {"inq_0": (-100, 100)}
        out = math_op(inq_data, data[1], keep_dims)
        out = tf.quantization.fake_quant_with_min_max_args(out, min=-200, max=200, name="out")
        compare_tflite_with_tvm(
            [data[0]], ["inq_0:0"], [inq_data[0]], [out], quantized=True, input_range=input_range
        )


def _test_reduce_min(data, keep_dims=None):
    """One iteration of reduce_min"""
    return _test_reduce(math_ops.reduce_min, data, keep_dims)


def _test_reduce_max(data, keep_dims=None):
    """One iteration of reduce_max"""
    return _test_reduce(math_ops.reduce_max, data, keep_dims)


def _test_reduce_mean(data, keep_dims=None, quantized=False):
    """One iteration of reduce_mean"""
    if quantized:
        return _test_reduce_quantize(math_ops.reduce_mean, data, keep_dims)
    else:
        return _test_reduce(math_ops.reduce_mean, data, keep_dims)


def _test_reduce_prod(data, keep_dims=None):
    """One iteration of reduce_prod"""
    return _test_reduce(math_ops.reduce_prod, data, keep_dims)


def _test_reduce_sum(data, keep_dims=None):
    """One iteration of reduce_sum"""
    return _test_reduce(math_ops.reduce_sum, data, keep_dims)


def _test_reduce_any(data, keep_dims=None):
    """One iteration of reduce_any"""
    return _test_reduce(math_ops.reduce_any, data, keep_dims)


def _test_forward_reduce(testop, dtype="float32"):
    """Reduce"""
    if dtype == "bool":
        data0 = [np.random.choice(a=[False, True], size=(16, 16, 16, 16)).astype(dtype), None]
        data1 = [
            np.random.choice(a=[False, True], size=(16, 16, 16, 16)).astype(dtype),
            np.array(1, dtype=np.int32),
        ]
        data2 = [
            np.random.choice(a=[False, True], size=(16, 16, 16, 16)).astype(dtype),
            np.array([1, 2], dtype=np.int32),
        ]
    else:
        data0 = [np.random.rand(16, 16, 16, 16).astype(dtype), None]
        data1 = [np.random.rand(16, 16, 16, 16).astype(dtype), np.array(1, dtype=np.int32)]
        data2 = [np.random.rand(16, 16, 16, 16).astype(dtype), np.array([1, 2], dtype=np.int32)]
    for data in [data0, data1, data2]:
        testop(data)
        testop(data, keep_dims=False)
        testop(data, keep_dims=True)


def _test_forward_reduce_quantized(testop):
    data0 = [
        np.array(np.random.uniform(0, 255, (3, 6)), dtype=np.uint8),
        np.array([1, 2], dtype=np.int32),
    ]
    testop(data0, quantized=True)
    testop(data0, keep_dims=False, quantized=True)
    testop(data0, keep_dims=True, quantized=True)
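# reduce_mean is the only reduce op with a quantized path here; reduce_any runs
# on bool data and needs TF >= 1.15.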
def test_all_reduce():
    _test_forward_reduce(_test_reduce_min)
    _test_forward_reduce(_test_reduce_max)
    _test_forward_reduce(_test_reduce_mean)
    _test_forward_reduce_quantized(_test_reduce_mean)
    _test_forward_reduce(_test_reduce_prod)
    _test_forward_reduce(_test_reduce_sum)
    if package_version.parse(tf.VERSION) >= package_version.parse("1.15.0"):
        _test_forward_reduce(_test_reduce_any, dtype="bool")


def _test_arg_min_max(math_op, data, axis, quantized=False):
    """One iteration of arg_min_max"""
    with tf.Graph().as_default():
        t_name = "in"
        in_data = array_ops.placeholder(shape=data.shape, dtype=np.float32, name=t_name)
        input_range = None
        qmin, qmax = -100, 102
        if quantized:
            inq_data = tf.quantization.fake_quant_with_min_max_args(
                in_data, min=qmin, max=qmax, name="q" + t_name
            )
            input_range = {inq_data.name.split(":")[0]: (qmin, qmax)}
            out = math_op(input=inq_data, axis=axis)
            compare_tflite_with_tvm(
                [data], [inq_data.name], [inq_data], [out], quantized=True, input_range=input_range
            )
        else:
            out = math_op(input=in_data, axis=axis)
            compare_tflite_with_tvm([data], [in_data.name], [in_data], [out])


def test_forward_arg_min_max():
    """Arg min max"""
    for data in [np.array(np.random.uniform(-100, 100, (3, 4)), dtype=np.uint8)]:
        for axis in [None, 0, 1, -1]:
            _test_arg_min_max(math_ops.argmax, data, axis, True)

    for data in [np.array(np.random.uniform(-100, 100, (3, 4)), dtype=np.float32)]:
        for axis in [None, 0, 1, -1]:
            _test_arg_min_max(math_ops.argmax, data, axis)
            _test_arg_min_max(math_ops.argmin, data, axis)


def test_forward_select():
    """Select"""
    with tf.Graph().as_default():
        with tf.Session() as _:
            input1 = tf.placeholder(tf.int32, shape=[1, 4, 4, 3], name="input1")
            input2 = tf.placeholder(tf.int32, shape=[1, 4, 4, 3], name="input2")
            mask = input1 > input2
            out = tf.where(mask, input1 + 1, input2 * 2)
            in_data1 = np.random.uniform(0, 10, size=(1, 4, 4, 3)).astype("int32")
            in_data2 = np.random.uniform(0, 10, size=(1, 4, 4, 3)).astype("int32")

            compare_tflite_with_tvm(
                [in_data1, in_data2], ["input1:0", "input2:0"], [input1, input2], [out]
            )


@pytest.mark.parametrize("quant_bits", [2, 4, 8, 16])
@pytest.mark.parametrize(
    "value, min_value, max_value",
    [[-10.11, -6, 6], [-3.55, -6, 6], [0, -6, 6], [3.55, -6, 6], [10.11, -6, 6]],
)
def test_forward_fake_quant(value, min_value, max_value, quant_bits):
    """Fake quant"""
    with tf.Graph().as_default():
        with tf.Session() as _:
            input_placeholder = tf.placeholder(tf.float32, shape=[1], name="input")
            out = tf.quantization.fake_quant_with_min_max_args(
                input_placeholder, min=min_value, max=max_value, num_bits=quant_bits, name=None
            )
            in_data = np.float32(value)
            compare_tflite_with_tvm([in_data], ["input:0"], [input_placeholder], [out])


def _test_squeeze(data, squeeze_dims=None):
    """One iteration of squeeze"""
    if squeeze_dims is None:
        squeeze_dims = []
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)

        if squeeze_dims:
            out = array_ops.squeeze(in_data, squeeze_dims)
        else:
            out = array_ops.squeeze(in_data)

        compare_tflite_with_tvm(data, "Placeholder:0", [in_data], [out])


def test_forward_squeeze():
    """Squeeze"""
    _test_squeeze(np.arange(6).reshape((1, 2, 1, 3)), [0, 2])
    _test_squeeze(np.arange(6).reshape((2, 1, 3, 1)), [1, 3])
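# The quantize/dequantize tests push a small Keras model (ReLU -> Add -> Concatenate)
# through full post-training quantization and compare TVM with the TFLite
# interpreter, with atol=1e-2 to absorb quantization rounding.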
def _test_quantize_dequantize(data):
    """One iteration of quantize and dequantize"""
    data_in = tf.keras.layers.Input(shape=data.shape[1:])
    relu = tf.keras.layers.ReLU()(data_in)
    add = tf.keras.layers.Add()([data_in, relu])
    concat = tf.keras.layers.Concatenate(axis=0)([relu, add])
    keras_model = tf.keras.models.Model(inputs=data_in, outputs=concat)

    def representative_data_gen():
        for _ in range(1):
            yield [data]

    tflite_model_quant = _quantize_keras_model(keras_model, representative_data_gen, True, True)

    tflite_output = run_tflite_graph(tflite_model_quant, data)
    if tf.__version__ < LooseVersion("2.9"):
        in_node = data_in.name.split(":")[0]
    else:
        in_node = "serving_default_" + data_in.name + ":0"
    tvm_output = run_tvm_graph(tflite_model_quant, data, in_node)
    tvm.testing.assert_allclose(
        np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]), rtol=1e-5, atol=1e-2
    )


def _test_quantize_dequantize_const(data):
    """One iteration of quantize and dequantize"""
    data_in = tf.keras.layers.Input(shape=data.shape[1:])
    relu = tf.keras.layers.ReLU()(data_in)
    add = tf.keras.layers.Add()([data, relu])
    concat = tf.keras.layers.Concatenate(axis=0)([relu, add])
    keras_model = tf.keras.models.Model(inputs=data_in, outputs=concat)

    def representative_data_gen():
        for _ in range(1):
            yield [data]

    tflite_model_quant = _quantize_keras_model(keras_model, representative_data_gen, True, True)

    tflite_output = run_tflite_graph(tflite_model_quant, data)
    if tf.__version__ < LooseVersion("2.9"):
        in_node = data_in.name.split(":")[0]
    else:
        in_node = "serving_default_" + data_in.name + ":0"
    tvm_output = run_tvm_graph(tflite_model_quant, data, in_node)
    tvm.testing.assert_allclose(
        np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]), rtol=1e-5, atol=1e-2
    )


def test_forward_quantize_dequantize():
    """Quantize Dequantize"""
    data = np.random.uniform(0, 1, (1, 4, 4, 3)).astype("float32")
    if package_version.parse(tf.VERSION) >= package_version.parse("2.1.0"):
        _test_quantize_dequantize(data)
        _test_quantize_dequantize_const(data)


def _test_pad(data, mode="CONSTANT", quantized=False):
    """One iteration of PAD"""
    assert len(data) == 2

    with tf.Graph().as_default():
        in_data = [array_ops.placeholder(shape=data[0].shape, dtype="float32", name="in")]

        if quantized:
            input_range = {"inq_0": (-100, 100)}
            inq_data = [
                tf.quantization.fake_quant_with_min_max_args(
                    in_data[0], min=-100, max=100, name="inq_0"
                )
            ]
            out = array_ops.pad(
                inq_data[0], ops.convert_to_tensor(data[1], dtype=data[1].dtype), mode=mode
            )
            compare_tflite_with_tvm(
                [data[0]], ["inq_0:0"], inq_data, [out], quantized=True, input_range=input_range
            )
        else:
            out = array_ops.pad(
                in_data[0], ops.convert_to_tensor(data[1], dtype=data[1].dtype), mode=mode
            )
            compare_tflite_with_tvm([data[0]], ["in:0"], in_data, [out])


def test_forward_pad():
    """Pad"""
    _test_pad(
        [
            np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 1, 3)),
            np.array([[1, 1], [2, 2], [1, 1], [2, 2]], dtype=np.int32),
        ]
    )
    _test_pad(
        [
            np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 3)),
            np.array([[2, 2], [1, 1], [1, 1]], dtype=np.int32),
        ]
    )
    _test_pad(
        [
            np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 3)),
            np.array([[1, 1], [2, 2]], dtype=np.int32),
        ]
    )
    _test_pad(
        [
            np.arange(1.0, 4.0, dtype=np.float32).reshape((1, 3)),
            np.array([[1, 1], [2, 2]], dtype=np.int32),
        ]
    )
    _test_pad(
        [
            np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 3)),
            np.array([[1, 1], [2, 2]], dtype=np.int32),
        ],
        mode="REFLECT",
    )
    _test_pad(
        [
            np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 3)),
            np.array([[1, 1], [2, 2]], dtype=np.int32),
        ],
        mode="SYMMETRIC",
    )
    _test_pad(
        [
            np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 3)),
            np.array([[1, 1], [2, 2]], dtype=np.int64),
        ],
        mode="REFLECT",
    )
    _test_pad(
        [
            np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 3)),
            np.array([[1, 1], [2, 2]], dtype=np.int64),
        ],
        mode="SYMMETRIC",
    )
    _test_pad(
        [
            np.arange(0, 256, dtype=np.uint8).reshape((1, 256)),
            np.array([[1, 1], [2, 2]], dtype=np.int32),
        ],
        quantized=True,
    )


def _test_padv2(data, mode="CONSTANT", quantized=False):
    """One iteration of PADV2"""
    assert len(data) == 2 or len(data) == 3

    with_constant_values = len(data) == 3

    with tf.Graph().as_default():
        in_data = [array_ops.placeholder(shape=data[0].shape, dtype="float32", name="in")]

        if quantized:
            input_range = {"inq_0": (-100, 100)}
            inq_data = [
                tf.quantization.fake_quant_with_min_max_args(
                    in_data[0], min=-100, max=100, name="inq_0"
                )
            ]
            if with_constant_values:
                in_constant_values = constant_op.constant(
                    data[2], shape=data[2].shape, dtype="float32", name="in_constant_values"
                )
                inq_constant_values = tf.quantization.fake_quant_with_min_max_args(
                    in_constant_values, min=-100, max=100, name="inq_constant_values"
                )
                out = array_ops.pad_v2(
                    inq_data[0],
                    ops.convert_to_tensor(data[1], dtype=data[1].dtype),
                    constant_values=inq_constant_values,
                    mode=mode,
                )
                out = tf.quantization.fake_quant_with_min_max_args(
                    out, min=-100, max=100, name="out"
                )
            else:
                out = array_ops.pad_v2(
                    inq_data[0], ops.convert_to_tensor(data[1], dtype=data[1].dtype), mode=mode
                )
            compare_tflite_with_tvm(
                [data[0]], ["inq_0:0"], inq_data, [out], quantized=True, input_range=input_range
            )
        else:
            if with_constant_values:
                out = array_ops.pad_v2(
                    in_data[0],
                    ops.convert_to_tensor(data[1], dtype=data[1].dtype),
                    constant_values=ops.convert_to_tensor(data[2], dtype=data[2].dtype),
                    mode=mode,
                )
            else:
                out = array_ops.pad_v2(
                    in_data[0], ops.convert_to_tensor(data[1], dtype=data[1].dtype), mode=mode
                )
            compare_tflite_with_tvm([data[0]], ["in:0"], in_data, [out])


def test_forward_padv2():
    """PADV2"""
    _test_padv2(
        [
            np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 1, 3)),
            np.array([[1, 1], [2, 2], [1, 1], [2, 2]], dtype=np.int32),
        ]
    )
    _test_padv2(
        [
            np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 3)),
            np.array([[2, 2], [1, 1], [1, 1]], dtype=np.int32),
        ]
    )
    _test_padv2(
        [
            np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 3)),
            np.array([[1, 1], [2, 2]], dtype=np.int32),
        ]
    )
    _test_padv2(
        [
            np.arange(1.0, 4.0, dtype=np.float32).reshape((1, 3)),
            np.array([[1, 1], [2, 2]], dtype=np.int32),
        ]
    )
    _test_padv2(
        [
            np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 3)),
            np.array([[1, 1], [2, 2]], dtype=np.int32),
        ],
        mode="REFLECT",
    )
    _test_padv2(
        [
            np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 3)),
            np.array([[1, 1], [2, 2]], dtype=np.int32),
        ],
        mode="SYMMETRIC",
    )
    _test_padv2(
        [
            np.arange(0, 256, dtype=np.uint8).reshape((1, 256)),
            np.array([[1, 1], [2, 2]], dtype=np.int32),
        ],
        quantized=True,
    )
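    # The remaining cases pass an explicit constant_values operand, first as
    # 1-element arrays and then as scalars; the quantized constant_values variants
    # only convert on TF <= 2.1.0.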
    _test_padv2(
        [
            np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 1, 3)),
            np.array([[1, 1], [2, 2], [1, 1], [2, 2]], dtype=np.int32),
            np.array([2], dtype=np.float32),
        ]
    )
    _test_padv2(
        [
            np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 3)),
            np.array([[2, 2], [1, 1], [1, 1]], dtype=np.int32),
            np.array([1], dtype=np.float32),
        ]
    )
    _test_padv2(
        [
            np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 3)),
            np.array([[1, 1], [2, 2]], dtype=np.int32),
            np.array([-1], dtype=np.float32),
        ]
    )
    _test_padv2(
        [
            np.arange(1.0, 4.0, dtype=np.float32).reshape((1, 3)),
            np.array([[1, 1], [2, 2]], dtype=np.int32),
            np.array([2], dtype=np.float32),
        ]
    )
    if package_version.parse(tf.VERSION) <= package_version.parse("2.1.0"):
        _test_padv2(
            [
                np.arange(0, 256, dtype=np.uint8).reshape((1, 256)),
                np.array([[1, 1], [2, 2]], dtype=np.int32),
                np.array([2], dtype=np.float32),
            ],
            quantized=True,
        )
    _test_padv2(
        [
            np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 1, 3)),
            np.array([[1, 1], [2, 2], [1, 1], [2, 2]], dtype=np.int32),
            np.float32(2),
        ]
    )
    if package_version.parse(tf.VERSION) <= package_version.parse("2.1.0"):
        _test_padv2(
            [
                np.arange(0, 256, dtype=np.uint8).reshape((1, 256)),
                np.array([[1, 1], [2, 2]], dtype=np.int32),
                np.uint8(10),
            ],
            quantized=True,
        )


def _test_expand_dims(input_shape, input_type, axis, quantized=False):
    """One iteration of EXPAND_DIMS"""
    with tf.Graph().as_default():
        axis = ops.convert_to_tensor(axis, dtype=axis.dtype)

        if quantized:
            input_array = np.random.uniform(0, 256, input_shape).astype("uint8")
            in_input = tf.placeholder(dtype="float32", shape=input_array.shape, name="input")

            input_range = {"q_input": (-100, 100)}
            inq_input = tf.quantization.fake_quant_with_min_max_args(
                in_input, min=-100, max=100, name="q_input"
            )

            out = array_ops.expand_dims(inq_input, axis=axis)
            out = tf.quantization.fake_quant_with_min_max_args(out, min=-100, max=100, name="out")

            compare_tflite_with_tvm(
                [input_array],
                ["q_input"],
                [inq_input],
                [out],
                quantized=True,
                input_range=input_range,
            )
        else:
            input_array = np.random.uniform(-100, 100, input_shape).astype(input_type)
            in_input = tf.placeholder(
                dtype=input_array.dtype, shape=input_array.shape, name="input"
            )

            out = array_ops.expand_dims(in_input, axis=axis)

            compare_tflite_with_tvm([input_array], ["input"], [in_input], [out])


def test_forward_expand_dims():
    """EXPAND_DIMS"""
    for quantized in [False, True]:
        _test_expand_dims((6, 2, 7, 5), "float32", np.int32(0), quantized=quantized)
        _test_expand_dims((1, 2, 3), "int32", np.int32(-2), quantized=quantized)
        _test_expand_dims((2, 4, 5), "float32", np.array([1], dtype=np.int32), quantized=quantized)


def _test_one_hot(indices, depth, on_value, off_value, axis=None):
    """One iteration of One_Hot"""
    with tf.Graph().as_default():
        in_indices = tf.placeholder(dtype=indices.dtype, shape=indices.shape, name="indices")
        in_depth = ops.convert_to_tensor(depth, dtype=depth.dtype)
        in_on_value = tf.placeholder(dtype=on_value.dtype, shape=on_value.shape, name="on_value")
        in_off_value = tf.placeholder(
            dtype=off_value.dtype, shape=off_value.shape, name="off_value"
        )
        if axis is not None:
            out = array_ops.one_hot(in_indices, in_depth, in_on_value, in_off_value, axis=axis)
        else:
            out = array_ops.one_hot(in_indices, in_depth, in_on_value, in_off_value)
        compare_tflite_with_tvm(
            [indices, on_value, off_value],
            ["indices", "on_value", "off_value"],
            [in_indices, in_on_value, in_off_value],
            [out],
        )


def test_forward_one_hot():
    """One_Hot"""
    _test_one_hot(np.int32(2), np.int32(8), np.int32(1), np.int32(0))
    _test_one_hot(np.int32(4), np.int32(8), np.float32(1), np.float32(0))
    _test_one_hot(np.array([1, 2, 3], dtype=np.int32), np.int32(8), np.int32(3), np.int32(-1))
    _test_one_hot(
        np.array([1, 2, 3], dtype=np.int32), np.int32(8), np.int32(3), np.int32(-1), axis=0
    )


def _test_pack(data, is_var, axis, quantized=False):
    """One iteration of pack"""
    assert len(data) >= 1
    assert len(data) == len(is_var)

    if quantized:
        with tf.Graph().as_default():
            in_data = [
                array_ops.placeholder(shape=d.shape, dtype="float32", name="in_" + str(idx))
                if is_var[idx]
                else constant_op.constant(
                    d, shape=d.shape, dtype="float32", name="in_constant_" + str(idx)
                )
                for idx, d in enumerate(data)
            ]
            inq_data = [
                tf.quantization.fake_quant_with_min_max_args(
                    i_data, min=-100, max=100, name=f"inq_{idx}"
                )
                for idx, i_data in enumerate(in_data)
            ]
            input_range = {}
            for i in range(len(data)):
                input_range[f"inq_{i}"] = (-100, 100)

            out = array_ops.pack(inq_data, axis=axis)
            out = tf.quantization.fake_quant_with_min_max_args(out, min=-100, max=100, name="out")
            name = [f"inq_{idx}:0" for idx in range(len(data))]
            compare_tflite_with_tvm(
                data, name, inq_data, [out], quantized=True, input_range=input_range
            )
    else:
        with tf.Graph().as_default():
            in_data = [
                array_ops.placeholder(shape=d.shape, dtype=d.dtype, name="in_" + str(idx))
                if is_var[idx]
                else constant_op.constant(
                    d, shape=d.shape, dtype=d.dtype, name="in_constant_" + str(idx)
                )
                for idx, d in enumerate(data)
            ]
            out = array_ops.pack(in_data, axis=axis)
            name = [_.name for _ in in_data]
            compare_tflite_with_tvm(data, name, in_data, [out], experimental_new_converter=True)


def test_forward_pack():
    """Pack"""
    _test_pack([np.int32(1), np.int32(5)], [False, False], 0)
    _test_pack([np.array([1, 4]), np.array([2, 5]), np.array([3, 6])], [True, False, False], 0)
    _test_pack(
        [np.arange(6).reshape((1, 2, 1, 3)), np.arange(6).reshape((1, 2, 1, 3))], [True, True], 1
    )
    _test_pack([np.arange(6).reshape((3, 2)), np.arange(6).reshape((3, 2))], [True, True], 1)
    _test_pack(
        [
            np.arange(6).reshape((2, 1, 1, 3)),
            np.arange(6).reshape((2, 1, 1, 3)),
            np.arange(6).reshape((2, 1, 1, 3)),
        ],
        [True, True, True],
        1,
    )
    _test_pack(
        [
            np.arange(6, dtype=np.uint8).reshape((2, 1, 1, 3)),
            np.arange(6, dtype=np.uint8).reshape((2, 1, 1, 3)),
            np.arange(6, dtype=np.uint8).reshape((2, 1, 1, 3)),
        ],
        [True, True, True],
        1,
        quantized=True,
    )


def _test_unpack(data, axis, num_unpacks):
    """One iteration of UNPACK"""
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        out = gen_array_ops.unpack(in_data, num=num_unpacks, axis=axis, name="unpack")
        out_names = ["out_" + str(n) + ":0" for n in range(num_unpacks)]
        compare_tflite_with_tvm([data], "Placeholder:0", [in_data], out, out_names=out_names)
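# Unpacking along negative axes requires TF >= 1.14.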
"""UNPACK""" _test_unpack(np.array(np.random.uniform(0, 5, (3, 1)), dtype=np.int32), axis=1, num_unpacks=1) _test_unpack(np.array(np.random.uniform(0, 5, (3, 4)), dtype=np.float32), axis=0, num_unpacks=3) _test_unpack( np.array(np.random.uniform(0, 5, (3, 1, 2)), dtype=np.float32), axis=0, num_unpacks=3 ) if package_version.parse(tf.VERSION) >= package_version.parse("1.14.0"): _test_unpack( np.array(np.random.uniform(0, 5, (3, 6)), dtype=np.int32), axis=-2, num_unpacks=3 ) _test_unpack( np.array(np.random.uniform(0, 5, (2, 3, 4)), dtype=np.int32), axis=-3, num_unpacks=2 ) def _test_local_response_normalization(data, depth_radius, bias, alpha, beta): """One iteration of LOCAL_RESPONSE_NORMALIZATION""" with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data.shape, dtype="float32", name="in_0") out = nn_ops.local_response_normalization( in_data, depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta ) compare_tflite_with_tvm(data, "in_0:0", [in_data], [out]) def test_forward_local_response_normalization(): """LOCAL_RESPONSE_NORMALIZATION""" data = np.random.uniform(size=(1, 6, 4, 3)).astype("float32") if package_version.parse(tf.VERSION) >= package_version.parse("1.14.0"): _test_local_response_normalization(data, depth_radius=5, bias=1, alpha=1, beta=0.5) def _test_l2_normalization(data, axis, fused_activation_function=None): """One iteration of L2_NORMALIZATION""" with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype) out = nn_impl.l2_normalize(in_data, axis) out = with_fused_activation_function(out, fused_activation_function) compare_tflite_with_tvm(data, "Placeholder:0", [in_data], [out]) def test_forward_l2_normalization(): """L2_NORMALIZATION""" data = np.random.uniform(size=(3, 6, 4)).astype("float32") _test_l2_n
ormalization(data, axis=2) _test_l2_normalization(data, axis=2, fused_activation_function="RELU") def _test_logistic(data, quantized=False): """One iteration of LOGISTIC""" with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data.shape, dtype="float32", name="in_0") if quantized: inq_data = tf.quantization.fake_quant_with_min_max_args( in_data, min=-5, max=5, name="inq_0" ) input_range = {"inq_0": (-5, 5)} out = math_ops.sigmoid(inq_data) out = tf.quantization.fake_quant_with_min_max_args(out, min=0, max=1, name="out") compare_tflite_with_tvm( data, "inq_0:0", [inq_data], [out], quantized=True, input_range=input_range ) else: out = math_ops.sigmoid(in_data) compare_tflite_with_tvm(data, "in_0:0", [in_data], [out]) def test_forward_logistic(): """LOGISTIC""" _test_logistic(np.arange(6.0, dtype=np.float32).reshape((1, 6))) _test_logistic(np.random.uniform(0, 255, (3, 6)).astype(np.uint8), quantized=True) def _test_softmax(data): """One iteration of softmax""" with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype) out = nn_ops.softmax(in_data) compare_tflite_with_tvm(data, "Placeholder:0", [in_data], [out]) def test_forward_softmax(): """Softmax""" _test_softmax(np.arange(6.0, dtype=np.float32).reshape((1, 6))) _test_softmax(np.arange(6.0, dtype=np.float32).reshape((1, 2, 3))) def _test_log_softmax(data, quantized=False): """One iteration of log_softmax""" with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data.shape, dtype="float32", name="in_0") if quantized: inq_data = tf.quantization.fake_quant_with_min_max_args( in_data, min=-10, max=10, name="inq_0" ) input_range = {"inq_0": (-10, 10)}
out = nn_ops.log_softmax(inq_data) out = tf.quantization.fake_quant_with_min_max_args(out, min=-20, max=0, name="out") compare_tflite_with_tvm( data, "inq_0:0", [inq_data], [out], quantized=True, input_range=input_range ) else: out = nn_ops.log_softmax(in_data) compare_tflite_with_tvm(data, "in_0:0", [in_data], [out]) def test_forward_log_softmax(): """Log_softmax""" _test_log_softmax(np.random.uniform(-10, 10, size=(3, 6)).astype(np.float32)) _test_log_softmax(np.random.uniform(0, 255, (3, 6)).astype(np.uint8), quantized=True) def _test_tanh(data, quantized=False): """One iteration of TANH""" with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data.shape, dtype="float32", name="in_0") if quantized: inq_data = tf.quantization.fake_quant_with_min_max_args( in_data, min=-3, max=3, name="inq_0" ) input_range = {"inq_0": (-3, 3)} out = math_ops.tanh(inq_data) out = tf.quantization.fake_quant_with_min_max_args(out, min=-1, max=1, name="out") compare_tflite_with_tvm( data, "inq_0:0", [inq_data], [out], quantized=True, input_range=input_range ) else: out = math_ops.tanh(in_data) compare_tflite_with_tvm(data, "in_0:0", [in_data], [out]) def test_forward_tanh(): """TANH""" _test_tanh(np.arange(6.0, dtype=np.float32).reshape((1, 6)), quantized=False) _test_tanh(np.arange(0, 256, 30, dtype=np.uint8), quantized=True) def _test_relu(data, quantized=False): """One iteration of ReLU""" with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data.shape, dtype="float32", name="in_0") if quantized: inq_data = tf.quantization.fake_quant_with_min_max_args( in_data, min=-10, max=10, name="inq_0" ) input_range = {"inq_0": (-10, 10)}
out = nn_ops.relu(inq_data) out = tf.quantization.fake_quant_with_min_max_args(out, min=0, max=6, name="out") compare_tflite_with_tvm( data, "inq_0:0", [inq_data], [out], quantized=True, input_range=input_range ) else: out = nn_ops.relu(in_data) compare_tflite_with_tvm(data, "in_0:0", [in_data], [out]) def test_forward_relu(): """ReLU""" _test_relu(np.arange(6.0, dtype=np.float32).reshape((1, 6))) _test_relu(np.random.uniform(0, 255, (3, 6)).astype(np.uint8), quantized=True) def _test_relu6(data, quantized=False): """One iteration of ReLU6""" with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data.shape, dtype="float32", name="in_0") if quantized: inq_data = tf.quantization.fake_quant_with_min_max_args( in_data, min=-10, max=10, name="inq_0" ) input_range = {"inq_0": (-10, 10)} out = nn_ops.relu6(inq_data) out = tf.quantization.fake_quant_with_min_max_args(out, min=0, max=6, name="out") compare_tflite_with_tvm( data, "inq_0:0", [inq_data], [out], quantized=True, input_range=input_range ) else: out = nn_ops.relu6(in_data) compare_tflite_with_tvm(data, "in_0:0", [in_data], [out]) def test_forward_relu6(): """ReLU6""" _test_relu6(np.random.uniform(-10, 10, size=(3, 6)).astype(np.float32)) _test_relu6(np.random.uniform(0, 255, (3, 6)).astype(np.uint8), quantized=True) def _test_leaky_relu(data, alpha, quantized=False): """One iteration of Leaky_ReLU""" with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data.shape, dtype="float32", name="in_0") if quantized: inq_data = tf.quantization.fake_quant_with_min_max_args( in_data, min=-3, max=2, name="inq_0" ) input_range = {"inq_0": (-3, 2)} out = n
n_ops.leaky_relu(inq_data, alpha) out = tf.quantization.fake_quant_with_min_max_args(out, min=-3, max=2, name="out") compare_tflite_with_tvm( data, "inq_0:0", [inq_data], [out], quantized=True, input_range=input_range ) else: out = nn_ops.leaky_relu(in_data, alpha) compare_tflite_with_tvm(data, "in_0:0", [in_data], [out]) def test_forward_leaky_relu(): """Leaky_ReLU""" _test_leaky_relu(np.random.uniform(-5, 5, (1, 6)).astype(np.float32), alpha=0.2) if package_version.parse(tf.VERSION) >= package_version.parse("1.14.0"): _test_leaky_relu( np.random.uniform(0, 255, (2, 3)).astype(np.uint8), alpha=0.3, quantized=True ) def _test_relu_n1_to_1(data, quantized=False): """One iteration of ReLU_n1_to_1""" with tf.Graph().as_default(): in_data = array_ops.placeholder(shape=data.shape, dtype="float32", name="in_0") if quantized: inq_data = tf.quantization.fake_quant_with_min_max_args( in_data, min=-3, max=3, name="inq_0" ) input_range = {"inq_0": (-3, 3)} out = math_ops.maximum(-1.0, math_ops.minimum(inq_data, 1.0)) out = tf.quantization.fake_quant_with_min_max_args(out, min=-1, max=1, name="out") compare_tflite_with_tvm( data, "inq_0:0", [inq_data], [out], quantized=True, input_range=input_range ) else: out = math_ops.maximum(-1.0, math_ops.minimum(in_data, 1.0)) compare_tflite_with_tvm(data, "in_0:0", [in_data], [out]) def test_forward_relu_n1_to_1(): """ReLU_n1_to_1""" _test_relu_n1_to_1(np.random.uniform(-3, 3, (1, 6)).astype(np.float32)) if package_version.parse(tf.VERSION) >= package_version.parse("1.14.0"): _test_relu_n1_to_1(np.random.uniform(0, 255, (3, 6)).astype(np.uint8), quantized=True) def _test_prelu(data, alpha): """One iteration of PReLU""" wit
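# PReLU is expressed as relu(x) - alpha * relu(-x), the decomposition that
# converts to a single TFLite PRELU operator.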
def _test_prelu(data, alpha):
    """One iteration of PReLU"""
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        out = nn_ops.relu(in_data) + (-alpha * nn_ops.relu(-in_data))
        compare_tflite_with_tvm(data, "Placeholder:0", [in_data], [out])


def test_forward_prelu():
    """PReLU"""
    _test_prelu(
        np.random.uniform(-5, 5, size=(1, 32, 32, 3)).astype("float32"),
        np.full((3,), 0.2, dtype="float32"),
    )
    _test_prelu(
        np.random.uniform(-5, 5, size=(1, 32, 32, 3)).astype("float32"),
        np.full((1, 3), 0.2, dtype="float32"),
    )
    _test_prelu(
        np.random.uniform(-5, 5, size=(1, 32, 32, 3)).astype("float32"),
        np.full((1, 1, 3), 0.2, dtype="float32"),
    )
    _test_prelu(
        np.random.uniform(-5, 5, size=(1, 32, 32, 3)).astype("float32"),
        np.full((1, 1, 1, 3), 0.2, dtype="float32"),
    )
    _test_prelu(
        np.random.uniform(-5, 5, size=(1, 32, 32, 3)).astype("float32"),
        np.full((32, 3), 0.2, dtype="float32"),
    )
    _test_prelu(
        np.random.uniform(-5, 5, size=(1, 32, 32, 3)).astype("float32"),
        np.full((32, 32, 3), 0.2, dtype="float32"),
    )
    _test_prelu(
        np.random.uniform(-5, 5, size=(1, 32, 32, 3)).astype("float32"),
        np.full((1, 32, 1, 3), 0.2, dtype="float32"),
    )
    _test_prelu(
        np.random.uniform(-5, 5, size=(1, 1, 3)).astype("float32"),
        np.full((3,), 0.2, dtype="float32"),
    )
    _test_prelu(
        np.random.uniform(-5, 5, size=(1, 32, 3)).astype("float32"),
        np.full((32, 3), 0.2, dtype="float32"),
    )
    _test_prelu(
        np.random.uniform(-5, 5, size=(32, 3)).astype("float32"), np.full((3), 0.2, dtype="float32")
    )


def _test_depthtospace(data, block_size):
    """One iteration of depth_to_space operation with given data and block size"""
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        out = array_ops.depth_to_space(in_data, block_size)
        compare_tflite_with_tvm(data, "Placeholder:0", [in_data], [out])


def test_forward_depthtospace():
    if package_version.parse(tf.VERSION) >= package_version.parse("1.15.0"):
        _test_depthtospace(np.random.normal(size=[1, 32, 32, 4]).astype("float32"), 2)
        _test_depthtospace(np.random.normal(size=[1, 16, 8, 32]).astype("float32"), 4)


def _test_spacetodepth(data, block_size):
    """One iteration of space_to_depth operation with given data and block size"""
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        out = array_ops.space_to_depth(in_data, block_size)
        compare_tflite_with_tvm(data, "Placeholder:0", [in_data], [out])


def test_forward_spacetodepth():
    _test_spacetodepth(np.random.normal(size=[1, 32, 32, 4]).astype("float32"), 2)
    _test_spacetodepth(np.random.normal(size=[1, 16, 8, 32]).astype("float32"), 4)


def _test_reverse_sequence(shape, dtype, seq_lengths, batch_axis, seq_axis):
    """One iteration of reverse_sequence operation with given data and attributes"""
    data = np.random.uniform(0, 100, size=shape).astype(dtype)
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(dtype=dtype, name="input", shape=shape)
        out = tf.reverse_sequence(
            in_data, seq_lengths=seq_lengths, batch_axis=batch_axis, seq_axis=seq_axis
        )
        compare_tflite_with_tvm(data, "input", [in_data], [out])


def test_forward_reverse_sequence():
    if package_version.parse(tf.VERSION) >= package_version.parse("1.14.0"):
        _test_reverse_sequence([4, 3], "float32", [3, 2, 1], 1, 0)
        _test_reverse_sequence([4, 3], "float32", [3, 2, 1, 3], 0, 1)
        _test_reverse_sequence([2, 3, 3, 3], "float32", [2, 3, 2], 2, 1)
        _test_reverse_sequence([2, 4, 6, 4, 5], "float32", [5, 3], 0, 2)
        _test_reverse_sequence([2, 4, 6, 4, 5], "float32", [5, 3, 1, 4], 3, 2)
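# SPARSE_TO_DENSE: indices and values are fed as placeholders, while the dense
# output shape is baked into the graph as a tf.constant.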
if package_version.parse(tf.VERSION) >= package_version.parse("1.14.0"): with tf.Graph().as_default(): indices = tf.placeholder( shape=sparse_indices.shape, dtype=str(sparse_indices.dtype), name="indices" ) values = tf.placeholder( shape=sparse_values.shape, dtype=str(sparse_values.dtype), name="values" ) oshape = tf.constant( output_shape, shape=output_shape.shape, dtype=str(output_shape.dtype) ) if default_value is None: output = tf.sparse_to_dense(indices, oshape, values) compare_tflite_with_tvm( [sparse_indices, sparse_values], ["indices", "values"], [indices, values], [output], ) else: dv_placeholder = tf.placeholder( shape=(), dtype=str(default_value.dtype), name="default_value" ) output = tf.sparse_to_dense(indices, oshape, values, dv_placeholder) compare_tflite_with_tvm( [sparse_indices, sparse_values, default_value], ["indices", "values", "default_value"], [indices, values, dv_placeholder], [output], ) def test_forward_sparse_to_dense(): """ Works in tvm/topi/tensorflow. But tflite converter breaks this test case _test_sparse_to_dense( np.int32(1), np.int32(3), np.int32(0), np.array([5]).astype("int32") ) """ _test_sparse_to_dense( np.array([0, 1, 4]).astype("int32"), np.array([3, 3, 3]).astype("int32"), np.int32(0), np.array([5]).astype("int32"), ) _test_sparse_to_dense( np.array([[0, 0], [1, 2]]).astype("int32"), np.array([1, 2]).astype("int32"), np.int32(0), np.array([3, 4]).astype("int32"), ) _test_sp
arse_to_dense( np.array([[0, 0, 0], [1, 2, 3]]).astype("int32"), np.array([1, 2]).astype("int32"), np.int32(4), np.array([2, 3, 4]).astype("int32"), ) _test_sparse_to_dense( np.array([0, 1, 4]).astype("int32"), np.array([3.1, 3.1, 3.1]).astype("float32"), np.float32(3.5), np.array([5]).astype("int32"), ) _test_sparse_to_dense( np.array([0, 1, 4]).astype("int32"), np.array([3.1, 3.1, 3.1]).astype("float32"), None, np.array([5]).astype("int32"), ) def _test_fully_connected( tensor_in_sizes, const_input, filter_in_sizes, bias_in_size=None, quantized=False, fp16_quantized=False, ): """One iteration of fully connected""" total_size_1 = np.prod(tensor_in_sizes) total_size_2 = np.prod(filter_in_sizes) assert ( int(total_size_1 / tensor_in_sizes[0]) == filter_in_sizes[0] ), "input size and filter size are mismatched" data_array = np.arange( 1, total_size_1 + 1, dtype=np.uint8 if quantized and not fp16_quantized else np.float32 ) filter_array = np.arange( 1, total_size_2 + 1, dtype=np.uint8 if quantized and not fp16_quantized else np.float32 ) in_name = "input" with tf.Graph().as_default(): in_data = ( constant_op.constant(data_array, shape=tensor_in_sizes, dtype=np.float32, name=in_name) if const_input else array_ops.placeholder(shape=tensor_in_sizes, dtype=np.float32, name=in_name) ) in_filter = constant_op.constant(filter_array, shape=filter_in_sizes, dtype=np.float32) data_array = np.reshape(data_array, tensor_in_sizes) if bias_in_size: assert bias_in_size[0] == filter_in_sizes[1], "bias and filter size are mismatched" bias_array = np.arange( 1, bias_in_size[0] + 1, dtype=np.uint8 if quantized else np.float32 ) in_bias = constant_op.cons
tant(bias_array, shape=bias_in_size, dtype=np.float32) if quantized and not fp16_quantized: inq_data = tf.quantization.fake_quant_with_min_max_args( in_data, min=-100, max=100, name="inq_0" ) input_range = {"inq_0": (-100, 100)} inq_filter = tf.quantization.fake_quant_with_min_max_args( in_filter, min=-100, max=100, name="inq_1" ) input_range = {"inq_0": (-100, 100), "inq_1": (-100, 100)} inq_data_reshape = array_ops.reshape(inq_data, [tensor_in_sizes[0], -1]) out = math_ops.mat_mul(inq_data_reshape, inq_filter) out = tf.quantization.fake_quant_with_min_max_args(out, min=-100, max=100, name="out") if bias_in_size: out = nn_ops.bias_add(out, in_bias) compare_tflite_with_tvm( data_array, inq_data.name, [inq_data], [out], quantized=True, input_range=input_range, experimental_new_converter=True, ) else: in_data_reshape = array_ops.reshape(in_data, [tensor_in_sizes[0], -1]) out = math_ops.mat_mul(in_data_reshape, in_filter) if bias_in_size: out = nn_ops.bias_add(out, in_bias) compare_tflite_with_tvm( data_array, in_data.name, [in_data], [out], experimental_new_converter=True, fp16_quantized=fp16_quantized, ) def test_forward_fully_connected(): """Fully Connected""" for input_shape, weight_shape, bias_shape in [ ([1, 4], [4, 4], None), ([1, 4], [4, 4], [4]), ([1, 1, 1, 5], [5, 5], None), ([1, 1, 10], [10, 103], None), ([1, 1, 1, 150], [150, 100], None), ([1, 1, 1, 150], [150, 100], None), ([1, 1, 1, 150
], [150, 100], [100]), ([5, 1, 1, 150], [150, 100], None), ([5, 1, 1, 150], [150, 100], [100]), ]: for const_input in [False, True]: for quantized in [False, True]: for fp16_quantized in [False, True]: _test_fully_connected( input_shape, const_input, weight_shape, bias_shape, quantized, fp16_quantized, ) def _test_reverse_v2(input_shape, axis, dtype): """One iteration of REVERSE_V2""" with tf.Graph().as_default(): input_array = np.random.randint(0, 100, size=input_shape).astype(dtype) in_input = tf.placeholder(dtype=input_array.dtype, shape=input_array.shape, name="input") in_axis = ops.convert_to_tensor(axis, dtype=axis.dtype) out = array_ops.reverse(in_input, in_axis) compare_tflite_with_tvm([input_array], ["input"], [in_input], [out]) def test_forward_reverse_v2(): """REVERSE_V2""" for dtype in ["float32", "int32"]: _test_reverse_v2((5), np.array([0], dtype="int32"), dtype) _test_reverse_v2((5, 6, 4, 2), np.array([2], dtype="int32"), dtype) def _test_matrix_set_diag(input_shape, input_type, quantized=False): """One iteration of MATRIX_SET_DIAG""" with tf.Graph().as_default(): diagonal_shape = list(input_shape[:-2]) diagonal_shape.append(min(input_shape[-2], input_shape[-1])) if quantized: input_array = np.random.uniform(0, 256, input_shape).astype("uint8") in_input = tf.placeholder(dtype="float32", shape=input_array.shape, name="input") inq_input = tf.quantization.fake_quant_with_min_max_args( in_input, min=-100, max=100, name="q_input" ) diagonal = np.random.uniform(0, 256, diagonal_shape).astype("uint8") in_diagonal = tf.placeholder(dtype="float32", sh
ape=diagonal.shape, name="diagonal") inq_diagonal = tf.quantization.fake_quant_with_min_max_args( in_diagonal, min=-100, max=100, name="q_diagonal" ) input_range = {"q_input": (-100, 100), "q_diagonal": (-100, 100)} out = array_ops.matrix_set_diag(inq_input, inq_diagonal) out = tf.quantization.fake_quant_with_min_max_args(out, min=-100, max=100, name="out") compare_tflite_with_tvm( [input_array, diagonal], ["q_input", "q_diagonal"], [inq_input, inq_diagonal], [out], quantized=True, input_range=input_range, ) else: input_array = np.random.uniform(0, 100, input_shape).astype(input_type) diagonal = np.random.uniform(0, 100, diagonal_shape).astype(input_type) in_input = tf.placeholder( dtype=input_array.dtype, shape=input_array.shape, name="input" ) in_diagonal = tf.placeholder( dtype=diagonal.dtype, shape=diagonal.shape, name="diagonal" ) out = array_ops.matrix_set_diag(in_input, in_diagonal) compare_tflite_with_tvm( [input_array, diagonal], ["input", "diagonal"], [in_input, in_diagonal], [out] ) def test_forward_matrix_set_diag(): """MATRIX_SET_DIAG""" for dtype in [np.float32, np.int32]: _test_matrix_set_diag((4, 4), dtype) _test_matrix_set_diag((5, 4, 3, 4), dtype) _test_matrix_set_diag((4, 4, 2), dtype) _test_matrix_set_diag((4, 4), np.uint8, quantized=True) _test_matrix_set_diag((5, 4, 3, 4), np.uint8, quantized=True) _test_matrix_set_diag((4, 4, 2), np.uint8, quantized=True) def _test_matrix_diag(diagonal_shape, dtype): """One iteration of MATRIX_DIAG""" with tf.Graph().as_default(): diagonal = np.random.uniform(0, 100, diagonal_shape).astype(dtype) in_diagonal = tf.placehol
der(dtype=diagonal.dtype, shape=diagonal.shape, name="diagonal") out = array_ops.matrix_diag(in_diagonal) compare_tflite_with_tvm( [diagonal], ["diagonal"], [in_diagonal], [out], experimental_new_converter=True ) def test_forward_matrix_diag(): """MATRIX_DIAG""" for dtype in [np.float32, np.int32]: _test_matrix_diag((4), dtype) _test_matrix_diag((5, 4, 3), dtype) _test_matrix_diag((2, 3), dtype) def _test_detection_postprocess(tf_model_file, box_encodings_size, class_predictions_size): """One iteration of detection postProcess with given model and shapes""" converter = tf.lite.TFLiteConverter.from_frozen_graph( tf_model_file, input_arrays=["raw_outputs/box_encodings", "raw_outputs/class_predictions"], output_arrays=[ "TFLite_Detection_PostProcess", "TFLite_Detection_PostProcess:1", "TFLite_Detection_PostProcess:2", "TFLite_Detection_PostProcess:3", ], input_shapes={ "raw_outputs/box_encodings": box_encodings_size, "raw_outputs/class_predictions": class_predictions_size, }, ) converter.allow_custom_ops = True converter.inference_type = tf.lite.constants.FLOAT tflite_model = converter.convert() np.random.seed(0) box_encodings = np.random.uniform(size=box_encodings_size).astype("float32") class_predictions = np.random.uniform(size=class_predictions_size).astype("float32") tflite_output = run_tflite_graph(tflite_model, [box_encodings, class_predictions]) tvm_output = run_tvm_graph( tflite_model, [box_encodings, class_predictions], ["raw_outputs/box_encodings", "raw_outputs/class_predictions"], num_output=4, ) assert all( list( tvm_tensor.shape == tflite_tensor.shape for (tvm_tensor, tflite_tensor) in zip(tvm_output, tflite_output) ) ) assert tvm_output[3] == tflite_output[3] v
alid_count = tvm_output[3][0] for i in range(0, valid_count): tvm.testing.assert_allclose( np.squeeze(tvm_output[0][0][i]), np.squeeze(tflite_output[0][0][i]), rtol=1e-5, atol=1e-5, ) np.testing.assert_equal(np.squeeze(tvm_output[1][0][i]), np.squeeze(tflite_output[1][0][i])) tvm.testing.assert_allclose( np.squeeze(tvm_output[2][0][i]), np.squeeze(tflite_output[2][0][i]), rtol=1e-5, atol=1e-5, ) def test_detection_postprocess(): """Detection PostProcess""" box_encodings_size = (1, 1917, 4) class_predictions_size = (1, 1917, 91) tf_model_file = tf_testing.get_workload_official( "http: "ssd_mobilenet_v2_quantized_300x300_coco_2019_01_03.tar.gz", "ssd_mobilenet_v2_quantized_300x300_coco_2019_01_03/tflite_graph.pb", ) _test_detection_postprocess(tf_model_file, box_encodings_size, class_predictions_size) box_encodings_size = (1, 2034, 4) class_predictions_size = (1, 2034, 91) tf_model_file = download_testdata( "https: "tflite_graph_with_postprocess.pb", ) _test_detection_postprocess(tf_model_file, box_encodings_size, class_predictions_size) def test_custom_op_converter(): """Test case for user-defined operator converter in TFLite frontend"""
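    # The TFLite frontend keeps a dict (self.convert_map) from TFLite op names
    # to converter functions. Subclassing OperatorConverter and updating that
    # dict, as below, reroutes selected ops; here SUB is deliberately lowered
    # as add(lhs, negative(rhs)) so the override is observable in the output.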
class DummyOperatorConverter(relay.frontend.tflite.OperatorConverter): """Operator Converter for converting TFLite ops to relay ops""" def __init__(self, model, subgraph, exp_tab): super().__init__(model, subgraph, exp_tab) self.allow_custom_ops = True convert_map_overwrite = {"SUB": self.convert_sub_dummy} self.convert_map.update(convert_map_overwrite) def convert_sub_dummy(self, op): """Convert TFLite SUB""" input_tensors = self.get_input_tensors(op) assert len(input_tensors) == 2, "input tensors length should be 2" lhs_tensor = input_tensors[0] rhs_tensor = input_tensors[1] lhs_expr = self.get_expr(lhs_tensor.tensor_idx) rhs_expr = self.get_expr(rhs_tensor.tensor_idx) temp_expr = relay.op.negative(rhs_expr) out = relay.op.add(lhs_expr, temp_expr) return out with tf.Graph().as_default(): data = [ np.arange(6.0, dtype=np.float32).reshape((2, 1, 1, 3)), np.arange(1.0, 7.0, dtype=np.float32).reshape((2, 1, 1, 3)), ] in_data = [ array_ops.placeholder(shape=data[0].shape, dtype="float32", name="in_0"), array_ops.placeholder(shape=data[1].shape, dtype="float32", name="in_1"), ] out = math_ops.subtract(in_data[0], in_data[1]) in_name = [x[1] for x in zip(in_data, ("in_0:0", "in_1:0"))] input_tensors = in_data output_tensors = [out] in_node = [0] * len(in_name) for i, _ in enumerate(in_name): in_node[i] = in_name[i].split(":")[0] with tf.Session() as sess: converter = tf.lite.TFLiteConverter.from_session(sess, input_tensors, output_tensors) tflite_model_buf = converter.convert() in_data = [x[1] for x in zip(in_data, data)] tvm_output_orig = run_tvm_graph(tflite_model_buf, in_data, in_node) tvm_output_dummy = run_tvm_graph(
tflite_model_buf, in_data, in_node, op_converter=DummyOperatorConverter ) tvm.testing.assert_allclose( np.squeeze(tvm_output_orig[0]), np.squeeze(tvm_output_dummy[0]), rtol=1e-5, atol=1e-5 ) def test_forward_mobilenet_v1(): """Test the Mobilenet V1 TF Lite model.""" tflite_model_file = tf_testing.get_workload_official( "http: "mobilenet_v1_1.0_224.tflite", ) with open(tflite_model_file, "rb") as f: tflite_model_buf = f.read() data = np.random.uniform(size=(1, 224, 224, 3)).astype("float32") tflite_output = run_tflite_graph(tflite_model_buf, data) tvm_output = run_tvm_graph(tflite_model_buf, data, "input") tvm.testing.assert_allclose( np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]), rtol=1e-5, atol=1e-5 ) def test_forward_mobilenet_v2(): """Test the Mobilenet V2 TF Lite model.""" tflite_model_file = tf_testing.get_workload_official( "http: "mobilenet_v2_1.0_224.tflite", ) with open(tflite_model_file, "rb") as f: tflite_model_buf = f.read() data = np.random.uniform(size=(1, 224, 224, 3)).astype("float32") tflite_output = run_tflite_graph(tflite_model_buf, data) tvm_output = run_tvm_graph(tflite_model_buf, data, "input") tvm.testing.assert_allclose( np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]), rtol=1e-5, atol=1e-5 ) def test_forward_mobilenet_v3(): """Test the Mobilenet V3 TF Lite model.""" if package_version.parse(tf.VERSION) < package_version.parse("1.15.0"): return tflite_model_file = tf_testing.get_workload_official( "https: "v3-large_224_1.0_float/v3-large_224_1.0_float.tflite", ) with open(tflite_model_file, "rb") as f: tflite_model_buf = f.read() data = np.random.uniform(size=(1, 224, 224, 3)).astype("float32") tflite_output = run_tflite_graph(tflite_model_buf, data) tvm_output = run_tvm_graph(tflite_model_buf, data, "input") tvm.testi
ng.assert_allclose( np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]), rtol=1e-5, atol=1e-5 ) def test_forward_sparse_mobilenet_v1(): """Test the Sparse version of Mobilenet V1 TF Lite model.""" tflite_model_file = download_testdata( "https: "mbv1_140_90_12b4_720.tflite", ) with open(tflite_model_file, "rb") as f: tflite_model_buf = f.read() data = np.random.uniform(size=(1, 224, 224, 3)).astype("float32") tflite_output = run_tflite_graph(tflite_model_buf, data) tvm_output = run_tvm_graph(tflite_model_buf, data, "float_image_input") tvm.testing.assert_allclose( np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]), rtol=1e-5, atol=1e-5 ) def test_forward_sparse_mobilenet_v2(): """Test the Sparse version of Mobilenet V2 TF Lite model.""" tflite_model_file = download_testdata( "https: "mbv2_200_85_11-16b2_744.tflite", ) with open(tflite_model_file, "rb") as f: tflite_model_buf = f.read() data = np.random.uniform(size=(1, 224, 224, 3)).astype("float32") tflite_output = run_tflite_graph(tflite_model_buf, data) tvm_output = run_tvm_graph(tflite_model_buf, data, "float_image_input") tvm.testing.assert_allclose( np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]), rtol=1e-5, atol=1e-5 ) def test_forward_inception_v3_net(): """Test the Inception V3 TF Lite model.""" tflite_model_file = tf_testing.get_workload_official( "https: "upload_20180427/inception_v3_2018_04_27.tgz", "inception_v3.tflite", ) with open(tflite_model_file, "rb") as f: tflite_model_buf = f.read() data = np.random.uniform(size=(1, 299, 299, 3)).astype("float32") tflite_output = run_tflite_graph(tflite_model_buf, data) tvm_output = run_tvm_graph(tflite_model_buf, data, "input") tvm.testing.assert_allclose( np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]), rtol=1e-5, atol=1e-5 )
def test_forward_inception_v4_net(): """Test the Inception V4 TF Lite model.""" tflite_model_file = tf_testing.get_workload_official( "https: "tflite/model_zoo/upload_20180427/" "inception_v4_2018_04_27.tgz", "inception_v4.tflite", ) with open(tflite_model_file, "rb") as f: tflite_model_buf = f.read() data = np.random.uniform(size=(1, 299, 299, 3)).astype("float32") tflite_output = run_tflite_graph(tflite_model_buf, data) tvm_output = run_tvm_graph(tflite_model_buf, data, "input") tvm.testing.assert_allclose( np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]), rtol=1e-5, atol=1e-5 ) def test_forward_inception_v4_net_batched(): """Test the Inception V4 TF Lite model.""" tflite_model_file = tf_testing.get_workload_official( "https: "tflite/model_zoo/upload_20180427/" "inception_v4_2018_04_27.tgz", "inception_v4.tflite", ) with open(tflite_model_file, "rb") as f: tflite_model_buf = f.read() data = np.random.uniform(size=(4, 299, 299, 3)).astype("float32") tflite_output = run_tflite_graph(tflite_model_buf, data) tvm_output = run_tvm_graph(tflite_model_buf, data, "input") tvm.testing.assert_allclose( np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]), rtol=1e-5, atol=1e-5 ) def test_forward_qnn_inception_v1_net(): """Test the Quantized TFLite Inception model.""" tflite_model_file = tf_testing.get_workload_official( "https: "inception_v1_224_quant_20181026.tgz", "inception_v1_224_quant.tflite", ) with open(tflite_model_file, "rb") as f: tflite_model_buf = f.read() data = get_real_image(224, 224) tflite_output = run_tflite_graph(tflite_model_buf, data) tflite_predictions = np.squeeze(tflite_output) tflite_sorted_labels = tflite_predictions.argsort()[-3:][::-1] tvm_output = run_tvm_graph(tflite_model_buf, data, "input") tvm_predictions = np
.squeeze(tvm_output) tvm_sorted_labels = tvm_predictions.argsort()[-3:][::-1] tvm.testing.assert_allclose(tvm_sorted_labels, tflite_sorted_labels) def test_forward_qnn_mobilenet_v1_net(): """Test the Quantized TFLite Mobilenet V1 model.""" tflite_model_file = tf_testing.get_workload_official( "https: "mobilenet_v1_1.0_224_quant.tgz", "mobilenet_v1_1.0_224_quant.tflite", ) with open(tflite_model_file, "rb") as f: tflite_model_buf = f.read() data = get_real_image(224, 224) tflite_output = run_tflite_graph(tflite_model_buf, data) tflite_predictions = np.squeeze(tflite_output) tflite_sorted_labels = tflite_predictions.argsort()[-3:][::-1] tvm_output = run_tvm_graph(tflite_model_buf, data, "input") tvm_predictions = np.squeeze(tvm_output) tvm_sorted_labels = tvm_predictions.argsort()[-3:][::-1] tvm.testing.assert_allclose(tvm_sorted_labels, tflite_sorted_labels) def test_forward_qnn_mobilenet_v2_net(): """Test the Quantized TFLite Mobilenet V2 model.""" tflite_model_file = tf_testing.get_workload_official( "https: "mobilenet_v2_1.0_224_quant.tgz", "mobilenet_v2_1.0_224_quant.tflite", ) with open(tflite_model_file, "rb") as f: tflite_model_buf = f.read() data = get_real_image(224, 224) tflite_output = run_tflite_graph(tflite_model_buf, data) tflite_predictions = np.squeeze(tflite_output) tflite_sorted_labels = tflite_predictions.argsort()[-3:][::-1] tvm_output = run_tvm_graph(tflite_model_buf, data, "input") tvm_predictions = np.squeeze(tvm_output) tvm_sorted_labels = tvm_predictions.argsort()[-3:][::-1] tvm.testing.assert_allclose(tvm_sorted_labels, tflite_sorted_labels) def test_forward_qnn_mobilenet_v3_net(): """Test the Quantized TFLite Mobilenet V3 model.""" if package_version.parse(tf.VERSION) < package_version.parse("1.15.0"): pytest.skip("Unsupported in tflite < 1.15.0")
else: pytest.skip("This segfaults with tensorflow 1.15.2 and above") tflite_model_file = tf_testing.get_workload_official( "https: "v3-large_224_1.0_uint8/v3-large_224_1.0_uint8.tflite", ) with open(tflite_model_file, "rb") as f: tflite_model_buf = f.read() data = get_real_image(224, 224) tflite_output = run_tflite_graph(tflite_model_buf, data) tflite_predictions = np.squeeze(tflite_output) tflite_sorted_labels = tflite_predictions.argsort()[-3:][::-1] tvm_output = run_tvm_graph(tflite_model_buf, data, "input") tvm_predictions = np.squeeze(tvm_output) tvm_sorted_labels = tvm_predictions.argsort()[-3:][::-1] tvm.testing.assert_allclose(tvm_sorted_labels, tflite_sorted_labels) def test_forward_tflite2_qnn_resnet50(): """Test the Quantized TFLite version 2.1.0 Resnet50 model.""" if package_version.parse(tf.VERSION) >= package_version.parse("2.1.0"): tflite_model_file = download_testdata( "https: "resnet_50_quantized.tflite", "resnet_50_quantized.tflite", ) with open(tflite_model_file, "rb") as f: tflite_model_buf = f.read() data = pre_processed_image(224, 224) tflite_output = run_tflite_graph(tflite_model_buf, data) tflite_predictions = np.squeeze(tflite_output) tflite_sorted_labels = tflite_predictions.argsort()[-3:][::-1] tvm_output = run_tvm_graph(tflite_model_buf, np.array(data), "input_1") tvm_predictions = np.squeeze(tvm_output) tvm_sorted_labels = tvm_predictions.argsort()[-3:][::-1] tvm.testing.assert_allclose(tvm_sorted_labels, tflite_sorted_labels) def test_forward_tflite2_qnn_inception_v1(): """Test the Quantized TFLite version 2.1.0 Inception V1 model.""" if package_version.parse(tf.VERSION) >= package_version.parse("2.1.0"): tflite_model_file = download_testdata( "https: "inception_v1_quantized.tflite", "ince
ption_v1_quantized.tflite", ) with open(tflite_model_file, "rb") as f: tflite_model_buf = f.read() data = pre_processed_image(224, 224) tflite_output = run_tflite_graph(tflite_model_buf, data) tflite_predictions = np.squeeze(tflite_output) tflite_sorted_labels = tflite_predictions.argsort()[-3:][::-1] tvm_output = run_tvm_graph(tflite_model_buf, np.array(data), "input_1") tvm_predictions = np.squeeze(tvm_output) tvm_sorted_labels = tvm_predictions.argsort()[-3:][::-1] tvm.testing.assert_allclose(tvm_sorted_labels, tflite_sorted_labels) def test_forward_tflite2_qnn_mobilenet_v2(): """Test the Quantized TFLite version 2.1.0 Mobilenet V2 model.""" if package_version.parse(tf.VERSION) >= package_version.parse("2.1.0"): tflite_model_file = download_testdata( "https: "mobilenet_v2_quantized.tflite", "mobilenet_v2_quantized.tflite", ) with open(tflite_model_file, "rb") as f: tflite_model_buf = f.read() data = pre_processed_image(224, 224) tflite_output = run_tflite_graph(tflite_model_buf, data) tflite_predictions = np.squeeze(tflite_output) tflite_sorted_labels = tflite_predictions.argsort()[-3:][::-1] tvm_output = run_tvm_graph(tflite_model_buf, np.array(data), "input_1") tvm_predictions = np.squeeze(tvm_output) tvm_sorted_labels = tvm_predictions.argsort()[-3:][::-1] tvm.testing.assert_allclose(tvm_sorted_labels, tflite_sorted_labels) def test_forward_tflite_float16(): """Test float16 quantized model""" tflite_model_file = tf_testing.get_workload_official( "https: "mobilenet_v1_0.25_128.tgz", "mobilenet_v1_0.25_128_frozen.pb", ) converter = tf.lite.TFLiteConverter.from_frozen_graph( tflite_model_file, ["input"], ["MobilenetV1/Predictions/Reshape_1"] ) converter.optimizations = [tf.lite.Optimize.DEFAULT] converter.
target_spec.supported_types = [tf.float16] tflite_model_buf = converter.convert() data = get_real_image(128, 128, quantized=False) tflite_output = run_tflite_graph(tflite_model_buf, data) tflite_predictions = np.squeeze(tflite_output) tflite_sorted_labels = tflite_predictions.argsort()[-3:][::-1] tvm_output = run_tvm_graph(tflite_model_buf, data, "input") tvm_predictions = np.squeeze(tvm_output) tvm_sorted_labels = tvm_predictions.argsort()[-3:][::-1] tvm.testing.assert_allclose(tvm_sorted_labels, tflite_sorted_labels) def test_forward_mobilenet_int16(): """Test int16 quantized model""" model_file = tf_testing.get_workload_official( "https: "mobilenet_v1_0.25_128.tgz", "mobilenet_v1_0.25_128_frozen.pb", ) data = get_real_image(128, 128, quantized=False) converter = tf.lite.TFLiteConverter.from_frozen_graph( model_file, ["input"], ["MobilenetV1/Predictions/Reshape_1"] ) def representative_dataset(): for _ in range(1): yield [data] converter.optimizations = [tf.lite.Optimize.DEFAULT] converter.target_spec.supported_ops = [ tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8 ] converter.representative_dataset = representative_dataset tflite_model_buf = converter.convert() tflite_output = run_tflite_graph(tflite_model_buf, data) tflite_predictions = np.squeeze(tflite_output) tflite_sorted_labels = tflite_predictions.argsort()[-3:][::-1] tvm_output = run_tvm_graph(tflite_model_buf, data, "input") tvm_predictions = np.squeeze(tvm_output) tvm_sorted_labels = tvm_predictions.argsort()[-3:][::-1] tvm.testing.assert_allclose(tvm_sorted_labels, tflite_sorted_labels) def test_forward_unidirectional_sequence_lstm(): """Test the UnidirectionalSequenceLSTM TFLite""" if package_version.parse(tf.VERSION) >= package_version.parse("2.1.0"): tflite_model_fil
e = download_testdata( "https: "ce49c5de64889493161ca4194a20e0fd5eb707e6/lstm_1_in_3_out_2_ts_4.tflite?raw=true", "lstm_1_in_3_out_2_ts_4.tflite", ) with open(tflite_model_file, "rb") as f: tflite_model_buf = f.read() data = np.array( [ [ [0.5488135, 0.71518934, 0.60276335], [0.5448832, 0.4236548, 0.6458941], [0.4375872, 0.891773, 0.96366274], [0.3834415, 0.79172504, 0.5288949], ] ], dtype="float32", ) tflite_output = run_tflite_graph(tflite_model_buf, data) tvm_output = run_tvm_graph(tflite_model_buf, data, "serving_default_input_1:0") tvm.testing.assert_allclose(tflite_output, tvm_output) def test_forward_qnn_coco_ssd_mobilenet_v1(): """Test the quantized Coco SSD Mobilenet V1 TF Lite model.""" pytest.skip( "LLVM bug - getExtendedVectorNumElements - " + "https: + "specific target, for example, llvm -mcpu=core-avx2" ) tflite_model_file = tf_testing.get_workload_official( "https: "coco_ssd_mobilenet_v1_1.0_quant_2018_06_29.zip", "detect.tflite", ) with open(tflite_model_file, "rb") as f: tflite_model_buf = f.read() data = get_real_image_object_detection(300, 300) tflite_output = run_tflite_graph(tflite_model_buf, data) tvm_output = run_tvm_graph( tflite_model_buf, data, "normalized_input_image_tensor", num_output=4 ) assert all( list( tvm_tensor.shape == tflite_tensor.shape for (tvm_tensor, tflite_tensor) in zip(tvm_output, tflite_output) ) ) assert tvm_output[3] == tflite_output[3] valid_count = tvm_output[3][0] for i in range(0, valid_count): if tvm_output[2][0][i] > 0.6: tvm.tes
ting.assert_allclose( np.squeeze(tvm_output[0][0][i]), np.squeeze(tflite_output[0][0][i]), rtol=1e-2, atol=1e-2, ) np.testing.assert_equal( np.squeeze(tvm_output[1][0][i]), np.squeeze(tflite_output[1][0][i]) ) tvm.testing.assert_allclose( np.squeeze(tvm_output[2][0][i]), np.squeeze(tflite_output[2][0][i]), rtol=1e-5, atol=1e-5, ) def test_forward_coco_ssd_mobilenet_v1(): """Test the FP32 Coco SSD Mobilenet V1 TF Lite model.""" tflite_model_file = tf_testing.get_workload_official( "https: "ssd_mobilenet_v1_coco_2018_01_28.tgz", "ssd_mobilenet_v1_coco_2018_01_28.tflite", ) with open(tflite_model_file, "rb") as f: tflite_model_buf = f.read() np.random.seed(0) data = np.random.uniform(size=(1, 300, 300, 3)).astype("float32") tflite_output = run_tflite_graph(tflite_model_buf, data) tvm_output = run_tvm_graph( tflite_model_buf, data, "normalized_input_image_tensor", num_output=4 ) assert all( list( tvm_tensor.shape == tflite_tensor.shape for (tvm_tensor, tflite_tensor) in zip(tvm_output, tflite_output) ) ) assert tvm_output[3] == tflite_output[3] valid_count = tvm_output[3][0] for i in range(0, valid_count): tvm.testing.assert_allclose( np.squeeze(tvm_output[0][0][i]), np.squeeze(tflite_output[0][0][i]), rtol=1e-5, atol=1e-5, ) np.testing.assert_equal(np.squeeze(tvm_output[1][0][i]), np.squeeze(tflite_output[1][0][i])) tvm.testing.assert_allclose( np.squeeze(tvm_output[2][0][i]), np.squeeze(tflite_output[2][0][i]), rtol=1e-5, atol=1e-5, ) def test_
forward_mediapipe_hand_landmark(): """Test MediaPipe 2D hand landmark TF Lite model.""" tflite_model_file = download_testdata( "https: "hand_landmark.tflite", ) with open(tflite_model_file, "rb") as f: tflite_model_buf = f.read() data = np.random.uniform(size=(1, 256, 256, 3)).astype("float32") tflite_output = run_tflite_graph(tflite_model_buf, data) tvm_output = run_tvm_graph(tflite_model_buf, data, "input_1", num_output=2) for i in range(2): tvm.testing.assert_allclose( np.squeeze(tvm_output[i]), np.squeeze(tflite_output[i]), rtol=1e-5, atol=1e-5 ) def test_prevent_tensorflow_dynamic_range(): """ Should prevent running "dynamic range quantization" optimized TFLite graph """ data_array = np.random.randint(0, 2, (1, 1024, 1024)).astype(dtype=np.float32) filter_array = np.random.randint(0, 2, (1024, 1024)).astype(dtype=np.float32) data_in = tf.keras.layers.Input(shape=data_array.shape[1:]) dense = tf.keras.layers.Dense(units=filter_array.shape[-1], use_bias=False)(data_in) keras_model = tf.keras.models.Model(data_in, dense) keras_model.layers[1].set_weights([filter_array]) converter = interpreter_wrapper.TFLiteConverter.from_keras_model(keras_model) converter.optimizations = [tf.lite.Optimize.DEFAULT] tflite_model = converter.convert() with pytest.raises(tvm.error.OpNotImplemented): _ = run_tvm_graph(tflite_model, data_array, data_in.name.replace(":0", "")) def _test_nms_v5( bx_shape, score_shape, iou_threshold, score_threshold, max_output_size, dtype="float32" ): """One iteration of nms_v5 with given attributes""" boxes = np.random.uniform(0, 10, size=bx_shape).astype(dtype) scores = np.random.uniform(size=score_shape).astype(dtype) tf.reset_default_graph() tf.compat.v1.disable_eager_execution() in_data_1 = array_ops.placeholder(dtype, boxes.shape, name="in_data_1") in_data_2 = array_ops.placeholder(dtype, scores.shape, name="
in_data_2") out = image_ops.non_max_suppression_with_scores( boxes=in_data_1, scores=in_data_2, max_output_size=max_output_size, iou_threshold=iou_threshold, score_threshold=score_threshold, name="nms", ) compare_tflite_with_tvm( [boxes, scores], ["in_data_1:0", "in_data_2:0"], [in_data_1, in_data_2], [out[0], out[1]], out_names=[out[0].name, out[1].name], experimental_new_converter=True, ) def test_forward_nms_v5(): """test nms_v5""" _test_nms_v5((10000, 4), (10000,), 0.5, 0.4, 100) _test_nms_v5((1000, 4), (1000,), 0.7, 0.3, 50) if __name__ == "__main__": test_forward_batch_to_space_nd() test_forward_space_to_batch_nd() test_forward_split() test_forward_transpose() test_forward_cast() test_forward_batch_matmul() test_forward_tile() test_forward_shape() test_forward_concatenation() test_forward_pad() test_forward_pack() test_forward_unpack() test_forward_reshape() test_all_resize() test_forward_range() test_forward_squeeze() test_forward_slice() test_forward_topk() test_forward_gather() test_forward_gather_nd() test_forward_stridedslice() test_forward_depthtospace() test_forward_spacetodepth() test_forward_reverse_sequence() test_forward_sparse_to_dense() test_forward_select() test_forward_quantize_dequantize() test_forward_arg_min_max() test_forward_expand_dims() test_forward_reverse_v2() test_forward_matrix_set_diag() test_forward_matrix_diag() test_forward_convolution() test_forward_transpose_conv() test_forward_logistic() test_forward_pooling() test_forward_l2_pool2d() test_forward_softmax() test_forward_tanh() test_forward_relu() test_forward_relu6() test_forward_leaky_relu() test_forward_relu_n1_to_1() test_forward_log_softmax() test_forward_f
ully_connected() test_forward_l2_normalization() test_forward_local_response_normalization() test_forward_prelu() test_forward_unidirectional_sequence_lstm() test_all_elemwise() test_forward_add_n() test_all_unary_elemwise() test_forward_zeros_like() test_forward_fill() test_all_reduce() test_all_logical() test_detection_postprocess() test_forward_nms_v5() test_custom_op_converter() test_forward_mobilenet_v1() test_forward_mobilenet_v2() test_forward_mobilenet_v3() test_forward_inception_v3_net() test_forward_inception_v4_net() test_forward_inception_v4_net_batched() test_forward_coco_ssd_mobilenet_v1() test_forward_mediapipe_hand_landmark() test_forward_sparse_mobilenet_v1() test_forward_sparse_mobilenet_v2() test_forward_qnn_inception_v1_net() test_forward_qnn_mobilenet_v1_net() test_forward_qnn_mobilenet_v2_net() test_forward_qnn_mobilenet_v3_net() test_forward_qnn_coco_ssd_mobilenet_v1() test_forward_quantized_convolution() test_forward_quantized_depthwise_convolution() test_forward_tflite2_qnn_resnet50() test_forward_tflite2_qnn_inception_v1() test_forward_tflite2_qnn_mobilenet_v2() test_forward_tflite_float16() test_forward_mobilenet_int16()
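# Note on the quantized end-to-end tests above: they compare top-3 predicted
# labels rather than raw tensors, since quantized outputs can differ slightly
# between the TFLite and TVM runtimes. The shared pattern (names as used
# above) is:
#   predictions = np.squeeze(output)
#   top3_labels = predictions.argsort()[-3:][::-1]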
"""Infrastructure and tests for e2e integration tests."""
"""Test arm mprofile dsp."""
import numpy as np
import pytest
import tvm
import tvm.testing
from tvm import relay
from tvm.testing.aot import AOTTestModel, compile_and_run, generate_ref_data
from tvm.micro.testing.aot_test_utils import AOT_CORSTONE300_RUNNER


@tvm.testing.requires_corstone300
@pytest.mark.parametrize(
    "data_shape_nhwc, kernel_size, num_filter, strides, padding, dilation",
    [
        ((1, 32, 32, 1), (3, 3), 12, 1, 0, 1),
        ((1, 32, 10, 3), (3, 3), 16, 1, 0, 1),
        ((1, 49, 10, 1), (10, 4), 64, (2, 1), (4, 1, 5, 1), 1),
        ((1, 32, 32, 16), (3, 3), 16, 1, (0, 2, 2, 0), 1),
        ((1, 32, 32, 16), (3, 3), 16, 1, 0, 1),
        ((1, 32, 32, 16), (3, 3), 16, 1, 0, 1),
        ((1, 32, 32, 16), (3, 3), 16, 1, (0, 2, 2, 0), 2),
        ((1, 32, 32, 16), (3, 3), 16, 1, (1, 1, 2, 2), 2),
        ((1, 49, 10, 1), (10, 4), 64, (2, 2), (4, 1, 5, 1), 1),
        ((1, 96, 96, 3), (3, 3), 8, (2, 2), (0, 0, 1, 1), 1),
        ((1, 16, 16, 32), (1, 1), 64, (2, 2), 0, 1),
        ((4, 16, 16, 8), (5, 5), 8, 2, (0, 4, 4, 0), 1),
        ((4, 16, 16, 8), (5, 5), 16, 2, (0, 4, 4, 0), 1),
        ((4, 16, 16, 8), (5, 5), 8, 2, 0, 1),
        ((4, 16, 16, 8), (5, 5), 16, 2, 0, 1),
        ((1, 16, 16, 8), (3, 3), 16, 2, (0, 0, 1, 1), 1),
        ((1, 16, 16, 8), (3, 3), 16, 2, (1, 1, 2, 2), 1),
        ((1, 16, 16, 8), (5, 5), 16, 2, (3, 3, 2, 2), 1),
        ((1, 16, 16, 8), (3, 3), 16, 2, (0, 1, 2, 3), 1),
    ],
)
@pytest.mark.parametrize("dtype", ["int8", "int16"])
def test_conv2d(data_shape_nhwc, kernel_size, num_filter, strides, padding, dilation, dtype):
    """Test a subgraph with a single conv2d operator."""
    ishape = data_shape_nhwc
    wshape = (*kernel_size, data_shape_nhwc[-1], num_filter)
    weight_data = np.random.randint(low=-10, high=10, size=wshape, dtype=dtype)
    input0 = relay.var("input", relay.TensorType(ishape, dtype))
    weight0 = relay.const(weight_data)
    out0 = relay.op.nn.conv2d(
        input0,
        weight0,
        kernel_size=kernel_size,
        strides=strides,
        padding=padding,
        dilation=(dilation, dilation),
        data_layout="NHWC",
        kernel_layout="HWIO",
        out_dtype="int32",
        out_layout="NHWC",
    )
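    # The reference module below uses kernel_layout="HWIO" while the module
    # under test uses "HWOI", so the same weights are relaid out with
    # np.moveaxis. A minimal sketch of that axis move (illustrative only):
    #   w_hwio.shape == (kh, kw, in_ch, out_ch)
    #   np.moveaxis(w_hwio, 2, -1).shape == (kh, kw, out_ch, in_ch)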
ref_mod = tvm.IRModule.from_expr(relay.Function([input0], out0)) input1 = relay.var("input", relay.TensorType(ishape, dtype)) weight1 = relay.const(np.moveaxis(weight_data, 2, -1)) out1 = relay.op.nn.conv2d( input1, weight1, kernel_size=kernel_size, strides=strides, padding=padding, dilation=(dilation, dilation), data_layout="NHWC", kernel_layout="HWOI", out_dtype="int32", out_layout="NHWC", ) mod = tvm.IRModule.from_expr(relay.Function([input1], out1)) inputs = {"input": np.random.randint(low=-128, high=127, size=ishape, dtype=dtype)} output_list = generate_ref_data(ref_mod, inputs) compile_and_run( AOTTestModel(module=mod, inputs=inputs, outputs=output_list), runner=AOT_CORSTONE300_RUNNER, interface_api="c", use_unpacked_api=True, target_opts={ "-keys": "arm_cpu", "-mcpu": "cortex-m7", }, ) @tvm.testing.requires_corstone300 @pytest.mark.parametrize( "data_shape_nwc, kernel_size, num_filter, strides, padding", [ ((1, 32, 12), 3, 16, 1, 0), ((3, 12, 10), 4, 24, 1, 0), ((1, 7, 7), 3, 5, 1, 0), ((1, 10, 2), 4, 4, 2, (1, 1)), ((1, 20, 2), 4, 4, 2, (0, 1)), ((1, 16, 4), 1, 12, 1, (1, 0)), ((1, 24, 16), 1, 32, 3, (2, 2)), ], ) @pytest.mark.parametrize("dtype", ["int8", "int16"]) def test_conv1d(data_shape_nwc, kernel_size, num_filter, strides, padding, dtype): """Test a subgraph with a single conv1d operator.""" ishape = data_shape_nwc wshape = (kernel_size, data_shape_nwc[-1], num_filter) weight_data = np.random.randint(low=-10, high=10, size=wshape, dtype=dtype) input0 = relay.var("input", relay.TensorType(ishape, dtype)) weight0 = relay.const(weight_data) out0 = relay.op.nn.conv1d( input0, weight0, strides=strides, padding=padding, data_layout="NWC", kernel_layout="WIO", out
_dtype="int32", out_layout="NWC", ) ref_mod = tvm.IRModule.from_expr(relay.Function([input0], out0)) input1 = relay.var("input", relay.TensorType(ishape, dtype)) weight1 = relay.const(np.moveaxis(weight_data, 1, -1)) out1 = relay.op.nn.conv1d( input1, weight1, strides=strides, padding=padding, data_layout="NWC", kernel_layout="WOI", out_dtype="int32", out_layout="NWC", ) mod = tvm.IRModule.from_expr(relay.Function([input1], out1)) inputs = {"input": np.random.randint(low=-128, high=127, size=ishape, dtype=dtype)} output_list = generate_ref_data(ref_mod, inputs) compile_and_run( AOTTestModel(module=mod, inputs=inputs, outputs=output_list), runner=AOT_CORSTONE300_RUNNER, interface_api="c", use_unpacked_api=True, target_opts={ "-keys": "arm_cpu", "-mcpu": "cortex-m7", }, ) @tvm.testing.requires_corstone300 @pytest.mark.parametrize( "dim_m, dim_k, dim_n", [ (1, 32, 64), (3, 12, 10), ], ) def test_dense(dim_m, dim_k, dim_n): """Test a subgraph with a single dense operator.""" ishape = (dim_m, dim_k) wshape = (dim_n, dim_k) input0 = relay.var("input", relay.TensorType(ishape, "int8")) dense_f = relay.op.nn.batch_flatten(input0) weight0 = relay.const(np.random.randint(low=-10, high=10, size=wshape, dtype="int8")) out = relay.op.nn.dense(dense_f, weight0, out_dtype="int32") mod = tvm.IRModule.from_expr(relay.Function([input0], out)) inputs = {"input": np.random.randint(low=-128, high=127, size=ishape, dtype="int8")} output_list = generate_ref_data(mod, inputs) compile_and_run( AOTTestModel(module=mod, inputs=inputs, outputs=output_list), runner=AOT_CORSTONE300_RUNNER, interface_api="c", use_unpacked_api=True, target_opts={ "-keys": "arm_cpu", "-mcpu": "cortex-m7", }, ) @tvm.testing
.requires_corstone300 @pytest.mark.parametrize( "data_shape_nhwc, pool_size, strides, padding", [ ((1, 32, 32, 1), (3, 3), 1, 0), ((1, 32, 20, 4), (3, 3), (2, 2), 0), ], ) def test_maxpool_2d(data_shape_nhwc, pool_size, strides, padding): """Test a subgraph with a single maxpool_2d operator.""" ishape = data_shape_nhwc input0 = relay.var("input", relay.TensorType(ishape, "int8")) out = relay.op.nn.max_pool2d(input0, pool_size, layout="NHWC", strides=strides, padding=padding) mod = tvm.IRModule.from_expr(relay.Function([input0], out)) inputs = {"input": np.random.randint(low=-128, high=127, size=ishape, dtype="int8")} output_list = generate_ref_data(mod, inputs) compile_and_run( AOTTestModel(module=mod, inputs=inputs, outputs=output_list), runner=AOT_CORSTONE300_RUNNER, interface_api="c", use_unpacked_api=True, target_opts={ "-keys": "arm_cpu", "-mcpu": "cortex-m7", }, ) @tvm.testing.requires_corstone300 @pytest.mark.parametrize( "data_shape_nwc, pool_size, strides, padding", [ ((1, 32, 1), 3, 1, 0), ((1, 20, 4), 3, 2, 0), ], ) def test_maxpool_1d(data_shape_nwc, pool_size, strides, padding): """Test a subgraph with a single maxpool_1d operator.""" ishape = data_shape_nwc input0 = relay.var("input", relay.TensorType(ishape, "int8")) out = relay.op.nn.max_pool1d(input0, pool_size, layout="NWC", strides=strides, padding=padding) mod = tvm.IRModule.from_expr(relay.Function([input0], out)) inputs = {"input": np.random.randint(low=-128, high=127, size=ishape, dtype="int8")} output_list = generate_ref_data(mod, inputs) compile_and_run( AOTTestModel(module=mod, inputs=inputs, outputs=output_list), runner=AOT_CORSTONE300_RUNNER, interface_api="c", use_unpacked_api=True, target_opts={ "-keys": "arm_cpu", "-mcpu": "cortex-m7", }, ) @tvm.testing.r
equires_corstone300 @pytest.mark.parametrize( "data_shape_nchw, pool_size, strides, padding", [ ((1, 1, 32, 32), (3, 3), 1, 0), ((1, 4, 32, 20), (3, 3), (2, 2), 0), ], ) def test_avgpool_2d(data_shape_nchw, pool_size, strides, padding): """Test a subgraph with a single avgpool_2d operator.""" ishape = data_shape_nchw input0 = relay.var("input", relay.TensorType(ishape, "int32")) out0 = relay.nn.avg_pool2d(input0, pool_size, layout="NCHW", strides=strides, padding=padding) ref_mod = tvm.IRModule.from_expr(relay.Function([input0], out0)) input1 = relay.var("input", relay.TensorType(ishape, "int16")) out1 = relay.op.nn.avg_pool2d( input1, pool_size, layout="NCHW", strides=strides, padding=padding ) mod = tvm.IRModule.from_expr(relay.Function([input1], out1)) input_data = np.random.randint(low=-128, high=127, size=ishape, dtype="int32") inputs = {"input": input_data} output_list = generate_ref_data(ref_mod, inputs) compile_and_run( AOTTestModel( module=mod, inputs={"input": input_data.astype(dtype="int16")}, outputs=output_list ), runner=AOT_CORSTONE300_RUNNER, interface_api="c", use_unpacked_api=True, target_opts={ "-keys": "arm_cpu", "-mcpu": "cortex-m7", }, ) @tvm.testing.requires_corstone300 @pytest.mark.parametrize( "data_shape_ncw, pool_size, strides, padding", [ ((1, 1, 32), 3, 1, 0), ((1, 4, 20), 3, 2, 2), ], ) def test_avgpool_1d(data_shape_ncw, pool_size, strides, padding): """Test a subgraph with a single avgpool_1d operator.""" ishape = data_shape_ncw input0 = relay.var("input", relay.TensorType(ishape, "int32")) out0 = relay.op.nn.avg_pool1d(input0, pool_size, layout="NCW", strides=strides, padding=padding) ref_mod = tvm.IRModule.from_expr(relay.Function([input0], out0)) input1 = relay.var("input", relay.TensorType(ishape, "int16")) out1 = relay.op.nn.avg_poo
l1d(input1, pool_size, layout="NCW", strides=strides, padding=padding) mod = tvm.IRModule.from_expr(relay.Function([input1], out1)) input_data = np.random.randint(low=-10, high=10, size=ishape, dtype="int32") inputs = {"input": input_data} output_list = generate_ref_data(ref_mod, inputs) compile_and_run( AOTTestModel( module=mod, inputs={"input": input_data.astype(dtype="int16")}, outputs=output_list ), runner=AOT_CORSTONE300_RUNNER, interface_api="c", use_unpacked_api=True, target_opts={ "-keys": "arm_cpu", "-mcpu": "cortex-m7", }, ) if __name__ == "__main__": tvm.testing.main()
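# Note on the avg_pool tests above: the reference module runs in int32 while
# the module under test runs in int16 on the same data (cast down before
# compile_and_run), so the DSP int16 schedule is checked against the int32
# reference output.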
"""Integration test for MetaSchedule's auto tensorization."""
import tempfile
import numpy as np
import pytest
import tvm
import tvm.testing
import tvm.topi.testing
from tvm import meta_schedule as ms
from tvm import relay
from tvm.meta_schedule.testing import relay_workload
from tvm.meta_schedule.testing.tlcbench import load_quantized_bert_base
from tvm.tir.tensor_intrin.arm_cpu import DP4A_INTRIN
from tvm.tir.tensor_intrin.rocm import AMDGPU_SDOT4_INTRIN
from tvm.tir.tensor_intrin.x86 import VNNI_DOT_16x4_INTRIN as VNNI_INTRIN

SCH_RULES_FOR_VNNI = [
    ms.schedule_rule.ApplyCustomRule(),
    ms.schedule_rule.AutoInline(
        into_producer=False,
        into_consumer=True,
        inline_const_tensor=True,
        disallow_if_then_else=True,
        require_injective=True,
        require_ordered=True,
        disallow_op=["tir.exp"],
    ),
    ms.schedule_rule.AddRFactor(max_jobs_per_core=16, max_innermost_factor=64),
    ms.schedule_rule.MultiLevelTilingWithIntrin(
        VNNI_INTRIN,
        structure="SSRSRS",
        tile_binds=None,
        max_innermost_factor=64,
        vector_load_lens=None,
        reuse_read=None,
        reuse_write=ms.schedule_rule.ReuseType(req="may", levels=[1, 2], scope="global"),
    ),
    ms.schedule_rule.MultiLevelTiling(
        structure="SSRSRS",
        tile_binds=None,
        max_innermost_factor=64,
        vector_load_lens=None,
        reuse_read=None,
        reuse_write=ms.schedule_rule.ReuseType(req="may", levels=[1, 2], scope="global"),
    ),
    ms.schedule_rule.ParallelizeVectorizeUnroll(
        max_jobs_per_core=16,
        max_vectorize_extent=64,
        unroll_max_steps=[0, 16, 64, 512],
        unroll_explicit=True,
    ),
    ms.schedule_rule.RandomComputeLocation(),
]


def _get_sch_rules_for_dp4a(intrin):
    return [
        ms.schedule_rule.MultiLevelTilingWithIntrin(
            intrin,
            structure="SSSRRSRS",
            tile_binds=["blockIdx.x", "vthread.x", "threadIdx.x"],
            max_innermost_factor=64,
            vector_load_lens=[1, 2, 3, 4],
            reuse_read=ms.schedule_rule.ReuseType(req="must", levels=[4], scope="shared"),
            reuse_write=ms.schedule_rule.ReuseType(req="must", levels=[3], scope="local"),
        ),
        ms.schedule_rule.AutoInline(
            into
_producer=True, into_consumer=True, inline_const_tensor=True, disallow_if_then_else=False, require_injective=False, require_ordered=False, disallow_op=None, ), ms.schedule_rule.CrossThreadReduction(thread_extents=[4, 8, 16, 32, 64, 128, 256, 512]), ms.schedule_rule.ParallelizeVectorizeUnroll( max_jobs_per_core=-1, max_vectorize_extent=-1, unroll_max_steps=[0, 16, 64, 512, 1024], unroll_explicit=True, ), ] SCH_RULES_FOR_DP4A = _get_sch_rules_for_dp4a(DP4A_INTRIN) SCH_RULES_FOR_SDOT4 = _get_sch_rules_for_dp4a(AMDGPU_SDOT4_INTRIN) POSTPROCS_FOR_VNNI = [ ms.postproc.DisallowDynamicLoop(), ms.postproc.RewriteParallelVectorizeUnroll(), ms.postproc.RewriteReductionBlock(), ms.postproc.RewriteTensorize(vectorize_init_loop=True), ] POSTPROCS_FOR_DP4A = [ ms.postproc.DisallowDynamicLoop(), ms.postproc.RewriteCooperativeFetch(), ms.postproc.RewriteUnboundBlock(), ms.postproc.RewriteParallelVectorizeUnroll(), ms.postproc.RewriteReductionBlock(), ms.postproc.RewriteTensorize(), ms.postproc.VerifyGPUCode(), ] def tune_and_test(relay_mod, data_np, weight_np, op_name, target, sch_rules, postprocs): """Test tuning.""" tgt = "cuda" if "nvidia" in target else target dev = tvm.device(tgt, 0) ref = ( relay.create_executor("vm", mod=relay_mod, device=dev, target=tgt) .evaluate()(*[data_np, weight_np]) .numpy() ) params = {"weight": weight_np} tune_tasks = list( filter( lambda task: op_name in task.task_name, ms.relay_integration.extract_tasks(relay_mod, target, params), ) ) with tempfile.TemporaryDirectory() as work_dir: tasks, task_weights = ms.relay_integration.extracted_tasks_to_tune_contexts( extracted_tasks=tune_tasks, work_dir=work_dir, space=ms.space_generator.PostOrderApply(
sch_rules=sch_rules, postprocs=postprocs, ), ) database = ms.tune.tune_tasks( tasks=tasks, task_weights=task_weights, work_dir=work_dir, max_trials_global=32, ) with database, tvm.transform.PassContext( opt_level=3, config={"relay.backend.use_meta_schedule": True}, ): lib = relay.build(relay_mod, target=target, params=params) if "cascadelake" in target: asm = lib.lib.get_source("asm") assert "vpdpbusd" in asm runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev)) runtime.set_input("data", data_np) runtime.run() out = runtime.get_output(0).numpy() np.testing.assert_equal(out, ref) def _test_dense(data_dtype, sch_rules, postprocs, target): dim_m, dim_n, dim_k = 1024, 1024, 1024 data_shape = (dim_m, dim_k) weight_shape = (dim_n, dim_k) weight_dtype = "int8" out_dtype = "int32" data = relay.var("data", shape=data_shape, dtype=data_dtype) weight = relay.var("weight", shape=weight_shape, dtype=weight_dtype) dense = relay.nn.dense(data, weight, out_dtype=out_dtype) relay_mod = tvm.IRModule.from_expr(dense) data_np = np.random.uniform(1, 10, size=data_shape).astype(data_dtype) weight_np = np.random.uniform(1, 10, size=weight_shape).astype(weight_dtype) tune_and_test(relay_mod, data_np, weight_np, "dense", target, sch_rules, postprocs) def _test_conv2d(data_dtype, sch_rules, postprocs, target): d_shape = (1, 64, 56, 56) w_shape = (64, 64, 3, 3) weight_dtype = "int8" out_dtype = "int32" data = relay.var("data", shape=d_shape, dtype=data_dtype) weight = relay.var("weight", shape=w_shape, dtype=weight_dtype) out_channel = w_shape[0] conv2d = relay.nn.conv2d( data=data, weight=weight, kernel_size=w_shape[2:], channels=out_channel, padding=(1, 1), strides=(1, 1), out_dtype=out_dtype,
) relay_mod = tvm.IRModule.from_expr(conv2d) data_np = np.random.uniform(1, 10, d_shape).astype(data_dtype) weight_np = np.random.uniform(1, 10, size=w_shape).astype("int8") tune_and_test(relay_mod, data_np, weight_np, "conv2d", target, sch_rules, postprocs) def _test_bert_int8(relay_mod, params, input_info, target, sch_rules, postprocs): relay_mod = relay.transform.FastMath()(relay_mod) tune_tasks = [ task for task in ms.relay_integration.extract_tasks(relay_mod, target, params) if "dense" in task.task_name or "batch_matmul" in task.task_name ] with tempfile.TemporaryDirectory() as work_dir: tasks, task_weights = ms.relay_integration.extracted_tasks_to_tune_contexts( extracted_tasks=tune_tasks, work_dir=work_dir, space=ms.space_generator.PostOrderApply( sch_rules=sch_rules, postprocs=postprocs, ), ) database = ms.tune.tune_tasks( tasks=tasks, task_weights=task_weights, work_dir=work_dir, max_trials_per_task=32, max_trials_global=20000, ) with database, tvm.transform.PassContext( opt_level=3, config={"relay.backend.use_meta_schedule": True}, ): lib = relay.build(relay_mod, target=target, params=params) dev = tvm.device("cuda" if "nvidia" in target else target, 0) runtime = tvm.contrib.graph_executor.GraphModule(lib["default"](dev)) inputs = [] for name, shape in input_info: arr = np.random.uniform(1, 10, size=shape).astype("int64") runtime.set_input(name, arr) inputs.append(arr) print(runtime.benchmark(dev, number=1, repeat=50).mean) @tvm.testing.requires_cascadelake def test_vnni_dense(): _test_dense( "uint8", SCH_RULES_FOR_VNNI, POSTPROCS_FOR_VNNI, "llvm -mcpu=cascadelake -num-cores 4" ) @pytest.mark.skip("Only tested locally on sm_86 (for cuda) which is not supported by CI") @tvm.tes
ting.requires_gpu def test_dp4a_dense(): _test_dense("int8", SCH_RULES_FOR_DP4A, POSTPROCS_FOR_DP4A, "nvidia/geforce-rtx-3070") @tvm.testing.requires_cascadelake def test_vnni_conv2d(): _test_conv2d( "uint8", SCH_RULES_FOR_VNNI, POSTPROCS_FOR_VNNI, "llvm -mcpu=cascadelake -num-cores 4" ) @pytest.mark.skip("Only tested locally on sm_86 (for cuda) which is not supported by CI") @tvm.testing.requires_gpu def test_dp4a_conv2d(): _test_conv2d("int8", SCH_RULES_FOR_DP4A, POSTPROCS_FOR_DP4A, "nvidia/geforce-rtx-3070") @tvm.testing.requires_cascadelake @pytest.mark.skip_if(tvm.testing.IS_IN_CI, reason="Slow on CI") def test_vnni_bert_int8(): relay_mod, params, input_info = load_quantized_bert_base() _test_bert_int8( relay_mod, params, input_info, "llvm -mcpu=cascadelake -num-cores 4", SCH_RULES_FOR_VNNI, POSTPROCS_FOR_VNNI, ) @tvm.testing.requires_gpu @pytest.mark.skip("Slow on CI") def test_dp4a_bert_int8(): relay_mod, params, input_info = load_quantized_bert_base() _test_bert_int8( relay_mod, params, input_info, "nvidia/geforce-rtx-3070", SCH_RULES_FOR_DP4A, POSTPROCS_FOR_DP4A, ) @tvm.testing.requires_gpu @pytest.mark.skip("Slow on CI") @pytest.mark.parametrize( ["model_name", "input_shape"], [("bert_base", (8, 128)), ("resnet_18", (16, 3, 224, 224)), ("resnet_50", (16, 3, 224, 224))], ) def test_cuda_tensor_core(model_name, input_shape): """Integration tests of auto tensorization with CUDA tensor core""" target = tvm.target.Target("nvidia/geforce-rtx-3070") dev = tvm.cuda() if model_name.startswith("bert"): data = tvm.nd.array(np.random.randint(0, 30521, size=input_shape), dev) else: data = tvm.nd.array(np.random.randn(*input_shape).astype("float32"), dev) mod, para
ms, (input_name, _, _) = relay_workload.get_network(model_name, input_shape) seq = tvm.transform.Sequential( [ relay.transform.ToMixedPrecision(), ] ) with tvm.transform.PassContext(opt_level=3): mod = seq(mod) def convert_layout(mod): seq = tvm.transform.Sequential( [relay.transform.ConvertLayout({"nn.conv2d": ["NHWC", "OHWI"]})] ) with tvm.transform.PassContext(opt_level=3): mod = seq(mod) return mod with tempfile.TemporaryDirectory() as work_dir: with ms.Profiler() as profiler: converted_mod = convert_layout(mod) database = ms.relay_integration.tune_relay( mod=converted_mod, target=target, work_dir=work_dir, max_trials_global=3000, params=params, ) rt_mod1 = ms.relay_integration.compile_relay( database=database, mod=converted_mod, target=target, params=params, ) print(profiler.table()) with tvm.transform.PassContext(opt_level=0): rt_mod2 = relay.build(mod, target=target, params=params) def get_output(data, lib): module = tvm.contrib.graph_executor.GraphModule(lib["default"](dev)) module.set_input(input_name, data) module.run() return module.get_output(0).numpy() actual_output = get_output(data, rt_mod1) expected_output = get_output(data, rt_mod2) assert np.allclose(actual_output, expected_output, rtol=1e-2, atol=2e-2) if __name__ == "__main__": tvm.testing.main()
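# DP4A, SDOT4, and VNNI all implement a 4-way int8 dot product accumulating
# into int32, which is what the MultiLevelTilingWithIntrin rules above
# tensorize. A numpy sketch of the scalar semantics (illustrative only):
#   a = np.array([1, -2, 3, 4], dtype=np.int8)
#   b = np.array([5, 6, -7, 8], dtype=np.int8)
#   acc = np.sum(a.astype(np.int32) * b.astype(np.int32))  # one DP4A lane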
"""Test scheduling and running a dot product."""
import numpy as np
import tvm
import tvm.testing
from tvm import te


@tvm.testing.requires_llvm
def test_dot():
    """Test dot product."""
    arr_length = 12
    arr_length_tvm = tvm.runtime.convert(arr_length)
    placeholder_a = te.placeholder((arr_length_tvm,), name="A")
    placeholder_b = te.placeholder((arr_length_tvm,), name="B")
    reduce_axis_k = te.reduce_axis((0, arr_length_tvm), "k")
    result_c = te.compute(
        (),
        lambda: te.sum(
            placeholder_a[reduce_axis_k] * placeholder_b[reduce_axis_k], axis=reduce_axis_k
        ),
        name="C",
    )
    schedule = te.create_schedule(result_c.op)

    def verify(target):
        f = tvm.driver.build(schedule, [placeholder_a, placeholder_b, result_c], target)
        dev = tvm.cpu(0)
        buff_a = tvm.nd.array(
            np.random.uniform(size=(arr_length,)).astype(placeholder_a.dtype), dev
        )
        buff_b = tvm.nd.array(
            np.random.uniform(size=(arr_length,)).astype(placeholder_b.dtype), dev
        )
        buff_c = tvm.nd.array(np.zeros((), dtype=result_c.dtype), dev)
        f(buff_a, buff_b, buff_c)
        tvm.testing.assert_allclose(
            buff_c.numpy(), np.dot(buff_a.numpy(), buff_b.numpy()), rtol=1e-4
        )

    verify("llvm")


if __name__ == "__main__":
    test_dot()
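# C above has shape (), i.e. the whole dot product reduces into a single
# scalar output element; the assertion checks it against np.dot(a, b).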
"""Test elementwise integration."""
import numpy as np
import tvm
import tvm.testing
from tvm import te
from tvm.contrib import nvcc


@tvm.testing.requires_gpu
def test_exp():
    """Test scheduling and running exponent."""
    arr_length = 1024
    arr_length_tvm = tvm.runtime.convert(arr_length)
    placeholder_a = te.placeholder((arr_length_tvm,), name="A")
    placeholder_b = te.compute(placeholder_a.shape, lambda *i: te.exp(placeholder_a(*i)), name="B")
    schedule = te.create_schedule(placeholder_b.op)
    num_thread = 8
    axis1, axis2 = schedule[placeholder_b].split(placeholder_b.op.axis[0], factor=num_thread)
    schedule[placeholder_b].bind(axis1, te.thread_axis("blockIdx.x"))
    schedule[placeholder_b].bind(axis2, te.thread_axis("threadIdx.x"))

    def check_device(device, host="stackvm"):
        if not tvm.testing.device_enabled(host):
            return
        dev = tvm.device(device, 0)
        if not tvm.testing.device_enabled(device):
            print("skip because %s is not enabled.." % device)
            return
        fexp = tvm.build(schedule, [placeholder_a, placeholder_b], device, host, name="myexp")
        buff_a = tvm.nd.array(np.random.uniform(size=arr_length).astype(placeholder_a.dtype), dev)
        buff_b = tvm.nd.array(np.zeros(arr_length, dtype=placeholder_b.dtype), dev)
        fexp(buff_a, buff_b)
        tvm.testing.assert_allclose(buff_b.numpy(), np.exp(buff_a.numpy()), rtol=1e-5)

    check_device("opencl -device=intel_graphics")
    check_device("cuda", "llvm")
    check_device("vulkan")


@tvm.testing.requires_gpu
def test_fmod():
    """Test scheduling and running fmod."""

    def run(dtype):
        size_var_n = te.size_var("n")
        placeholder_a = te.placeholder((size_var_n,), name="A", dtype=dtype)
        placeholder_b = te.placeholder((size_var_n,), name="B", dtype=dtype)
        result_c = te.compute(
            placeholder_a.shape, lambda *i: te.fmod(placeholder_a(*i), placeholder_b(*i)), name="C"
        )
        schedule = te.create_schedule(result_c.op)
        num_thread = 8
        axis0, axis1