def test_mul_one_2d_constant():
class MulOne2DConstant(tf.Module):
"""2D array as input with 2D constant as well; 2D constant stored in params after convert"""
def get_input(self):
return np.ones((2, 2), dtype="float32")
@tf.function(input_signature=[tf.TensorSpec(shape=(2, 2), dtype=tf.float32)])
def func(self, x):
return x * np.ones((2, 2), dtype="float32")
run_all(MulOne2DConstant)
def test_div_one_2d_constant():
class DivOne2DConstant(tf.Module):
"""2D array as input with 2D constant as well; 2D constant stored in params after convert"""
def get_input(self):
return np.ones((2, 2), dtype="float32")
@tf.function(input_signature=[tf.TensorSpec(shape=(2, 2), dtype=tf.float32)])
def func(self, x):
return x / np.ones((2, 2), dtype="float32")
run_all(DivOne2DConstant)
def test_strided_slice():
class StridedSlice(tf.Module):
def get_input(self):
return np.ones((3, 2, 3), dtype=np.float32)
@tf.function(input_signature=[tf.TensorSpec(shape=(3, 2, 3), dtype=tf.float32)])
def func(self, x):
return tf.strided_slice(x, [1, 0, 0], [2, 1, 3], [1, 1, 1])
run_all(StridedSlice)
def test_split():
class Split(tf.Module):
def get_input(self):
return np.ones((1, 30), dtype=np.float32)
@tf.function(input_signature=[tf.TensorSpec(shape=(1, 30), dtype=tf.float32)])
def func(self, x):
a, b, c = tf.split(x, 3, axis=1)
return tf.raw_ops.Pack(values=[a, b, c], axis=1)
run_all(Split)
def test_shape():
class Shape(tf.Module):
def get_input(self):
return np.ones((3, 2, 3), dtype=np.float32)
@tf.function(input_signature=[tf.TensorSpec(shape=(3, 2, 3), dtype=tf.float32)])
def func(self, x):
a = tf.ones_like(tf.raw_ops.Shape(input=x), dtype=tf.float32)
return a + x
run_all(Shape)
def test_pack():
class Pack(tf.Module):
def get_input(self):
return np.ones((2, 3), dtype=np.float32)
@tf.function(input_signature=[tf.TensorSpec(shape=(2, 3), dtype=tf.float32)])
def func(self, x):
return tf.raw_ops.Pack(values=[x, x], axis=0)
run_all(Pack)
def test_max():
class Maximum(tf.Module):
def get_input(self):
return np.ones((1, 30), dtype=np.float32)
@tf.function(input_signature=[tf.TensorSpec(shape=(1, 30), dtype=tf.float32)])
def func(self, x):
a, b = tf.split(x, 2, axis=1)
return tf.math.maximum(a, b, name=None)
run_all(Maximum)
def test_less():
class Less(tf.Module):
def get_input(self):
return np.ones((1, 30), dtype=np.float32)
@tf.function(input_signature=[tf.TensorSpec(shape=(1, 30), dtype=tf.float32)])
def func(self, x):
a, b = tf.split(x, 2, axis=1)
return tf.math.less(a, b, name=None)
run_all(Less)
def test_equal():
class Equal(tf.Module):
def get_input(self):
return np.ones((1, 30), dtype=np.float32)
@tf.function(input_signature=[tf.TensorSpec(shape=(1, 30), dtype=tf.float32)])
def func(self, x):
a, b = tf.split(x, 2, axis=1)
return tf.math.equal(a, b, name=None)
run_all(Equal)
def test_cast():
class Cast(tf.Module):
def get_input(self):
return np.ones((1, 30), dtype=np.float32)
@tf.function(input_signature=[tf.TensorSpec(shape=(1, 30), dtype=tf.float32)])
def func(self, x):
return tf.cast(x, tf.int32)
run_all(Cast)
def test_expand_dims():
class ExpandDims(tf.Module):
def get_input(self):
return np.ones((1, 30), dtype=np.float32)
@tf.function(input_signature=[tf.TensorSpec(shape=(1, 30), dtype=tf.float32)])
def func(self, x):
return tf.expand_dims(x, axis=2)
run_all(ExpandDims)
def test_transpose():
class Transpose(tf.Module):
def get_input(self):
return np.ones((1, 30), dtype=np.float32)
@tf.function(input_signature=[tf.TensorSpec(shape=(1, 30), dtype=tf.float32)])
def func(self, x):
x = tf.expand_dims(x, axis=2)
return tf.transpose(x, perm=[0, 2, 1])
run_all(Transpose)
def test_reshape():
class Reshape(tf.Module):
def get_input(self):
return np.ones((1, 30), dtype=np.float32)
@tf.function(input_signature=[tf.TensorSpec(shape=(1, 30), dtype=tf.float32)])
def func(self, x):
return tf.reshape(x, (1, 2, 15))
run_all(Reshape)
def test_tanh():
class Tanh(tf.Module):
def get_input(self):
return np.ones((1, 30), dtype=np.float32)
@tf.function(input_signature=[tf.TensorSpec(shape=(1, 30), dtype=tf.float32)])
def func(self, x):
return tf.math.tanh(x)
run_all(Tanh)
def test_sigmoid():
class Sigmoid(tf.Module):
def get_input(self):
return np.ones((1, 30), dtype=np.float32)
@tf.function(input_signature=[tf.TensorSpec(shape=(1, 30), dtype=tf.float32)])
def func(self, x):
return tf.math.sigmoid(x)
run_all(Sigmoid)
def test_relu():
class Relu(tf.Module):
def get_input(self):
return np.ones((1, 30), dtype=np.float32)
@tf.function(input_signature=[tf.TensorSpec(shape=(1, 30), dtype=tf.float32)])
def func(self, x):
return tf.nn.relu(x)
run_all(Relu)
def test_floor():
class Floor(tf.Module):
def get_input(self):
return np.ones((1, 30), dtype=np.float32)
@tf.function(input_signature=[tf.TensorSpec(shape=(1, 30), dtype=tf.float32)])
def func(self, x):
return tf.math.floor(x)
run_all(Floor)
def test_floor_mod():
class FloorMod(tf.Module):
def get_input(self):
return np.ones((1, 30), dtype=np.float32)
@tf.function(input_signature=[tf.TensorSpec(shape=(1, 30), dtype=tf.float32)])
def func(self, x):
a, b = tf.split(x, 2, axis=1)
return tf.math.floormod(a, b)
run_all(FloorMod)
def test_concat_v2():
class ConcatV2(tf.Module):
def get_input(self):
return np.ones((1, 30), dtype=np.float32)
@tf.function(input_signature=[tf.TensorSpec(shape=(1, 30), dtype=tf.float32)])
def func(self, x):
a, b, c = tf.split(x, 3, axis=1)
axis = tf.add(tf.constant(1, dtype="int32"), tf.constant(0, dtype="int32"))
return tf.raw_ops.ConcatV2(values=[a, b, c], axis=axis)
run_all(ConcatV2)
def test_multi_output():
class MultiOutput(tf.Module):
def get_input(self):
return np.ones((2, 2), dtype="float32")
@tf.function(input_signature=[tf.TensorSpec(shape=(2, 2), dtype=tf.float32)])
def func(self, x):
y = 2 * x
return x, y
run_func_graph(MultiOutput, runtime="vm", outputs=["Identity:output:0", "Identity_1:output:0"])
run_func_graph(
MultiOutput, runtime="graph", outputs=["Identity:output:0", "Identity_1:output:0"]
)
run_model_graph(MultiOutput, outputs=["Identity:output:0"])
def test_if():
    def create_if_class(_condition=True):
class If(tf.Module):
def get_input(self):
return np.ones((2, 2), dtype="float32")
@tf.function(input_signature=[tf.TensorSpec(shape=(2, 2), dtype=tf.float32)])
def func(self, x):
@tf.function(input_signature=[tf.TensorSpec(shape=(2, 2), dtype=tf.float32)])
def double(x):
return 2 * x
@tf.function(input_signature=[tf.TensorSpec(shape=(2, 2), dtype=tf.float32)])
def triple(x):
return 3 * x
output = tf.raw_ops.If(
cond=_condition,
input=[x],
Tout=[tf.float32],
output_shapes=[(2, 2)],
then_branch=double.get_concrete_function(),
else_branch=triple.get_concrete_function(),
)
return output[0]
return If
for cond in [True, False]:
if_class = create_if_class(_condition=cond)
run_func_graph(if_class, runtime="vm")
run_model_graph(if_class)
def test_stateless_while():
class StatelessWhile(tf.Module):
def get_input(self):
return np.array([6], dtype="float32")
@tf.function(input_signature=[tf.TensorSpec(shape=(1,), dtype=tf.float32)])
def func(self, x):
i = tf.constant(3.0)
cond = lambda i: tf.less(i, x)
body = lambda i: (tf.add(i, 2),)
r = tf.while_loop(cond, body, [i])
return r[0]
run_func_graph(StatelessWhile, runtime="vm")
run_model_graph(StatelessWhile)
def test_stateless_while_2var():
class StatelessWhile2Var(tf.Module):
def get_input(self):
return np.array([20], dtype="float32")
@tf.function(input_signature=[tf.TensorSpec(shape=(1,), dtype=tf.float32)])
def func(self, x):
i = tf.constant(3.0)
j = tf.constant(5.0)
cond = lambda i, j: tf.less(i + j, x)
body = lambda i, j: (tf.add(i, 2), tf.add(j, 3))
r = tf.while_loop(cond, body, [i, j])
return r
run_func_graph(
StatelessWhile2Var, runtime="vm", outputs=["Identity:output:0", "Identity_1:output:0"]
)
run_model_graph(StatelessWhile2Var, outputs=["Identity:output:0"])
def test_tensorlist():
    def run_test(elem_shape):
class TensorList(tf.Module):
def get_input(self):
in_tens = np.ones((2, 3), dtype="float32")
in_tens[1, :] = np.zeros((3,), dtype="float32")
return in_tens
@tf.function(input_signature=[tf.TensorSpec(shape=(2, 3), dtype=tf.float32)])
def func(self, x):
dtype = tf.float32
tl = tf.raw_ops.TensorListReserve(
element_shape=elem_shape, num_elements=2, element_dtype=dtype
)
tl = tf.raw_ops.TensorListSetItem(input_handle=tl, index=0, item=x[0, :])
tl = tf.raw_ops.TensorListSetItem(input_handle=tl, index=1, item=x[1, :])
output = tf.raw_ops.TensorListGetItem(
input_handle=tl, index=0, element_shape=elem_shape, element_dtype=dtype
)
return output
run_model_graph(TensorList)
run_func_graph(TensorList, runtime="vm")
run_test((3,))
run_test((-1,))
def test_tensorlist_stack():
    def run_test(elem_shape):
class TensorListStack(tf.Module):
def get_input(self):
in_tens = np.ones((2, 3), dtype="float32")
in_tens[1] = np.zeros((3,), dtype="float32")
return in_tens
@tf.function(input_signature=[tf.TensorSpec(shape=(2, 3), dtype=tf.float32)])
def func(self, x):
dtype = tf.float32
tl = tf.raw_ops.TensorListReserve(
element_shape=elem_shape, num_elements=2, element_dtype=dtype
)
tl = tf.raw_ops.TensorListFromTensor(tensor=x, element_shape=elem_shape)
output = tf.raw_ops.TensorListStack(
input_handle=tl, element_shape=elem_shape, element_dtype=dtype
)
return output
run_model_graph(TensorListStack)
run_func_graph(TensorListStack, runtime="vm")
run_test((3,))
run_test((-1,))
def test_tensorlist_2d():
    def run_test(elem_shape):
class TensorList2D(tf.Module):
def get_input(self):
in_tens = np.ones((2, 3, 4), dtype="float32")
in_tens[1, :, :] = np.zeros((3, 4), dtype="float32")
return in_tens
@tf.function(input_signature=[tf.TensorSpec(shape=(2, 3, 4), dtype=tf.float32)])
def func(self, x):
dtype = tf.float32
tl = tf.raw_ops.TensorListReserve(
element_shape=elem_shape, num_elements=2, element_dtype=dtype
)
tl = tf.raw_ops.TensorListSetItem(input_handle=tl, index=0, item=x[0, :, :])
tl = tf.raw_ops.TensorListSetItem(input_handle=tl, index=1, item=x[1, :, :])
output = tf.raw_ops.TensorListGetItem(
input_handle=tl, index=0, element_shape=elem_shape, element_dtype=dtype
)
return output
run_model_graph(TensorList2D)
run_func_graph(TensorList2D, runtime="vm")
run_test((3, 4))
run_test((-1, -1))
def test_tensorlist_stack_2d():
    def run_test(elem_shape):
class TensorListStack2D(tf.Module):
def get_input(self):
in_tens = np.ones((2, 3, 4), dtype="float32")
in_tens[1, :, :] = np.zeros((3, 4), dtype="float32")
return in_tens
@tf.function(input_signature=[tf.TensorSpec(shape=(2, 3, 4), dtype=tf.float32)])
def func(self, x):
dtype = tf.float32
tl = tf.raw_ops.TensorListReserve(
element_shape=elem_shape, num_elements=2, element_dtype=dtype
)
tl = tf.raw_ops.TensorListFromTensor(tensor=x, element_shape=elem_shape)
output = tf.raw_ops.TensorListStack(
input_handle=tl, element_shape=elem_shape, element_dtype=dtype
)
return output
run_model_graph(TensorListStack2D)
run_func_graph(TensorListStack2D, runtime="vm")
run_test((3, 4))
run_test((-1, -1))
def test_tensorlist_stack_unpack():
    def run_test(elem_shape):
class TensorListStack2D(tf.Module):
def get_input(self):
in_tens = np.ones((1, 3, 4), dtype="float32")
return in_tens
@tf.function(input_signature=[tf.TensorSpec(shape=(1, 3, 4), dtype=tf.float32)])
def func(self, x):
dtype = tf.float32
tl = tf.raw_ops.TensorListReserve(
element_shape=elem_shape, num_elements=1, element_dtype=dtype
)
tl = tf.raw_ops.TensorListSetItem(input_handle=tl, index=0, item=x[0, :, :])
output = tf.raw_ops.TensorListStack(
input_handle=tl, element_shape=elem_shape, element_dtype=dtype, num_elements=1
)
output = tf.raw_ops.Unpack(value=output, num=1, axis=0)
return output
run_model_graph(TensorListStack2D)
run_func_graph(TensorListStack2D, runtime="vm")
run_test((3, 4))
run_test((-1, -1))
def test_bincount_1d():
    def run_test(weights, minlength, maxlength, axis, binary_output):
class Bincount1D(tf.Module):
def get_input(self):
return np.random.uniform(low=0, high=maxlength, size=(100,)).astype("int32")
@tf.function(input_signature=[tf.TensorSpec(shape=[None], dtype=tf.int32)])
def func(self, x):
return tf.math.bincount(
x,
weights=weights,
minlength=minlength,
maxlength=maxlength,
axis=axis,
binary_output=binary_output,
)
run_model_graph(Bincount1D)
run_func_graph(Bincount1D, runtime="vm")
for axis in [None, 0, -1]:
run_test(weights=None, minlength=20, maxlength=20, axis=axis, binary_output=False)
run_test(weights=None, minlength=20, maxlength=20, axis=axis, binary_output=True)
weights = np.random.uniform(low=0.2, high=5, size=(100,)).astype("float32")
for axis in [0, -1]:
run_test(weights=weights, minlength=20, maxlength=20, axis=axis, binary_output=False)
def test_bincount_2d():
    def run_test(weights, minlength, maxlength, axis, binary_output):
class Bincount2D(tf.Module):
def get_input(self):
return np.random.uniform(low=0, high=maxlength, size=(3, 100)).astype("int32")
@tf.function(input_signature=[tf.TensorSpec([None, None], tf.int32)])
def func(self, x):
return tf.math.bincount(
x,
weights=weights,
minlength=minlength,
maxlength=maxlength,
axis=axis,
binary_output=binary_output,
)
run_model_graph(Bincount2D)
run_func_graph(Bincount2D, runtime="vm")
for axis in [None, 0, -1]:
run_test(weights=None, minlength=20, maxlength=20, axis=axis, binary_output=False)
run_test(weights=None, minlength=20, maxlength=20, axis=axis, binary_output=True)
weights = np.random.uniform(low=0.2, high=5, size=(3, 100)).astype("float32")
for axis in [0, -1]:
run_test(weights=weights, minlength=20, maxlength=20, axis=axis, binary_output=False)
if __name__ == "__main__":
    pytest.main([__file__])
"""TF2 to relay converter test: testing models built with tf.keras.Sequential()""" |
import tempfile |
import numpy as np |
import pytest |
import tensorflow as tf
from tensorflow.python.framework.convert_to_constants |
import convert_variables_to_constants_v2
from common |
import compare_tf_tvm
from common |
import run_tf_code
def run_sequential_model(model_fn, input_shape):
def get_input(shape):
_input = np.random.uniform(0, 1, shape).astype(dtype="float32")
return _input
def save_and_reload(_model):
with tempfile.TemporaryDirectory() as model_path:
tf.saved_model.save(_model, model_path)
loaded = tf.saved_model.load(model_path)
func = loaded.signatures["serving_default"]
frozen_func = convert_variables_to_constants_v2(func)
return frozen_func
def model_graph(model, input_shape):
_input = get_input(input_shape)
f = save_and_reload(model(input_shape))
_output = run_tf_code(f, _input)
gdef = f.graph.as_graph_def(add_shapes=True)
return gdef, _input, _output
compare_tf_tvm(*model_graph(model_fn, input_shape), runtime="vm")
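
# run_sequential_model drives the whole pipeline for the tests below: build the
# tf.keras model, freeze its variables with convert_variables_to_constants_v2,
# run the frozen graph in TF for a reference output, and hand the resulting
# (graph_def, input, output) triple to compare_tf_tvm.
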
def test_dense_model():
def dense_model(input_shape, num_units=128):
return tf.keras.Sequential(
[tf.keras.layers.Flatten(input_shape=input_shape[1:]), tf.keras.layers.Dense(num_units)]
)
run_sequential_model(dense_model, input_shape=(1, 28, 28))
def test_mnist_model():
def mnist_model(input_shape):
return tf.keras.Sequential(
[
tf.keras.layers.Flatten(input_shape=input_shape[1:]),
tf.keras.layers.Dense(128, activation="relu"),
tf.keras.layers.Dense(10),
]
)
run_sequential_model(mnist_model, input_shape=(1, 28, 28))
def test_conv2d_model():
def conv2d_model(input_shape, kernel=(3, 3), filters=16):
model = tf.keras.Sequential(
[
tf.keras.layers.Input(shape=input_shape[1:], batch_size=1),
tf.keras.layers.Conv2D(filters, kernel),
]
)
return model
run_sequential_model(conv2d_model, input_shape=(1, 32, 32, 3))
def test_maxpool_model():
def maxpool_model(input_shape, pool_size=(2, 2)):
model = tf.keras.Sequential(
            [tf.keras.layers.MaxPool2D(pool_size=pool_size, input_shape=input_shape[1:])]
)
return model
run_sequential_model(maxpool_model, input_shape=(1, 32, 32, 3))
def test_maxpool_batchnorm_model():
def maxpool_batchnorm_model(input_shape, pool_size=(2, 2)):
model = tf.keras.Sequential(
[
tf.keras.layers.MaxPool2D(pool_size=pool_size, input_shape=input_shape[1:]),
tf.keras.layers.BatchNormalization(),
]
)
return model
run_sequential_model(maxpool_batchnorm_model, input_shape=(1, 32, 32, 3))
def test_tensorlist_stack_model():
    def tensorlist_stack_model(input_shape):
class TensorArrayStackLayer(tf.keras.layers.Layer):
def __init__(self):
super().__init__()
def call(self, inputs):
inputs = tf.squeeze(inputs)
outputs = tf.TensorArray(
tf.float32,
size=inputs.shape[0],
infer_shape=False,
element_shape=inputs.shape[1:],
)
outputs = outputs.unstack(inputs)
return outputs.stack()
input_shape = (3, 32)
model = tf.keras.Sequential(
[tf.keras.layers.Input(shape=input_shape, batch_size=1), TensorArrayStackLayer()]
)
return model
run_sequential_model(tensorlist_stack_model, input_shape=(3, 32))
def test_tensorlist_read_model():
    def tensorlist_read_model(input_shape):
class TensorArrayReadLayer(tf.keras.layers.Layer):
def __init__(self):
super().__init__()
def call(self, inputs):
inputs = tf.squeeze(inputs)
outputs = tf.TensorArray(
tf.float32,
size=inputs.shape[0],
infer_shape=False,
element_shape=inputs.shape[1:],
)
for i in range(inputs.shape[0]):
outputs = outputs.write(i, inputs[i, :])
return outputs.read(0)
input_shape = (3, 32)
model = tf.keras.Sequential(
[tf.keras.layers.Input(shape=input_shape, batch_size=1), TensorArrayReadLayer()]
)
return model
run_sequential_model(tensorlist_read_model, input_shape=(3, 32))
if __name__ == "__main__":
    pytest.main([__file__])
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from tvm.relay.frontend.common import StrAttrsDict
def test_key_is_present():
attrs = StrAttrsDict({"a": 1})
assert attrs.has_attr("a")
def test_key_is_not_present():
attrs = StrAttrsDict({"a": 1})
assert not attrs.has_attr("b")
if __name__ == "__main__":
test_key_is_present()
    test_key_is_not_present()
"""
TFLite testcases
================
This is a test script for testing TFLite operators with Relay.
"""
from __future__ import print_function
from functools import partial
from distutils.version import LooseVersion
import os
import tempfile

from packaging import version as package_version
import pytest
import numpy as np
from PIL import Image

import tvm
import tvm.relay.testing.tf as tf_testing
from tvm.contrib.download import download_testdata
from tvm import relay
from tvm.contrib import graph_executor

from tflite.BuiltinOperator import BuiltinOperator

try:
    import tensorflow.compat.v1 as tf
    import tensorflow.compat.v1 as ops
except ImportError:
    import tensorflow as tf
    import tensorflow as ops
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.ops import variables

try:
    from tensorflow import lite as interpreter_wrapper
except ImportError:
    from tensorflow.contrib import lite as interpreter_wrapper
def convert_to_list(x):
if not isinstance(x, list):
x = [x]
return x
def get_real_image(im_height, im_width, quantized=True):
repo_base = "https:
img_name = "elephant-299.jpg"
image_url = os.path.join(repo_base, img_name)
img_path = download_testdata(image_url, img_name, module="data")
image = Image.open(img_path).resize((im_height, im_width))
x = np.array(image).astype("uint8") if quantized else np.array(image).astype("float32")
data = np.reshape(x, (1, im_height, im_width, 3))
return data
def pre_processed_image(height, width):
"""Image preprocessed"""
repo_base = "https:
img_name = "elephant-299.jpg"
image_url = os.path.join(repo_base, img_name)
img_path = download_testdata(image_url, img_name, module="data")
image = tf.io.read_file(img_path)
image = tf.image.decode_jpeg(image, channels=3)
with tf.name_scope("eval_image"):
if image.dtype != tf.float32:
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
image = tf.image.central_crop(image, central_fraction=0.875)
image = tf.image.resize(image, [height, width], align_corners=False)
image = tf.expand_dims(image, axis=0)
return image
def get_real_image_object_detection(im_height, im_width):
repo_base = "https:
img_name = "street_small.jpg"
image_url = os.path.join(repo_base, img_name)
img_path = download_testdata(image_url, img_name, module="data")
image = Image.open(img_path).resize((im_height, im_width))
x = np.array(image).astype("uint8")
data = np.reshape(x, (1, im_height, im_width, 3))
return data
def vmobj_to_list(obj):
"""Converts TVM objects returned by VM execution to Python List."""
if isinstance(obj, tvm.nd.NDArray):
return [obj.numpy().tolist()]
elif isinstance(obj, tvm.runtime.container.ADT):
result = []
for f in obj:
result.extend(vmobj_to_list(f))
return result
    elif isinstance(obj, tvm.relay.backend.interpreter.ConstructorValue):
if obj.constructor.name_hint == "Cons":
t_l = vmobj_to_list(obj.fields[1])
h_d = vmobj_to_list(obj.fields[0])
h_d.extend(t_l)
return h_d
elif obj.constructor.name_hint == "Nil":
return []
elif "tensor_nil" in obj.constructor.name_hint:
return [0]
elif "tensor" in obj.constructor.name_hint:
return [obj.fields[0].numpy()]
else:
raise RuntimeError(f"Unknown object type: {obj.constructor.name_hint}")
else:
raise RuntimeError(f"Unknown object type: {type(obj)}")
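
def _example_vmobj_to_list():
    """Illustrative sketch only (not a test): a bare tvm.nd.NDArray flattens
    to a one-element Python list of nested lists."""
    arr = tvm.nd.array(np.array([1.0, 2.0], dtype="float32"))
    assert vmobj_to_list(arr) == [[1.0, 2.0]]
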
def _quantize_keras_model(
keras_model,
representative_data_gen,
is_float_input=False,
is_float_output=False,
int_quant_dtype=tf.int8,
):
"""Utility function to quantize a Keras model using TFLite converter."""
converter = interpreter_wrapper.TFLiteConverter.from_keras_model(keras_model)
if int_quant_dtype == tf.int8:
converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
converter.representative_dataset = representative_data_gen
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
inference_dtype = tf.uint8
elif int_quant_dtype == tf.int16:
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_data_gen
converter.target_spec.supported_ops = [
tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8
]
inference_dtype = tf.uint16
else:
raise RuntimeError(
f"Invalid quantized dtype {int_quant_dtype}. Supported types: int8, int16."
)
if not is_float_input:
converter.inference_input_type = inference_dtype
if not is_float_output:
converter.inference_output_type = inference_dtype
return converter.convert()
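
def _example_quantize_keras_model():
    """Illustrative sketch only (not a test): int8-quantize a tiny,
    hypothetical single-Dense Keras model with a one-batch representative
    dataset, keeping float input and output."""
    data = np.random.uniform(0, 1, (1, 4)).astype("float32")
    inp = tf.keras.layers.Input(shape=(4,))
    out = tf.keras.layers.Dense(2)(inp)
    keras_model = tf.keras.models.Model(inp, out)

    def representative_data_gen():
        for _ in range(1):
            yield [data]

    return _quantize_keras_model(
        keras_model, representative_data_gen, is_float_input=True, is_float_output=True
    )
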
def run_tvm_graph(
tflite_model_buf,
input_data,
    input_node,
num_output=1,
target="llvm",
out_names=None,
mode="graph_executor",
op_converter=relay.frontend.tflite.OperatorConverter,
):
"""Generic function to compile on relay and execute on tvm"""
    try:
import tflite.Model
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0)
    except AttributeError:
import tflite
tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0)
except ImportError as exc:
raise ImportError("The tflite package must be installed") from exc
input_data = convert_to_list(input_data)
input_node = convert_to_list(input_node)
shape_dict = {}
dtype_dict = {}
for i, node in enumerate(input_node):
shape_dict[node] = input_data[i].shape
dtype_dict[node] = input_data[i].dtype.name
mod, params = relay.frontend.from_tflite(
tflite_model, shape_dict=shape_dict, dtype_dict=dtype_dict, op_converter=op_converter
)
if mode in ["debug", "vm"]:
inputs = []
for param in mod["main"].params:
found = False
for i, n in enumerate(input_node):
if n == param.name_hint:
found = True
inputs.append(tvm.nd.array(input_data[i]))
break
if not found:
inputs.append(tvm.nd.array(params[param.name_hint]))
result = relay.create_executor(mode, mod=mod, device=tvm.cpu(), target="llvm").evaluate()(
*inputs
)
return vmobj_to_list(result)
else:
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(mod, target, params=params)
dev = tvm.device(target, 0)
m = graph_executor.GraphModule(lib["default"](dev))
for i, node in enumerate(input_node):
m.set_input(node, tvm.nd.array(input_data[i].astype(input_data[i].dtype)))
m.run()
assert out_names is None or num_output == len(
out_names
), f"out_names: {out_names} num_output: {num_output}"
tvm_output_list = []
for i in range(0, num_output):
tvm_output = m.get_output(i)
tvm_output_list.append(tvm_output.numpy())
return tvm_output_list
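
# run_tvm_graph imports the TFLite flatbuffer into Relay via
# relay.frontend.from_tflite, then either evaluates it with the "debug"/"vm"
# executors (binding inputs by parameter name and falling back to converted
# params) or builds a graph_executor module and collects num_output outputs.
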
def run_tflite_graph(tflite_model_buf, input_data):
"""Generic function to execute TFLite""" |
input_data = convert_to_list(input_data)
interpreter = interpreter_wrapper.Interpreter(model_content=tflite_model_buf)
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
for i, input_detail in enumerate(input_details):
interpreter.resize_tensor_input(input_detail["index"], input_data[i].shape)
interpreter.allocate_tensors()
assert len(input_data) == len(input_details)
for i, input_detail in enumerate(input_details):
interpreter.set_tensor(input_detail["index"], input_data[i])
interpreter.invoke()
tflite_output = []
for _, output_detail in enumerate(output_details):
tflite_output.append(interpreter.get_tensor(output_detail["index"]))
return tflite_output
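
def _example_run_tflite_graph():
    """Illustrative sketch only (not a test): convert a one-op TF1 graph to
    TFLite and execute it with run_tflite_graph."""
    data = np.ones((1, 2), dtype="float32")
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=(1, 2), dtype="float32", name="in_data")
        out = math_ops.tanh(in_data)
        with tf.Session() as sess:
            buf = tf.lite.TFLiteConverter.from_session(sess, [in_data], [out]).convert()
    return run_tflite_graph(buf, data)
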
def compare_tflite_with_tvm(
in_data,
in_name,
input_tensors,
output_tensors,
init_global_variables=False,
out_names=None,
quantized=False,
input_range=None,
mode="graph_executor",
experimental_new_converter=False,
fp16_quantized=False,
int_quant_dtype=tf.int8,
):
"""Generic function to generate and compare TFLite and TVM output"""
in_data = convert_to_list(in_data)
in_name = convert_to_list(in_name)
out_names = convert_to_list(out_names)
in_node = [0] * len(in_name)
for i, _ in enumerate(in_name):
in_node[i] = in_name[i].split(":")[0] if ":" in in_name[i] else in_name[i]
with tf.Session() as sess:
if init_global_variables:
sess.run(variables.global_variables_initializer())
converter = tf.lite.TFLiteConverter.from_session(sess, input_tensors, output_tensors)
converter.experimental_new_converter = experimental_new_converter
if quantized:
if int_quant_dtype == tf.int16:
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_ops = [
                    tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8
]
else:
converter.inference_type = tf.lite.constants.QUANTIZED_UINT8
input_arrays = converter.get_input_arrays()
input_stats = {}
for i in input_arrays:
try:
quant_scale = 255 / (input_range[i][1] - input_range[i][0])
except ZeroDivisionError:
print("Min and max of the input range for tensor " + i + " can't be equal")
mean = -input_range[i][0] * quant_scale
input_stats[i] = (mean, quant_scale)
converter.quantized_input_stats = input_stats
elif fp16_quantized:
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_types = [tf.float16]
tflite_model_buffer = converter.convert()
tflite_output = run_tflite_graph(tflite_model_buffer, in_data)
for device in ["llvm"]:
_ = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print(f"Skip because {device} is not enabled")
continue
tvm_output = run_tvm_graph(
tflite_model_buffer,
in_data,
in_node,
target=device,
num_output=len(out_names),
out_names=out_names,
mode=mode,
)
if quantized and not fp16_quantized:
for i, _ in enumerate(tflite_output):
tvm.testing.assert_allclose(
tflite_output[i],
tvm_output[i],
atol=1,
rtol=1e-5,
)
else:
for i, _ in enumerate(tflite_output):
tvm.testing.assert_allclose(
                    tflite_output[i],
tvm_output[i],
atol=1e-5,
rtol=1e-5,
)
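
def _example_compare_tflite_with_tvm():
    """Illustrative sketch only (not a test): the minimal
    placeholder -> op -> compare pattern that the _test_* helpers below follow."""
    data = np.ones((1, 4), dtype="float32")
    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype, name="in_data")
        out = math_ops.tanh(in_data)
        compare_tflite_with_tvm([data], ["in_data:0"], [in_data], [out])
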
def with_fused_activation_function(input_tensor, fn_name):
"""Fused activation function"""
if fn_name is None or fn_name == "NONE":
return input_tensor
if fn_name == "RELU":
return nn_ops.relu(input_tensor)
if fn_name == "RELU6":
return nn_ops.relu6(input_tensor)
if fn_name == "RELU_N1_TO_1":
return math_ops.maximum(-1, math_ops.minimum(input_tensor, 1))
if fn_name == "TANH":
return math_ops.tanh(input_tensor)
raise AssertionError(f"Unknown fused_activation_function {fn_name}")
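
def _example_fused_activation():
    """Illustrative sketch only (not a test): RELU_N1_TO_1 clamps values
    into [-1, 1]."""
    with tf.Graph().as_default():
        x = constant_op.constant([-2.0, 0.5, 3.0])
        y = with_fused_activation_function(x, "RELU_N1_TO_1")
        with tf.Session() as sess:
            assert sess.run(y).tolist() == [-1.0, 0.5, 1.0]
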
def _test_split(in_shape, axis, num_splits, dtype):
"""internal split tester taking as parameters in_shape, number of tensors to split into
and dtype (data type)"""
np_data = np.random.uniform(-5, 5, size=in_shape).astype(dtype)
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=in_shape, dtype=dtype, name="in_data")
out = array_ops.split(in_data, num_splits, axis=axis)
num_splits = len(num_splits) if isinstance(num_splits, list) else num_splits
out_names = ["out_" + str(n) + ":0" for n in range(num_splits)]
compare_tflite_with_tvm([np_data], ["in_data"], [in_data], out, out_names=out_names)
def test_forward_split():
"""test split layer"""
_test_split((3,), 0, 1, "float32")
_test_split((3,), 0, 3, "float32")
_test_split((6,), 0, 3, "float32")
_test_split((6, 2), 0, 3, "float32")
_test_split((2, 6), 1, 6, "float32")
if package_version.parse(tf.VERSION) >= package_version.parse("1.14.0"):
_test_split((6, 2, 4), 0, 2, "int32")
_test_split((2, 6, 4), 1, 3, "float32")
_test_split((2, 4, 6), 2, 1, "float32")
_test_split((6, 1, 3, 5), 0, 3, "float32")
_test_split((1, 6, 3, 5), 1, 3, "float32")
_test_split((1, 3, 6, 5), 2, 3, "float32")
_test_split((1, 3, 5, 6), 3, 3, "float32")
        _test_split((6, 1, 3, 5), -4, 3, "float32")
_test_split((1, 6, 3, 5), -3, 3, "float32")
_test_split((1, 3, 6, 5), -2, 3, "float32")
_test_split((1, 3, 5, 6), -1, 3, "float32")
_test_split((6,), 0, [1, 2, 3], "float32")
_test_split((3, 6, 4), -2, [1, 4, 1], "float32")
def _test_slice(data, begin, size):
"""One iteration of SLICE"""
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
out = array_ops.slice(in_data, begin, size)
compare_tflite_with_tvm(data, "Placeholder:0", [in_data], [out])
def test_forward_slice():
"""SLICE"""
_test_slice(np.arange(4, dtype=np.float32).reshape((4,)), begin=[0], size=[2])
_test_slice(np.arange(18, dtype=np.int32).reshape((3, 2, 3)), begin=[1, 0, 0], size=[1, 1, 3])
if package_version.parse(tf.VERSION) >= package_version.parse("1.14.0"):
_test_slice(np.arange(8, dtype=np.int32).reshape((2, 4)), begin=[0, 1], size=[-1, -1])
_test_slice(np.arange(5, dtype=np.int32).reshape((5,)), begin=[4], size=[-1])
def _test_topk(in_shape, k=1):
"""One iteration of TOPK"""
data = np.random.uniform(size=in_shape).astype("float32")
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
out = nn_ops.top_k(in_data, k, name="TopK")
compare_tflite_with_tvm(data, "Placeholder:0", [in_data], [out[0]])
def test_forward_topk():
"""TOPK"""
_test_topk((3,), 1)
_test_topk((3,), 3)
_test_topk((3, 5, 7), 3)
_test_topk((3, 5, 7), 3)
def _test_gather(dshape, indices, axis, dtype, quantized=False, oob=False, wrap_idx=False):
"""One iteration of Gather"""
indices = np.asarray(indices).astype("int32")
data = np.random.uniform(1, 10, size=dshape)
data = data.astype(np.uint8) if quantized else data.astype(dtype)
with tf.Graph().as_default():
if wrap_idx:
in_name = "in_indices"
            indices_expr = array_ops.placeholder(
shape=indices.shape, dtype=indices.dtype, name=in_name
)
in_tensor_name = [in_name + ":0"]
in_indices = [indices_expr]
else:
indices_expr = indices
indices = []
in_tensor_name = []
in_indices = []
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype, name="in_data")
if axis:
out = array_ops.gather(in_data, indices_expr, axis=axis)
else:
out = array_ops.gather(in_data, indices_expr)
input_range = {"in_data": (-100, 100)} if quantized else None
try:
compare_tflite_with_tvm(
[data] + indices,
["in_data:0"] + in_tensor_name,
[in_data] + in_indices,
[out],
quantized=quantized,
input_range=input_range,
)
    except ValueError as exc:
        # Conversion with out-of-bounds indices is expected to fail; tolerate
        # the ValueError only when the caller requested an oob case.
        if not oob:
            raise exc
    except Exception as exc:
        raise exc
def test_forward_gather():
"""GATHER"""
for quantized in [False, True]:
for wrap_idx in [False, True]:
_test_gather((4,), [1], 0, "float32", quantized, wrap_idx)
_test_gather((4,), [1], None, "int32", quantized, wrap_idx)
_test_gather((1, 4), [0], 0, "int32", quantized, wrap_idx)
_test_gather((4,), [[[1, 0], [0, 1]]], 0, "float32", quantized, wrap_idx)
_test_gather((2, 2), [[[1, 0], [0, 1]]], 1, "int32", quantized, wrap_idx)
_test_gather((2, 2), [[[1, 0], [0, 1]]], None, "float32", quantized, wrap_idx)
_test_gather((3, 3, 3), [[[1, 0]]], 0, "int32", quantized, wrap_idx)
_test_gather((3, 3, 3), [[[1, 0]]], 2, "int32", quantized, wrap_idx)
_test_gather((4, 3, 5, 6), [[2, 1, 0, 0]], 0, "float32", quantized, wrap_idx)
_test_gather((3, 3, 3), [[[2, 1]]], -1, "int32", quantized, wrap_idx)
        _test_gather((4,), [16], 0, "float32", quantized, oob=True)
_test_gather((1, 3, 3), [12], 0, "int32", quantized, oob=True)
_test_gather((1, 3, 3), [20], 1, "float32", quantized, oob=True)
_test_gather((1, 3, 3), [20, 20], 2, "float32", quantized, oob=True)
def _test_gather_nd(data, indices):
"""One iteration of GATHER_ND"""
with tf.Graph().as_default():
in_data = tf.placeholder(shape=data.shape, dtype=data.dtype, name="data")
indices_data = tf.placeholder(shape=indices.shape, dtype=indices.dtype, name="indices")
out = tf.gather_nd(in_data, indices_data)
compare_tflite_with_tvm(
[data, indices], ["data:0", "indices:0"], [in_data, indices_data], [out]
)
def test_forward_gather_nd():
"""GATHER_ND"""
_test_gather_nd(
np.array([[[1.2, 2.0], [3.1, 4.1]], [[5.1, 6.1], [7.1, 8.1]]]).astype("float32"),
np.asarray([[0, 1], [1, 0]]).astype("int32"),
)
_test_gather_nd(
np.reshape(np.arange(30), [5, 6]).astype("int32"), np.asarray([[1, 2]]).astype("int32")
)
_test_gather_nd(
np.reshape(np.arange(12), [2, 3, 2]).astype("int32"),
np.asarray([[[0, 0], [0, 1]], [[1, 0], [1, 1]]]).astype("int32"),
)
_test_gather_nd(
np.reshape(np.arange(4), [4]).astype("float32"), np.asarray([1]).astype("int32")
)
_test_gather_nd(
np.reshape(np.arange(4), [1, 4]).astype("float32"), np.asarray([0]).astype("int32")
)
_test_gather_nd(
np.reshape(np.arange(4), [1, 4]).astype("float32"), np.asarray([0, 3]).astype("int32")
)
def _test_stridedslice(
ip_shape,
begin,
end,
stride,
dtype,
begin_mask=0,
end_mask=0,
new_axis_mask=0,
shrink_axis_mask=0,
ellipsis_mask=0,
quantized=False,
):
"""One iteration of a Stridedslice"""
data = np.random.uniform(size=ip_shape).astype(dtype)
data = data.astype(np.uint8) if quantized else data.astype(dtype)
with tf.Graph().as_default():
        in_data = tf.placeholder(dtype, ip_shape, name="in_data")
out = array_ops.strided_slice(
in_data,
begin,
end,
stride,
begin_mask=begin_mask,
end_mask=end_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask,
ellipsis_mask=ellipsis_mask,
)
input_range = {"in_data": (-100, 100)} if quantized else None
compare_tflite_with_tvm(
[data], ["in_data:0"], [in_data], [out], quantized=quantized, input_range=input_range
)
def test_forward_stridedslice():
"""test StridedSlice"""
for quantized in [False, True]:
_test_stridedslice(
(1, 3, 3),
[0, 0, 0],
[3, 3, 3],
[1, 1, 1],
"float32",
shrink_axis_mask=7,
quantized=quantized,
)
_test_stridedslice(
(1, 3, 3),
[0, 0, 0],
[3, 3, 3],
[1, 1, 1],
"float32",
shrink_axis_mask=5,
quantized=quantized,
)
_test_stridedslice((2), [1], [1], [1], "float32", shrink_axis_mask=1, quantized=quantized)
_test_stridedslice(
(3, 4, 3), [1, -1, 0], [4, -5, 3], [2, -1, 1], "float32", quantized=quantized
)
_test_stridedslice(
(3, 4), [1, 0], [4, 4], [1, 1], "float32", shrink_axis_mask=0, quantized=quantized
)
_test_stridedslice(
(4, 4), [1, 0], [4, 4], [1, 1], "float32", shrink_axis_mask=2, quantized=quantized
)
_test_stridedslice(
(3, 4), [-1, 0], [0, 3], [1, 1], "float32", shrink_axis_mask=1, quantized=quantized
)
def _test_forward_transpose(ishape, axes=()):
data = np.random.uniform(size=ishape).astype(np.float32)
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
if not axes:
out = array_ops.transpose(in_data)
else:
            out = array_ops.transpose(in_data, axes)
compare_tflite_with_tvm(data, "Placeholder:0", [in_data], [out])
def test_forward_transpose():
_test_forward_transpose((2, 2))
_test_forward_transpose((2, 3, 4))
_test_forward_transpose((7, 8, 8, 10))
_test_forward_transpose((2, 3, 4), (1, 2, 0))
_test_forward_transpose((2, 3, 4), (0, 1, 2))
_test_forward_transpose((2, 3, 4, 5), (3, 0, 1, 2))
_test_forward_transpose((2, 3, 4, 5), ())
def _test_cast(data, cast_dtype, use_mlir=False):
"""One iteration of CAST"""
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
out = math_ops.cast(in_data, cast_dtype)
compare_tflite_with_tvm(
data, "Placeholder:0", [in_data], [out], experimental_new_converter=use_mlir
)
def test_forward_cast():
"""CAST"""
for use_mlir in [False, True]:
_test_cast(
np.arange(6.0, dtype=np.float32).reshape((1, 6)), cast_dtype=tf.int32, use_mlir=use_mlir
)
_test_cast(
np.arange(6.0, dtype=np.float32).reshape((1, 6)), cast_dtype=tf.uint8, use_mlir=use_mlir
)
_test_cast(
np.arange(6.0, dtype=np.int32).reshape((1, 6)), cast_dtype=tf.int64, use_mlir=use_mlir
)
def _test_batch_matmul(a_shape, b_shape, dtype, adjoint_a=False, adjoint_b=False):
with tf.Graph().as_default():
a = array_ops.placeholder(shape=a_shape, dtype=dtype, name="A")
b = array_ops.placeholder(shape=b_shape, dtype=dtype, name="B")
result = math_ops.matmul(a, b, adjoint_a=adjoint_a, adjoint_b=adjoint_b, name="batchmatmul")
a_np = np.random.uniform(high=5.0, size=a_shape).astype(dtype)
b_np = np.random.uniform(high=5.0, size=b_shape).astype(dtype)
compare_tflite_with_tvm([a_np, b_np], [a.name, b.name], [a, b], [result])
def test_forward_batch_matmul():
"""BATCH_MAT_MUL"""
_test_batch_matmul((3, 5, 4), (3, 4, 5), "float32")
_test_batch_matmul((3, 5, 4), (3, 4, 5), "float32", True, True) |
_test_batch_matmul((3, 5, 4), (3, 5, 4), "float32", True, False)
_test_batch_matmul((3, 5, 4), (3, 5, 4), "float32", False, True)
_test_batch_matmul((2, 3, 4, 5, 6), (2, 3, 4, 6, 5), "float32")
def _test_forward_tile(in_shape, reps, dtype):
data = np.random.uniform(-5, 5, size=in_shape).astype(dtype)
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
out = array_ops.tile(in_data, reps)
compare_tflite_with_tvm(data, "Placeholder:0", [in_data], [out])
def test_forward_tile():
_test_forward_tile((2,), (3,), "int32")
_test_forward_tile((2, 2), (2, 3), "float32")
def _test_batch_to_space_nd(input_shape, block_shape, crops, dtype="int32"):
data = np.random.uniform(0, 5, size=input_shape).astype(dtype)
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=input_shape, dtype=dtype)
out = array_ops.batch_to_space_nd(in_data, block_shape, crops)
compare_tflite_with_tvm(data, "Placeholder:0", [in_data], [out])
def test_forward_batch_to_space_nd():
_test_batch_to_space_nd(input_shape=[4, 1, 1, 1], block_shape=[2, 2], crops=[[0, 0], [0, 0]])
_test_batch_to_space_nd(input_shape=[4, 1, 1, 3], block_shape=[2, 2], crops=[[0, 0], [0, 0]])
_test_batch_to_space_nd(input_shape=[4, 2, 2, 1], block_shape=[2, 2], crops=[[0, 0], [0, 0]])
_test_batch_to_space_nd(input_shape=[4, 3, 3, 1], block_shape=[2, 2], crops=[[0, 1], [0, 1]])
def _test_space_to_batch_nd(input_shape, block_shape, paddings, dtype="int32"):
data = np.random.uniform(0, 5, size=input_shape).astype(dtype)
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=input_shape, dtype=dtype)
out = array_ops.space_to_batch_nd(in_data, block_shape, paddings)
compare_tflite_with_tvm(data, "Placeholder:0", [in_data], [out])
def test_forward_space_to_batch_nd():
    _test_space_to_batch_nd(input_shape=[1, 2, 2, 1], block_shape=[2, 2], paddings=[[0, 0], [0, 0]])
_test_space_to_batch_nd(input_shape=[1, 2, 2, 3], block_shape=[2, 2], paddings=[[0, 0], [0, 0]])
_test_space_to_batch_nd(input_shape=[1, 4, 4, 1], block_shape=[2, 2], paddings=[[0, 0], [0, 0]])
_test_space_to_batch_nd(input_shape=[2, 2, 4, 1], block_shape=[2, 2], paddings=[[0, 0], [2, 0]])
def _test_pooling_iteration(input_shape, **kwargs):
"""One iteration of pool operation with given shapes and attributes"""
x = -np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) - 1
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=input_shape, dtype="float32")
out = nn_ops.pool(in_data, **kwargs)
compare_tflite_with_tvm(x, "Placeholder:0", [in_data], [out])
def _test_pooling(input_shape, **kwargs):
_test_pooling_iteration(input_shape, **kwargs)
def test_forward_pooling():
"""Pooling"""
for pool_type in ["AVG", "MAX"]:
_test_pooling(
input_shape=[2, 9, 10, 2],
window_shape=[1, 1],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1],
strides=[1, 1],
)
_test_pooling(
input_shape=[2, 10, 9, 2],
window_shape=[1, 1],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1],
strides=[1, 1],
)
_test_pooling(
input_shape=[2, 9, 10, 2],
window_shape=[2, 1],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1],
strides=[1, 1],
)
_test_pooling(
input_shape=[2, 10, 9, 2],
window_shape=[2, 3],
padding="SAME",
pooling_type=pool_type,
dilation_rate=[1, 1],
strides=[2, 1],
)
def _test_l2_pool2d(input_shape, ksize, strides, padding, data_format, fused_func_name=None):
    x = np.arange(np.prod(input_shape), dtype=np.float32).reshape(input_shape) - 1
with tf.Graph().as_default():
in_data = tf.placeholder(dtype=tf.float32, name="input", shape=input_shape)
out = tf.sqrt(
tf.nn.avg_pool(
tf.square(in_data),
ksize=ksize,
strides=strides,
padding=padding,
data_format=data_format,
)
)
out = with_fused_activation_function(out, fused_func_name)
compare_tflite_with_tvm(x, "input", [in_data], [out])
def test_forward_l2_pool2d():
_test_l2_pool2d([1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], "SAME", "NHWC", "RELU6")
_test_l2_pool2d([2, 9, 10, 2], [1, 1, 1, 1], [1, 1, 1, 1], "SAME", "NHWC", "RELU6")
_test_l2_pool2d([2, 9, 10, 2], [1, 2, 1, 1], [1, 1, 1, 1], "SAME", "NHWC")
_test_l2_pool2d([2, 9, 10, 2], [1, 2, 1, 1], [1, 1, 2, 1], "SAME", "NHWC")
_test_l2_pool2d([1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], "VALID", "NHWC", "RELU")
_test_l2_pool2d([2, 9, 10, 2], [1, 1, 1, 1], [1, 1, 1, 1], "VALID", "NHWC")
_test_l2_pool2d([2, 9, 10, 2], [1, 2, 1, 1], [1, 1, 1, 1], "VALID", "NHWC")
_test_l2_pool2d([2, 9, 10, 2], [1, 2, 1, 1], [1, 1, 2, 1], "VALID", "NHWC", "RELU6")
def _test_tflite2_quantized_convolution(
input_shape, kernel_shape, filters, padding="valid", data_format=None, int_quant_dtype=tf.int8
):
"""One iteration of TFLite2 quantized convolution with given shapes and attributes"""
data_format = "channels_last" if data_format == "NHWC" else "channels_first"
data = np.random.uniform(0, 1, input_shape).astype("float32")
_ = np.random.uniform(0, 1, kernel_shape).astype("float32")
data_in = tf.keras.layers.Input(shape=data.shape[1:])
conv = tf.keras.layers.Conv2D(
filters=filters,
kernel_size=(kernel_shape[0], kernel_shape[1]),
activation=tf.nn.relu,
padding=padding,
data_format=data_format,
)(data_in)
keras_model = tf.keras.models.Model(data_in, conv)
    def representative_data_gen():
for _ in range(1):
yield [data]
tflite_model_quant = _quantize_keras_model(
keras_model,
representative_data_gen,
is_float_input=True,
is_float_output=True,
int_quant_dtype=int_quant_dtype,
)
    try:
import tflite.Model
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_quant, 0)
    except AttributeError:
import tflite
tflite_model = tflite.Model.GetRootAsModel(tflite_model_quant, 0)
except ImportError as exc:
raise ImportError("The tflite package must be installed") from exc
subgraph = tflite_model.Subgraphs(0)
model_input = subgraph.InputsAsNumpy()
input_node = subgraph.Tensors(model_input).Name().decode("utf-8")
tflite_output = run_tflite_graph(tflite_model_quant, data)
if tf.__version__ < LooseVersion("2.9"):
input_node = data_in.name.replace(":0", "")
else:
input_node = "serving_default_" + data_in.name + ":0"
tvm_output = run_tvm_graph(tflite_model_quant, data, input_node)
tvm.testing.assert_allclose(
np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]), rtol=1e-2, atol=1e-2
)
def test_forward_quantized_convolution():
"""Quantized convolution"""
for int_quant_dtype in [tf.int8, tf.int16]:
_test_tflite2_quantized_convolution(
(1, 28, 28, 1),
(1, 1),
12,
data_format="NHWC",
int_quant_dtype=int_quant_dtype,
)
_test_tflite2_quantized_convolution(
(1, 1, 28, 28),
(1, 1),
12,
data_format="NCWH",
int_quant_dtype=int_quant_dtype,
)
def test_forward_quantized_depthwise_convolution():
for int_quant_dtype in [tf.int8, tf.int16]:
_test_tflite2_quantized_depthwise_convolution(
[1, 8, 8, 128], [1, 1, 128, 1], [1, 1], [1, 1], "SAME", "NHWC", 1, int_quant_dtype
)
_test_tflite2_quantized_depthwise_convolution(
[1, 17, 17, 12], [3, 3, 12, 1], [1, 1], [2, 2], "VALID", "NHWC", 1, int_quant_dtype
)
_test_tflite2_quantized_depthwise_convolution(
[1, 24, 24, 3], [7, 7, 3, 8], [1, 1], [2, 2], "SAME", "NHWC", 8, int_quant_dtype
)
def _test_tflite2_quantized_depthwise_convolution(
input_shape,
kernel_shape,
dilations,
strides,
padding,
data_format,
depth_multiplier,
    int_quant_dtype=tf.int8,
):
"""One iteration of TFLite2 quantized depthwise convolution with given shapes and attributes"""
data_format = "channels_last" if data_format == "NHWC" else "channels_first"
data = np.random.uniform(0, 1, input_shape).astype("float32")
kernel = np.random.uniform(0, 1, kernel_shape).astype("float32")
data_in = tf.keras.layers.Input(shape=data.shape[1:])
conv = tf.keras.layers.DepthwiseConv2D(
kernel_size=(kernel_shape[0], kernel_shape[1]),
strides=strides,
padding=padding,
data_format=data_format,
activation="relu",
use_bias=False,
depth_multiplier=depth_multiplier,
)(data_in)
keras_model = tf.keras.models.Model(data_in, conv)
keras_model.layers[1].set_weights([kernel])
def representative_data_gen():
for _ in range(1):
yield [data]
tflite_model_quant = _quantize_keras_model(
keras_model,
representative_data_gen,
is_float_input=True,
is_float_output=True,
int_quant_dtype=int_quant_dtype,
)
    try:
import tflite.Model
tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_quant, 0)
    except AttributeError:
import tflite
tflite_model = tflite.Model.GetRootAsModel(tflite_model_quant, 0)
except ImportError as exc:
raise ImportError("The tflite package must be installed") from exc
subgraph = tflite_model.Subgraphs(0)
model_input = subgraph.InputsAsNumpy()
input_node = subgraph.Tensors(model_input).Name().decode("utf-8")
tflite_output = run_tflite_graph(tflite_model_quant, data)
tvm_output = run_tvm_graph(tflite_model_quant, data, input_node)
tvm.testing.assert_allclose(
np.squeeze(tvm_output[0]), np.squeeze(tflite_output[0]), rtol=1e-2, atol=1e-2
)
def _test_convolution(
tensor_in_sizes,
filter_in_sizes,
dilations,
strides,
padding,
data_format,
is_depthwise=False,
quantized=False,
fp16_quantized=False,
):
"""One iteration of convolution with given shapes and attributes"""
total_size_1 = 1
total_size_2 = 1
for s in tensor_in_sizes:
total_size_1 *= s
for s in filter_in_sizes:
total_size_2 *= s
if quantized:
data_array = np.random.uniform(0, 255, tensor_in_sizes).astype("uint8")
filter_array = np.random.uniform(0, 255, filter_in_sizes).astype("uint8")
else:
data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]
filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)]
with tf.Graph().as_default():
in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype="float32", name="in_data")
in_filter = constant_op.constant(
filter_array, shape=filter_in_sizes, dtype="float32", name="in_filter"
)
strides = [1] + strides + [1]
dilations = [1] + dilations + [1]
if is_depthwise:
out = nn_ops.depthwise_conv2d_native(
in_data, in_filter, strides=strides, padding=padding, data_format=data_format
)
else:
out = nn_ops.conv2d(
                in_data, in_filter, strides=strides, padding=padding, data_format=data_format
)
if quantized and not fp16_quantized:
if is_depthwise:
inq_data = tf.quantization.fake_quant_with_min_max_args(
in_data, min=-100, max=100, name="inq_data"
)
inq_filter = tf.quantization.fake_quant_with_min_max_args(
in_filter, min=-100, max=100, name="inq_filter"
)
out = nn_ops.depthwise_conv2d_native(
inq_data, inq_filter, strides=strides, padding=padding, data_format=data_format
)
out = tf.quantization.fake_quant_with_min_max_args(
out, min=-200, max=200, name="out"
)
input_range = {"in_data": (-100, 100)} if quantized else None
compare_tflite_with_tvm(
data_array,
"in_data",
[in_data],
[out],
quantized=quantized,
input_range=input_range,
experimental_new_converter=True,
)
else:
inq_data = tf.quantization.fake_quant_with_min_max_args(
in_data, min=-100, max=100, name="inq_data"
)
inq_filter = tf.quantization.fake_quant_with_min_max_args(
in_filter, min=-100, max=100, name="inq_filter"
)
out = nn_ops.conv2d(
inq_data, inq_filter, strides=strides, padding=padding, data_format=data_format
)
out = tf.quantization.fake_quant_with_min_max_args(
out, min=-200, max=200, name="out"
)
input_range = {"in_data": (-100, 100)} if quantized else None
compare_tflite_with_tvm(
data_array,
"in_data",
                [in_data],
[out],
quantized=quantized,
input_range=input_range,
experimental_new_converter=True,
)
else:
data_array = np.reshape(data_array, tensor_in_sizes).astype("float32")
compare_tflite_with_tvm(data_array, "in_data", [in_data], [out])
def test_forward_convolution():
"""Convolution"""
for quantized in [False, True]:
for fp16_quantized in [False, True]:
_test_convolution(
[4, 8, 8, 176],
[1, 1, 176, 32],
[1, 1],
[1, 1],
"SAME",
"NHWC",
quantized=quantized,
fp16_quantized=fp16_quantized,
)
_test_convolution(
[4, 17, 17, 19],
[3, 3, 19, 19],
[1, 1],
[2, 2],
"VALID",
"NHWC",
quantized=quantized,
fp16_quantized=fp16_quantized,
)
_test_convolution(
[4, 17, 17, 124],
[1, 1, 124, 19],
[1, 1],
[1, 1],
"SAME",
"NHWC",
quantized=quantized,
fp16_quantized=fp16_quantized,
)
_test_convolution(
[4, 17, 17, 12],
[3, 3, 12, 32],
[1, 1],
[2, 2],
"VALID",
"NHWC",
quantized=quantized,
fp16_quantized=fp16_quantized,
)
_test_convolution(
[4, 8, 8, 176],
[1, 1, 176, 1],
[1, 1],
[1, 1],
"SAME",
"NHWC",
True,
quantized=quantized,
fp16_quantized=fp16_quantized,
)
_test_convolution(
                [4, 17, 17, 19],
[3, 3, 19, 1],
[1, 1],
[2, 2],
"VALID",
"NHWC",
True,
quantized=quantized,
fp16_quantized=fp16_quantized,
)
_test_convolution(
[4, 17, 17, 124],
[1, 1, 124, 1],
[1, 1],
[1, 1],
"SAME",
"NHWC",
True,
quantized=quantized,
fp16_quantized=fp16_quantized,
)
_test_convolution(
[4, 17, 17, 12],
[3, 3, 12, 1],
[1, 1],
[2, 2],
"VALID",
"NHWC",
True,
quantized=quantized,
fp16_quantized=fp16_quantized,
)
_test_convolution(
[4, 17, 17, 12],
[3, 3, 12, 2],
[1, 1],
[2, 2],
"VALID",
"NHWC",
True,
quantized=quantized,
fp16_quantized=fp16_quantized,
)
_test_convolution(
[1, 76, 64, 1],
[9, 5, 1, 96],
[1, 1],
[1, 1],
"SAME",
"NHWC",
True,
quantized=quantized,
fp16_quantized=fp16_quantized,
)
if package_version.parse(tf.VERSION) >= package_version.parse("2.3.0"):
_test_convolution(
[1, 8, 8, 176], [1, 1, 176, 32], [1, 1], [1, 1], "SAME", "NHWC", quantized=True
)
_test_convolution(
[1, 17, 17, 12], [3, 3, 12, 32], [1, 1], [2, 2], "VALID", "NHWC", quantized=True
)
_test_convolution(
[1, 17, 17, 19], [3, 3, 19, 19], [1, 1], [2, 2], "VALID", "NHWC", quantized=True
)
_test_convolution(
            [1, 17, 17, 124], [1, 1, 124, 19], [1, 1], [1, 1], "SAME", "NHWC", quantized=True
)
def _test_transpose_conv(
tensor_in_sizes,
filter_in_sizes,
output_shape,
strides,
padding,
quantized=False,
fp16_quantized=False,
):
"""One iteration of transpose convolution with given shapes and attributes"""
total_size_1 = 1
total_size_2 = 1
for s in tensor_in_sizes:
total_size_1 *= s
for s in filter_in_sizes:
total_size_2 *= s
with tf.Graph().as_default():
if quantized and not fp16_quantized:
data_array = [max(f, 255) for f in range(1, total_size_1 + 1)]
filter_array = [max(f, 255) for f in range(1, total_size_2 + 1)]
data_array = np.reshape(data_array, tensor_in_sizes).astype("uint8")
filter_array = np.reshape(filter_array, filter_in_sizes).astype("uint8")
in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype="float32", name="in_data")
inq_data = tf.quantization.fake_quant_with_min_max_args(
in_data, min=-100, max=100, name="q_data"
)
input_range = {"q_data": (-100, 100)}
in_filter = constant_op.constant(
filter_array, shape=filter_in_sizes, dtype="float32", name="in_filter"
)
inq_filter = tf.quantization.fake_quant_with_min_max_args(
in_filter, min=-100, max=100, name="q_filter"
)
strides = [1] + strides + [1]
out = nn_ops.conv2d_transpose(
inq_data, inq_filter, output_shape=output_shape, strides=strides, padding=padding
)
out = tf.quantization.fake_quant_with_min_max_args(out, min=-100, max=100, name="out")
compare_tflite_with_tvm(
[data_array], ["q_data"], [inq_data], [out], quantized=True, input_range=input_range
)
else:
            data_array = [f * 1.0 for f in range(1, total_size_1 + 1)]
filter_array = [f * 1.0 for f in range(1, total_size_2 + 1)]
in_data = array_ops.placeholder(shape=tensor_in_sizes, dtype="float32", name="in_data")
in_filter = constant_op.constant(
filter_array, shape=filter_in_sizes, dtype="float32", name="in_filter"
)
strides = [1] + strides + [1]
out = nn_ops.conv2d_transpose(
in_data, in_filter, output_shape=output_shape, strides=strides, padding=padding
)
data_array = np.reshape(data_array, tensor_in_sizes).astype("float32")
compare_tflite_with_tvm(
[data_array], ["in_data"], [in_data], [out], fp16_quantized=fp16_quantized
)
def test_forward_transpose_conv():
"""Transpose convolution"""
for quantized in [True, False]:
for fp16_quantized in [True, False]:
_test_transpose_conv(
[1, 5, 6, 16],
[2, 2, 16, 16],
[1, 10, 12, 16],
[2, 2],
"VALID",
quantized,
fp16_quantized,
)
_test_transpose_conv(
[1, 5, 6, 16],
[2, 2, 16, 16],
[1, 10, 12, 16],
[2, 2],
"SAME",
quantized,
fp16_quantized,
)
_test_transpose_conv(
[4, 32, 32, 16],
[3, 3, 5, 16],
[4, 34, 34, 5],
[1, 1],
"VALID",
quantized,
fp16_quantized,
)
_test_transpose_conv(
[1, 32, 32, 16],
[3, 3, 5, 16],
[1, 65, 65, 5],
[2, 2],
"VALID",
quantized,
fp16_quantized,
)
_test_transpose_conv(
[1, 32, 32, 16],
                [3, 3, 5, 16],
[1, 65, 34, 5],
[2, 1],
"VALID",
quantized,
fp16_quantized,
)
_test_transpose_conv(
[4, 32, 32, 16],
[3, 3, 5, 16],
[4, 32, 32, 5],
[1, 1],
"SAME",
quantized,
fp16_quantized,
)
_test_transpose_conv(
[1, 32, 32, 16],
[3, 3, 5, 16],
[1, 64, 64, 5],
[2, 2],
"SAME",
quantized,
fp16_quantized,
)
_test_transpose_conv(
[1, 32, 32, 16],
[3, 3, 5, 16],
[1, 64, 32, 5],
[2, 1],
"SAME",
quantized,
fp16_quantized,
)
_test_transpose_conv(
[4, 32, 32, 16],
[2, 2, 5, 16],
[4, 33, 33, 5],
[1, 1],
"VALID",
quantized,
fp16_quantized,
)
_test_transpose_conv(
[1, 32, 32, 16],
[2, 2, 5, 16],
[1, 64, 64, 5],
[2, 2],
"VALID",
quantized,
fp16_quantized,
)
_test_transpose_conv(
[1, 32, 32, 16],
[2, 2, 5, 16],
[1, 64, 33, 5],
[2, 1],
"VALID",
quantized,
fp16_quantized,
)
_test_transpose_conv(
[4, 32, 32, 16],
[2, 2, 5, 16],
[4, 32, 32, 5],
[1, 1],
"SAME",
quantized,
fp16_quantized,
)
_test_transpose_conv(
[1, 32, 32, 16],
                [2, 2, 5, 16],
[1, 64, 64, 5],
[2, 2],
"SAME",
quantized,
fp16_quantized,
)
_test_transpose_conv(
[1, 32, 32, 16],
[2, 2, 5, 16],
[1, 64, 32, 5],
[2, 1],
"SAME",
quantized,
fp16_quantized,
)
_test_transpose_conv(
[4, 32, 32, 16],
[1, 1, 5, 16],
[4, 32, 32, 5],
[1, 1],
"VALID",
quantized,
fp16_quantized,
)
_test_transpose_conv(
[1, 32, 32, 16],
[1, 1, 5, 16],
[1, 63, 63, 5],
[2, 2],
"VALID",
quantized,
fp16_quantized,
)
_test_transpose_conv(
[1, 32, 32, 16],
[1, 1, 5, 16],
[1, 63, 32, 5],
[2, 1],
"VALID",
quantized,
fp16_quantized,
)
_test_transpose_conv(
[4, 32, 32, 16],
[1, 1, 5, 16],
[4, 32, 32, 5],
[1, 1],
"SAME",
quantized,
fp16_quantized,
)
_test_transpose_conv(
[1, 32, 32, 16],
[1, 1, 5, 16],
[1, 63, 63, 5],
[2, 2],
"SAME",
quantized,
fp16_quantized,
)
_test_transpose_conv(
[1, 32, 32, 16],
[1, 1, 5, 16],
[1, 63, 32, 5],
[2, 1],
"SAME",
quantized,
fp16_quantized,
)
def _test_reshape(data, out_shape, wrap_shape, quantized=False):
"""One iteration of resh |