This is an automated email from the ASF dual-hosted git repository.
lukhut pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git
The following commit(s) were added to refs/heads/main by this push:
new 4cb75b97cd [microNPU] Add a legalization test for TFLite PAD (#13750)
4cb75b97cd is described below
commit 4cb75b97cda679321a2707d01f351de1cc1ff69a
Author: Alexey Yazev <[email protected]>
AuthorDate: Tue Jan 10 21:46:43 2023 +0400
[microNPU] Add a legalization test for TFLite PAD (#13750)
Added a legalization test for stand-alone pad operation which is legalized
to depthwise operation on the NPU.
---
tests/python/contrib/test_ethosu/test_legalize.py | 100 ++++++++++++++++++++++
1 file changed, 100 insertions(+)
diff --git a/tests/python/contrib/test_ethosu/test_legalize.py b/tests/python/contrib/test_ethosu/test_legalize.py
index 9b4dd467ff..5ddc7565f2 100644
--- a/tests/python/contrib/test_ethosu/test_legalize.py
+++ b/tests/python/contrib/test_ethosu/test_legalize.py
@@ -674,6 +674,106 @@ def test_tflite_depthwise_conv2d_with_separate_padding_legalize():
     verify(mod["tvmgen_default_ethos_u_main_0"])
+@pytest.mark.parametrize("ifm_shape", [(1, 55, 55, 3), (1, 23, 32, 7)])
+@pytest.mark.parametrize("padding", [(0, 1, 0, 0), (1, 1, 1, 1), (1, 1, 5, 5)])
+@pytest.mark.parametrize("const_value", [0, 5, 125, -5])
+def test_tflite_separate_padding_legalize(ifm_shape, padding, const_value):
+    """Check that a stand-alone TFLite PAD is legalized to an NPU depthwise op
+    with kernel 1x1, stride 1 and the pad amounts carried in the op attributes."""
+    dtype = "int8"
+    kernel_shape = (1, 1)
+    strides = (1, 1)
+    dilation = (1, 1)
+
+    def create_tflite_graph():
+        # Build a minimal TF model containing only tf.pad, then quantize it
+        # to int8 so the converter emits a TFLite PAD operator.
+        class Model(tf.Module):
+            @tf.function
+            def tf_function(self, x):
+                return tf.pad(
+                    x,
+                    [[0, 0], [padding[0], padding[2]], [padding[1], padding[3]], [0, 0]],
+                    "CONSTANT",
+                    const_value,
+                )
+
+        model = Model()
+        concrete_func = model.tf_function.get_concrete_function(
+            tf.TensorSpec(ifm_shape, dtype=tf.float32)
+        )
+        # Convert the model
+        def representative_dataset():
+            for _ in range(100):
+                data = np.random.rand(*tuple(ifm_shape))
+                yield [data.astype(np.float32)]
+
+        converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])
+        converter.optimizations = [tf.lite.Optimize.DEFAULT]
+        converter.representative_dataset = representative_dataset
+        converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
+        converter.inference_input_type = tf.int8
+        converter.inference_output_type = tf.int8
+        tflite_model = converter.convert()
+        return tflite_model
+
+    def verify(ext_func):
+        op = ext_func.body
+        ofm_channels = op.attrs.ofm_channels
+
+        # check IFM
+        ifm = op.args[0].checked_type
+        assert list(ifm.shape) == list(ifm_shape)
+        assert str(ifm.dtype) == dtype
+        assert ifm.shape[3] == ofm_channels
+
+        # check OFM
+        ofm = op.checked_type
+        expected_ofm_shape = infra.compute_ofm_shape(
+            ifm_shape, padding, kernel_shape, strides, dilation
+        )
+        assert list(ofm.shape) == list(expected_ofm_shape)
+        assert str(ofm.dtype) == dtype
+        assert ofm.shape[3] == ofm_channels
+
+        # check weights
+        weights_ohwi = op.args[1].data.asnumpy()
+        assert str(weights_ohwi.dtype) == dtype
+        assert weights_ohwi.shape[0] == ofm_channels
+        assert weights_ohwi.shape[1] == kernel_shape[0]
+        assert weights_ohwi.shape[2] == kernel_shape[1]
+        assert weights_ohwi.shape[3] == 1  # only depth multiplier 1 is supported
+
+        # Check that scale_bias matches weight tensor
+        assert list(op.args[2].checked_type.shape)[0] == ofm_channels
+
+        assert list(op.attrs.padding) == list(padding)
+        assert op.attrs.ofm_channels == ofm_channels
+        assert list(op.attrs.strides) == list(strides)
+        assert list(op.attrs.dilation) == list(dilation)
+
+    pad_pattern_table = [
+        (
+            ethosu.PadParams.composite_name,
+            ethosu.pad_pattern(),
+            lambda pat: ethosu.PadParams(pat).is_valid(),
+        )
+    ]
+
+    tflite_graph = create_tflite_graph()
+    tflite_model = tflite.Model.Model.GetRootAsModel(tflite_graph, 0)
+
+    mod, params = relay.frontend.from_tflite(
+        tflite_model,
+        shape_dict={"input": ifm_shape},
+        dtype_dict={"input": dtype},
+    )
+
+    mod["main"] = bind_params_by_name(mod["main"], params)
+    mod = partition_ethosu_by_table(mod, pad_pattern_table)
+
+    mod["tvmgen_default_ethos_u_main_0"] = dataflow_pattern.rewrite(
+        legalize.PadRewriter(), mod["tvmgen_default_ethos_u_main_0"]
+    )
+    verify(mod["tvmgen_default_ethos_u_main_0"])
+
+
@pytest.mark.parametrize("pooling_type", ["MAX", "AVG"])
@pytest.mark.parametrize("ifm_shape", [[1, 3, 4, 3], [1, 4, 5, 2]])
@pytest.mark.parametrize(