This is an automated email from the ASF dual-hosted git repository.
tlopex pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git
The following commit(s) were added to refs/heads/main by this push:
new fb6453a817 [Relax][TFLite] Fix `MIRROR_PAD`/`ONE_HOT` converters and
add tests for `PAD`, `PADV2`, `MIRROR_PAD`, `TOPK_V2`, `ONE_HOT` (#19373)
fb6453a817 is described below
commit fb6453a817264efd5b2e19c8b3a118e6b383725b
Author: Soowon Jeong <[email protected]>
AuthorDate: Fri Apr 10 03:38:57 2026 +0900
[Relax][TFLite] Fix `MIRROR_PAD`/`ONE_HOT` converters and add tests for
`PAD`, `PADV2`, `MIRROR_PAD`, `TOPK_V2`, `ONE_HOT` (#19373)
Part of #18971
Two bugs in the TFLite Relax frontend converters are fixed, and unit
tests
are added for the **Padding / Sparse / Other** category operators
claimed in
that tracking issue.
## Bug fixes
### `convert_mirror_pad`
Called `relax.op.nn.mirror_pad` which does not exist in the Relax op
namespace. Replaced with `relax.op.nn.pad` using `pad_mode="reflect"`
for
REFLECT mode (the modes are semantically equivalent). SYMMETRIC mode
raises
`OpAttributeUnImplemented` as there is no direct Relax equivalent.
### `convert_one_hot`
- `on_value` and `off_value` were passed as `Expr` (constant tensor
nodes),
but `relax.op.one_hot` requires `PrimValue` arguments.
- An extra `dtype` positional argument was passed, which the function
signature does not accept.
Fixed by extracting the scalar from each tensor buffer and wrapping it
in
`relax.PrimValue` with the correct dtype via `tvm.tir.FloatImm` /
`tvm.tir.IntImm`.
## Tests added
Each test uses the `verify()` + `tf.Module` pattern and includes an
explicit
expected IRModule verified with `tvm.ir.assert_structural_equal`.
| Operator | TFLite op | Notes |
|----------|-----------|-------|
| `test_pad` | `PAD` | constant zero padding |
| `test_pad_v2` | `PADV2` | explicit `constant_values=5.0` |
| `test_mirror_pad` | `MIRROR_PAD` | REFLECT mode |
| `test_topk_v2` | `TOPK_V2` | returns top-k values |
| `test_one_hot` | `ONE_HOT` | float32 on/off values, depth=4 |
---
.../tvm/relax/frontend/tflite/tflite_frontend.py | 24 +++--
tests/python/relax/test_frontend_tflite.py | 111 +++++++++++++++++++++
2 files changed, 129 insertions(+), 6 deletions(-)
diff --git a/python/tvm/relax/frontend/tflite/tflite_frontend.py
b/python/tvm/relax/frontend/tflite/tflite_frontend.py
index 1a437093a7..f71e5c564c 100644
--- a/python/tvm/relax/frontend/tflite/tflite_frontend.py
+++ b/python/tvm/relax/frontend/tflite/tflite_frontend.py
@@ -2550,7 +2550,13 @@ class OperatorConverter:
mode_byte = mirror_pad_options.Mode()
mode = "REFLECT" if mode_byte == 0 else "SYMMETRIC"
- out = relax.op.nn.mirror_pad(in_expr, paddings, mode)
+ if mode == "SYMMETRIC":
+ raise tvm.error.OpAttributeUnImplemented(
+ "MIRROR_PAD with SYMMETRIC mode is not yet supported."
+ )
+ # Flatten tuple-of-tuples to a list for relax.op.nn.pad
+ flat_pads = [int(v) for pair in paddings for v in pair]
+ out = relax.op.nn.pad(in_expr, flat_pads, pad_mode="reflect")
return out
@@ -3457,10 +3463,8 @@ class OperatorConverter:
"on_value and off_value should be the same type"
)
- # Getting relax expr
+ # Getting relax expr for indices
indices_expr = self.get_expr(indices.tensor_idx)
- on_value_expr = self.get_expr(on_value.tensor_idx)
- off_value_expr = self.get_expr(off_value.tensor_idx)
# Getting depth value
depth = self.get_tensor_value(depth)
@@ -3474,10 +3478,18 @@ class OperatorConverter:
one_hot_options.Init(op_options.Bytes, op_options.Pos)
axis = one_hot_options.Axis()
- # Setting dtype
+ # Extract scalar values for on_value and off_value and wrap as
PrimValue
dtype = self.get_tensor_type_str(on_value.tensor.Type())
+ on_val = self.get_tensor_value(on_value).item()
+ off_val = self.get_tensor_value(off_value).item()
+ if "float" in dtype:
+ on_prim = relax.PrimValue(tvm.tir.FloatImm(dtype, float(on_val)))
+ off_prim = relax.PrimValue(tvm.tir.FloatImm(dtype,
float(off_val)))
+ else:
+ on_prim = relax.PrimValue(tvm.tir.IntImm(dtype, int(on_val)))
+ off_prim = relax.PrimValue(tvm.tir.IntImm(dtype, int(off_val)))
- out = relax.op.one_hot(indices_expr, on_value_expr, off_value_expr,
depth, axis, dtype)
+ out = relax.op.one_hot(indices_expr, on_prim, off_prim, depth, axis)
return out
diff --git a/tests/python/relax/test_frontend_tflite.py
b/tests/python/relax/test_frontend_tflite.py
index 1f314e7d57..c0de33748e 100644
--- a/tests/python/relax/test_frontend_tflite.py
+++ b/tests/python/relax/test_frontend_tflite.py
@@ -31,6 +31,7 @@ from tvm import relax
from tvm.relax.frontend.tflite import from_tflite
from tvm.script.parser import ir as I
from tvm.script.parser import relax as R
+from tvm.script.parser import tir as T
def _get_mod_from_cfunc(cfunc):
@@ -1302,5 +1303,115 @@ def test_reduction_ops(tf_op, relax_op, input_shape,
axes, keepdims, dtype):
verify(ReduceModule, expected)
+def test_pad():
+ class Pad(tf.Module):
+ @tf.function(input_signature=[tf.TensorSpec(shape=(2, 3),
dtype=tf.float32)])
+ def func(self, x):
+ return tf.pad(x, [[1, 1], [2, 2]])
+
+ @I.ir_module
+ class Expected:
+ @R.function
+ def main(x: R.Tensor((2, 3), dtype="float32")) -> R.Tensor((4, 7),
dtype="float32"):
+ R.func_attr({"num_input": 1})
+ with R.dataflow():
+ gv: R.Tensor((4, 7), dtype="float32") = R.nn.pad(
+ x, pad_width=[1, 1, 2, 2], pad_value=0.0,
pad_mode="constant"
+ )
+ R.output(gv)
+ return gv
+
+ verify(Pad, Expected)
+
+
+def test_pad_v2():
+ class PadV2(tf.Module):
+ @tf.function(input_signature=[tf.TensorSpec(shape=(2, 3),
dtype=tf.float32)])
+ def func(self, x):
+ return tf.pad(x, [[1, 1], [2, 2]], constant_values=5.0)
+
+ @I.ir_module
+ class Expected:
+ @R.function
+ def main(x: R.Tensor((2, 3), dtype="float32")) -> R.Tensor((4, 7),
dtype="float32"):
+ R.func_attr({"num_input": 1})
+ with R.dataflow():
+ gv: R.Tensor((4, 7), dtype="float32") = R.nn.pad(
+ x, pad_width=[1, 1, 2, 2], pad_value=5.0,
pad_mode="constant"
+ )
+ R.output(gv)
+ return gv
+
+ verify(PadV2, Expected)
+
+
+def test_mirror_pad():
+ class MirrorPad(tf.Module):
+ @tf.function(input_signature=[tf.TensorSpec(shape=(3, 4),
dtype=tf.float32)])
+ def func(self, x):
+ return tf.pad(x, [[1, 1], [2, 2]], mode="REFLECT")
+
+ @I.ir_module
+ class Expected:
+ @R.function
+ def main(x: R.Tensor((3, 4), dtype="float32")) -> R.Tensor((5, 8),
dtype="float32"):
+ R.func_attr({"num_input": 1})
+ with R.dataflow():
+ gv: R.Tensor((5, 8), dtype="float32") = R.nn.pad(
+ x, pad_width=[1, 1, 2, 2], pad_value=0.0,
pad_mode="reflect"
+ )
+ R.output(gv)
+ return gv
+
+ verify(MirrorPad, Expected)
+
+
+def test_topk_v2():
+ class TopKV2(tf.Module):
+ @tf.function(input_signature=[tf.TensorSpec(shape=(5,),
dtype=tf.float32)])
+ def func(self, x):
+ return tf.math.top_k(x, k=3).values
+
+ @I.ir_module
+ class Expected:
+ @R.function
+ def main(x: R.Tensor((5,), dtype="float32")) -> R.Tensor((3,),
dtype="float32"):
+ R.func_attr({"num_input": 1})
+ with R.dataflow():
+ lv: R.Tuple(
+ R.Tensor((3,), dtype="float32"), R.Tensor((3,),
dtype="int32")
+ ) = R.topk(x, k=3, axis=-1, ret_type="both", largest=True,
dtype="int32")
+ gv: R.Tensor((3,), dtype="float32") = lv[0]
+ R.output(gv)
+ return gv
+
+ verify(TopKV2, Expected)
+
+
+def test_one_hot():
+ class OneHot(tf.Module):
+ @tf.function(input_signature=[tf.TensorSpec(shape=(3,),
dtype=tf.int32)])
+ def func(self, x):
+ return tf.one_hot(x, depth=4)
+
+ @I.ir_module
+ class Expected:
+ @R.function
+ def main(x: R.Tensor((3,), dtype="int32")) -> R.Tensor((3, 4),
dtype="float32"):
+ R.func_attr({"num_input": 1})
+ with R.dataflow():
+ gv: R.Tensor((3, 4), dtype="float32") = R.one_hot(
+ x,
+ R.prim_value(T.float32(1.0)),
+ R.prim_value(T.float32(0.0)),
+ depth=4,
+ axis=-1,
+ )
+ R.output(gv)
+ return gv
+
+ verify(OneHot, Expected)
+
+
if __name__ == "__main__":
pytest.main(["-s", __file__])