This is an automated email from the ASF dual-hosted git repository.

tlopex pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git


The following commit(s) were added to refs/heads/main by this push:
     new 7196ac3995 [Test][TFLite] Add unit tests for `LEAKY_RELU`, 
`HARD_SWISH`, `ReLU_N1_to_1` and `LOG_SOFTMAX` (#19388)
7196ac3995 is described below

commit 7196ac399524aa10a9e6d39a208f92456c3cfb42
Author: Felix Hirwa Nshuti <[email protected]>
AuthorDate: Sat Apr 11 19:46:58 2026 +0200

    [Test][TFLite] Add unit tests for `LEAKY_RELU`, `HARD_SWISH`, `ReLU_N1_to_1` 
and `LOG_SOFTMAX` (#19388)
    
    This PR adds unit test coverage for activation function operators
    in the Relax TFLite frontend, as part of
    https://github.com/apache/tvm/issues/18971
    - Added the `(tf.nn.log_softmax, R.nn.log_softmax)` entry to elementwise
    unit test
     - Added unit test for `LEAKY_RELU` with default alpha
     - Added unit test for `HARD_SWISH`
     - Added unit test for `ReLU_N1_to_1`
    
    **Bugs fixed:**
    - `relax.op.tensor.clip` -> `relax.op.clip` in `_relu6` helper of
    `convert_hard_swish`
    - `relax.op.nn.leaky_relu` -> `relax.op.nn.leakyrelu` in
    `convert_leaky_relu`
---
 .../tvm/relax/frontend/tflite/tflite_frontend.py   |  4 +-
 tests/python/relax/test_frontend_tflite.py         | 67 ++++++++++++++++++++++
 2 files changed, 69 insertions(+), 2 deletions(-)

diff --git a/python/tvm/relax/frontend/tflite/tflite_frontend.py 
b/python/tvm/relax/frontend/tflite/tflite_frontend.py
index d7b56e597b..8b2f70a0f5 100644
--- a/python/tvm/relax/frontend/tflite/tflite_frontend.py
+++ b/python/tvm/relax/frontend/tflite/tflite_frontend.py
@@ -1010,7 +1010,7 @@ class OperatorConverter:
         output_tensor = output_tensors[0]
 
         def _relu6(data):
-            return relax.op.tensor.clip(data, 0.0, 6.0)
+            return relax.op.clip(data, min=0.0, max=6.0)
 
         def _hard_swish(data):
             return data * _relu6(data + relax.const(3.0)) / relax.const(6.0)
@@ -1094,7 +1094,7 @@ class OperatorConverter:
 
         if input_tensor.qnn_params:
             in_expr = self.dequantize(in_expr, input_tensor)
-        out = relax.op.nn.leaky_relu(in_expr, alpha_tensor)
+        out = relax.op.nn.leakyrelu(in_expr, alpha_tensor)
         if output_tensor.qnn_params:
             out = self.quantize(out, output_tensor)
 
diff --git a/tests/python/relax/test_frontend_tflite.py 
b/tests/python/relax/test_frontend_tflite.py
index c9a8470f42..195d2f5542 100644
--- a/tests/python/relax/test_frontend_tflite.py
+++ b/tests/python/relax/test_frontend_tflite.py
@@ -523,6 +523,7 @@ def test_square():
         (tf.math.rsqrt, R.rsqrt),
         (tf.nn.softmax, R.nn.softmax),
         (tf.math.sqrt, R.sqrt),
+        (tf.nn.log_softmax, R.nn.log_softmax),
     ],
 )
 def test_element_wise(tf_op, relax_op):
@@ -1918,5 +1919,71 @@ def test_space_to_depth():
     verify(SpaceToDepth, Expected)
 
 
+def test_leaky_relu():
+    class LeakyReLU(tf.Module):
+        @tf.function(input_signature=[tf.TensorSpec(shape=(1, 30), 
dtype=tf.float32)])
+        def func(self, x):
+            return tf.nn.leaky_relu(x, alpha=0.2)
+
+    @I.ir_module
+    class Expected:
+        @R.function
+        def main(x: R.Tensor((1, 30), dtype="float32")) -> R.Tensor((1, 30), 
dtype="float32"):
+            R.func_attr({"num_input": 1})
+            with R.dataflow():
+                gv: R.Tensor((1, 30), dtype="float32") = R.nn.leakyrelu(
+                    x, alpha=0.20000000298023224
+                )
+                R.output(gv)
+            return gv
+
+    verify(LeakyReLU, Expected)
+
+
+def test_hard_swish():
+    class HardSwish(tf.Module):
+        @tf.function(input_signature=[tf.TensorSpec(shape=(1, 30), 
dtype=tf.float32)])
+        def func(self, x):
+            return x * tf.nn.relu6(x + 3) / 6
+
+    @I.ir_module
+    class Expected:
+        @R.function
+        def main(x: R.Tensor((1, 30), dtype="float32")) -> R.Tensor((1, 30), 
dtype="float32"):
+            R.func_attr({"num_input": 1})
+            with R.dataflow():
+                lv: R.Tensor((1, 30), dtype="float32") = R.add(x, R.const(3.0, 
dtype="float32"))
+                lv1: R.Tensor((1, 30), dtype="float32") = R.clip(
+                    lv, R.prim_value(T.float64(0.0)), 
R.prim_value(T.float64(6.0))
+                )
+                lv2: R.Tensor((1, 30), dtype="float32") = R.multiply(x, lv1)
+                gv: R.Tensor((1, 30), dtype="float32") = R.divide(
+                    lv2, R.const(6.0, dtype="float32")
+                )
+                R.output(gv)
+            return gv
+
+    verify(HardSwish, Expected)
+
+
+def test_relu_n1_to_1():
+    class ReLU_N1_to_1(tf.Module):
+        @tf.function(input_signature=[tf.TensorSpec(shape=(1, 30), 
dtype=tf.float32)])
+        def func(self, x):
+            return tf.clip_by_value(x, -1.0, 1.0)
+
+    @I.ir_module
+    class Expected:
+        @R.function
+        def main(x: R.Tensor((1, 30), dtype="float32")) -> R.Tensor((1, 30), 
dtype="float32"):
+            R.func_attr({"num_input": 1})
+            with R.dataflow():
+                gv: R.Tensor((1, 30), dtype="float32") = R.clip(x, min=-1, 
max=1)
+                R.output(gv)
+            return gv
+
+    verify(ReLU_N1_to_1, Expected)
+
+
 if __name__ == "__main__":
     pytest.main(["-s", __file__])

Reply via email to