This is an automated email from the ASF dual-hosted git repository.
tlopex pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git
The following commit(s) were added to refs/heads/main by this push:
new 15ac9dbc73 [Relax][Onnx] Support Local Response Normalization (LRN) (#18668)
15ac9dbc73 is described below
commit 15ac9dbc73a4950f1e353691f2d8624f18ee8b90
Author: Nguyen Duy Loc <[email protected]>
AuthorDate: Tue Jan 20 23:23:19 2026 +0700
[Relax][Onnx] Support Local Response Normalization (LRN) (#18668)
This PR adds support for the Local Response Normalization operator in the ONNX frontend.
### Description
Implement and test the Local Response Normalization operator for the ONNX frontend.
### Implementation
- Use the avg_pool operator to compute LRN.
- Pseudocode:
```
def local_response_norm(input, size, alpha, beta, k):
    # Only 3D and 4D inputs are supported.
    dim = input.dim()
    check_only_support_3D_4D()
    # Square the input, then average x^2 over a window of `size`
    # channels by pooling over a temporary singleton dimension.
    div = input.mul(input)
    div = expand_dim(div, 1)
    pad_len = size // 2
    if dim == 3:
        div = avg_pool2d(div,
                         (size, 1),
                         stride=1,
                         padding=(pad_len, 0, pad_len, 0))
    else:
        div = avg_pool3d(div,
                         (size, 1, 1),
                         stride=1,
                         padding=(pad_len, 0, 0, pad_len, 0, 0))
    div = squeeze_dim(div, 1)
    # Denominator: (k + alpha * mean(x^2)) ** beta.
    div = div.mul(alpha).add(k).pow(beta)
    return input / div
```
### Reference
The implementation follows the same avg_pool-based approach as PyTorch:
https://discuss.pytorch.org/t/why-use-avgpool2d-and-avgpool3d-in-local-response-norm/97236
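
For intuition, here is a minimal NumPy sketch of the ONNX LRN formula that the avg_pool trick reproduces (the `lrn_reference` helper, its naive channel loop, and the odd-`size` assumption are illustrative only, not part of this PR):
```
import numpy as np

def lrn_reference(x, size, alpha=0.0001, beta=0.75, bias=1.0):
    # ONNX LRN: y = x / (bias + (alpha / size) * square_sum) ** beta,
    # where square_sum sums x^2 over a window of `size` channels
    # centered on each channel (values outside the tensor count as zero).
    # Illustrative sketch; assumes a 4D NCHW input and odd `size`.
    n, c, h, w = x.shape
    sq = x * x
    half = size // 2
    out = np.empty_like(x)
    for ch in range(c):
        lo, hi = max(0, ch - half), min(c, ch + half + 1)
        square_sum = sq[:, lo:hi].sum(axis=1)
        out[:, ch] = x[:, ch] / (bias + (alpha / size) * square_sum) ** beta
    return out
```
Because the pool runs with count_include_pad=True, the zero padding counts toward the mean, so alpha * avg_pool(x^2) matches the (alpha / size) * square_sum term above even at the channel edges.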
---
python/tvm/relax/frontend/onnx/onnx_frontend.py | 59 ++++++++++++++++++++++++-
tests/python/relax/test_frontend_onnx.py | 27 +++++++++++
2 files changed, 85 insertions(+), 1 deletion(-)
diff --git a/python/tvm/relax/frontend/onnx/onnx_frontend.py b/python/tvm/relax/frontend/onnx/onnx_frontend.py
index 9968eb5ed8..befe131a69 100644
--- a/python/tvm/relax/frontend/onnx/onnx_frontend.py
+++ b/python/tvm/relax/frontend/onnx/onnx_frontend.py
@@ -2432,6 +2432,63 @@ class MeanVarianceNormalization(OnnxOpConverter):
         return (data - data_mean) / relax.op.sqrt(data_squared_mean - data_mean_squared)
 
 
+class LocalResponseNormalization(OnnxOpConverter):
+    """Converts an onnx LocalResponseNormalization node into an equivalent Relax expression."""
+
+    @classmethod
+    def _impl_v13(cls, bb, inputs, attr, params):
+        data = inputs[0]
+        size = attr["size"]
+        alpha = attr.get("alpha", 0.0001)
+        beta = attr.get("beta", 0.75)
+        bias = attr.get("bias", 1.0)
+
+        if hasattr(data.struct_info, "ndim"):
+            ndim = data.struct_info.ndim
+        else:
+            ndim = len(data.struct_info.shape)
+
+        if ndim not in [3, 4]:
+            raise ValueError(f"LRN only supports 3D or 4D input, got {ndim}D.")
+
+        data_squared = relax.op.multiply(data, data)
+        data_expanded = relax.op.expand_dims(data_squared, axis=1)
+        pad_len = size // 2
+        if ndim == 3:
+            pool_padding = [pad_len, 0, pad_len, 0]
+            pool_op = relax.op.nn.avg_pool2d
+            pool_size = (size, 1)
+            layout = "NCHW"
+            strides = (1, 1)
+        else:
+            pool_padding = [pad_len, 0, 0, pad_len, 0, 0]
+            pool_op = relax.op.nn.avg_pool3d
+            pool_size = (size, 1, 1)
+            layout = "NCDHW"
+            strides = (1, 1, 1)
+
+        data_avgpool = pool_op(
+            data_expanded,
+            pool_size=pool_size,
+            strides=strides,
+            padding=pool_padding,
+            layout=layout,
+            ceil_mode=False,
+            count_include_pad=True,
+        )
+        data_squeezed = relax.op.squeeze(data_avgpool, axis=1)
+
+        const_alpha = relax.const(alpha, dtype="float32")
+        const_bias = relax.const(bias, dtype="float32")
+        const_beta = relax.const(beta, dtype="float32")
+
+        scale = relax.op.multiply(data_squeezed, const_alpha)
+        scale = relax.op.add(scale, const_bias)
+        denominator = relax.op.power(scale, const_beta)
+
+        return relax.op.divide(data, denominator)
+
+
 class Pool(OnnxOpConverter):
     """A helper class for pool op converters."""
@@ -3863,6 +3920,7 @@ def _get_convert_map():
"EmbedLayerNormalization": EmbedLayerNormalization,
"InstanceNormalization": InstanceNormalization,
"MeanVarianceNormalization": MeanVarianceNormalization,
+ "LRN": LocalResponseNormalization,
# defs/reduction
"ReduceMax": ReduceMax,
"ReduceMin": ReduceMin,
@@ -3902,7 +3960,6 @@ def _get_convert_map():
"Unique": Unique,
"NonZero": NonZero,
# "If": If,
- # "LRN": LRN,
# "MaxRoiPool": MaxRoiPool,
# "RoiAlign": RoiAlign,
"NonMaxSuppression": NonMaxSuppression,
diff --git a/tests/python/relax/test_frontend_onnx.py b/tests/python/relax/test_frontend_onnx.py
index df94c13478..344bc26065 100644
--- a/tests/python/relax/test_frontend_onnx.py
+++ b/tests/python/relax/test_frontend_onnx.py
@@ -1673,6 +1673,33 @@ def test_embedlayernormalization():
     )
 
 
+def test_local_response_norm():
+    lrn_node = helper.make_node(
+        op_type="LRN",
+        inputs=["input"],
+        outputs=["output"],
+        name="LRN_Node",
+        alpha=0.0001,
+        beta=0.75,
+        bias=1.0,
+        size=3,
+    )
+
+    graph = helper.make_graph(
+        [lrn_node],
+        "local_response_norm_test",
+        inputs=[
+            helper.make_tensor_value_info("input", TensorProto.FLOAT, [1, 3, 32, 32]),
+        ],
+        outputs=[
+            helper.make_tensor_value_info("output", TensorProto.FLOAT, [1, 3, 32, 32]),
+        ],
+    )
+
+    model = helper.make_model(graph, producer_name="local_response_norm_test")
+    check_correctness(model)
+
+
 def create_reduce_test_parameters_axes_attr():
     output = []
     for value in [True, False]: