gemini-code-assist[bot] commented on code in PR #18878:
URL: https://github.com/apache/tvm/pull/18878#discussion_r2893437810
##########
python/tvm/relax/frontend/onnx/onnx_frontend.py:
##########
@@ -1974,14 +1974,49 @@ def _impl_v11(cls, bb, inputs, attr, params):
class Tile(OnnxOpConverter):
"""Converts an onnx Tile node into an equivalent Relax expression."""
+ @staticmethod
+ def _tensor_length(expr):
+ shape = expr.struct_info.shape
+ if not isinstance(shape, relax.ShapeExpr):
+ return None
+
+ length = shape.values[0]
+ if not isinstance(length, tir.IntImm):
+ return None
+ return length.value
+
@classmethod
def _impl_v13(cls, bb, inputs, attr, params):
reps = get_constant(inputs[1], params)
if isinstance(reps, relax.Constant):
reps = reps.data.numpy().tolist()
- else:
- raise ValueError("Dynamic reps for Tile are supported yet.")
- return bb.emit_te(topi.tile, inputs[0], reps)
+ return bb.emit_te(topi.tile, inputs[0], reps)
+
+ data = inputs[0]
+ data_ndim = data.struct_info.ndim
+ reps_len = cls._tensor_length(reps)
+ if data_ndim == -1 or reps_len is None:
+            raise ValueError("Dynamic Tile requires known input rank and repeats length.")
+
+ if reps.struct_info.dtype != "int64":
+ reps = bb.normalize(relax.op.astype(reps, "int64"))
+
+ data_shape = list(data.struct_info.shape.values)
+        data_shape_tensor = bb.normalize(relax.op.shape_to_tensor(relax.ShapeExpr(data_shape)))
Review Comment:

The current implementation to get the shape of the input tensor `data`
relies on `data.struct_info.shape` being a `ShapeExpr`. However,
`data.struct_info.shape` can be `None` if the shape is not fully known (e.g.,
when only `ndim` is available), which would cause an `AttributeError`. A more
robust and idiomatic way to get the shape of a tensor as a new tensor is to use
`relax.op.shape_of(data)`. This simplifies the code and correctly handles cases
where shape information is not statically available as a `ShapeExpr`.
```suggestion
data_shape_tensor = bb.normalize(relax.op.shape_of(data))
```
##########
python/tvm/relax/frontend/onnx/onnx_frontend.py:
##########
@@ -1974,14 +1974,49 @@ def _impl_v11(cls, bb, inputs, attr, params):
class Tile(OnnxOpConverter):
"""Converts an onnx Tile node into an equivalent Relax expression."""
+ @staticmethod
+ def _tensor_length(expr):
+ shape = expr.struct_info.shape
+ if not isinstance(shape, relax.ShapeExpr):
+ return None
+
+ length = shape.values[0]
+ if not isinstance(length, tir.IntImm):
+ return None
+ return length.value
+
@classmethod
def _impl_v13(cls, bb, inputs, attr, params):
reps = get_constant(inputs[1], params)
if isinstance(reps, relax.Constant):
reps = reps.data.numpy().tolist()
- else:
- raise ValueError("Dynamic reps for Tile are supported yet.")
- return bb.emit_te(topi.tile, inputs[0], reps)
+ return bb.emit_te(topi.tile, inputs[0], reps)
+
+ data = inputs[0]
+ data_ndim = data.struct_info.ndim
+ reps_len = cls._tensor_length(reps)
+ if data_ndim == -1 or reps_len is None:
+            raise ValueError("Dynamic Tile requires known input rank and repeats length.")
+
+ if reps.struct_info.dtype != "int64":
+ reps = bb.normalize(relax.op.astype(reps, "int64"))
+
+ data_shape = list(data.struct_info.shape.values)
+        data_shape_tensor = bb.normalize(relax.op.shape_to_tensor(relax.ShapeExpr(data_shape)))
+ output_shape_tensor = reps
+
+ if data_ndim > reps_len:
+            reps_prefix = relax.const(_np.ones((data_ndim - reps_len,), dtype="int64"), "int64")
+            output_shape_tensor = bb.normalize(relax.op.concat([reps_prefix, output_shape_tensor], axis=0))
+ elif reps_len > data_ndim:
+            data_prefix = relax.const(_np.ones((reps_len - data_ndim,), dtype="int64"), "int64")
+            data_shape_tensor = bb.normalize(relax.op.concat([data_prefix, data_shape_tensor], axis=0))
+
+        output_shape_tensor = bb.normalize(relax.op.multiply(output_shape_tensor, data_shape_tensor))
+        output_shape = bb.normalize(relax.op.tensor_to_shape(output_shape_tensor))
+        output_shape_vars = [tir.Var(f"tile_dim_{i}", "int64") for i in range(max(data_ndim, reps_len))]
Review Comment:


The `Tile._impl_v13` method is vulnerable to resource exhaustion. It uses
`data_ndim` and `reps_len` (derived from the ONNX model's metadata) to create
lists and constants without validating their maximum values. Specifically:
- Lines 2009 and 2012: `relax.const(_np.ones((...), dtype="int64"),
"int64")` creates a numpy array and a Relax constant based on the difference
between `data_ndim` and `reps_len`.
- Line 2017: `output_shape_vars = [tir.Var(f"tile_dim_{i}", "int64") for i
in range(max(data_ndim, reps_len))]` creates a list of `tir.Var` objects.
A malicious ONNX model with extremely large rank or dimension values could
cause the importer to consume excessive memory and CPU, leading to a Denial of
Service (DoS) during the model import process.
Recommendation: Add validation checks to ensure that the rank of the input
data and the length of the repeats tensor are within reasonable bounds (e.g.,
the maximum tensor rank supported by TVM) before using them to allocate
resources.
##########
tests/python/relax/test_frontend_onnx.py:
##########
@@ -2700,6 +2700,30 @@ def verify_tile(in_shape, repeats, out_shape):
verify_tile(x.shape, repeats, z_array.shape)
+@pytest.mark.parametrize("dynamic_input", [True, False])
+def test_tile_dynamic_repeats(dynamic_input):
+ x = np.random.rand(2, 3).astype(np.float32)
+ repeats = np.array([2, 2], dtype=np.int64)
+ out_shape = np.tile(x, repeats).shape
+
+ input_shape = ["?", "?"] if dynamic_input else list(x.shape)
+    output_shape = ["?" for _ in out_shape] if dynamic_input else list(out_shape)
+
+    node = helper.make_node("Tile", inputs=["input", "repeats"], outputs=["out"])
+ graph = helper.make_graph(
+ [node],
+ "tile_dynamic_repeats_test",
+ inputs=[
+            helper.make_tensor_value_info("input", TensorProto.FLOAT, input_shape),
+            helper.make_tensor_value_info("repeats", TensorProto.INT64, [len(repeats)]),
+        ],
+        outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, output_shape)],
+ )
+ model = helper.make_model(graph, producer_name="tile_dynamic_repeats_test")
+
+ check_correctness(model, inputs={"input": x, "repeats": repeats}, opset=13)
Review Comment:

The new test `test_tile_dynamic_repeats` is a good start for covering the
dynamic `repeats` case. However, it only covers the scenario where the rank of
the input tensor and the `repeats` tensor are the same. The ONNX `Tile`
operator has specific behavior when ranks are different, which involves padding
with 1s. The implementation in `onnx_frontend.py` includes logic for this, and
it's important to verify it.
I suggest parameterizing the test to include cases where:
- `rank(input) > rank(repeats)`
- `rank(input) < rank(repeats)`
This will make the test suite more comprehensive and ensure the
rank-mismatch logic is correct.
```python
@pytest.mark.parametrize("dynamic_input", [True, False])
@pytest.mark.parametrize(
"in_shape, repeats",
[
        ((2, 3), np.array([2, 2], dtype=np.int64)),  # rank(input) == rank(repeats)
        ((2, 3, 4), np.array([2, 2], dtype=np.int64)),  # rank(input) > rank(repeats)
        ((2, 3), np.array([2, 2, 2], dtype=np.int64)),  # rank(input) < rank(repeats)
    ],
)
def test_tile_dynamic_repeats(dynamic_input, in_shape, repeats):
x = np.random.rand(*in_shape).astype(np.float32)
out_shape = np.tile(x, repeats).shape
    input_shape = ["?" for _ in in_shape] if dynamic_input else list(in_shape)
    output_shape = ["?" for _ in out_shape] if dynamic_input else list(out_shape)
    node = helper.make_node("Tile", inputs=["input", "repeats"], outputs=["out"])
graph = helper.make_graph(
[node],
"tile_dynamic_repeats_test",
inputs=[
            helper.make_tensor_value_info("input", TensorProto.FLOAT, input_shape),
            helper.make_tensor_value_info("repeats", TensorProto.INT64, [len(repeats)]),
        ],
        outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, output_shape)],
    )
    model = helper.make_model(graph, producer_name="tile_dynamic_repeats_test")
    check_correctness(model, inputs={"input": x, "repeats": repeats}, opset=13)
```
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]