This is an automated email from the ASF dual-hosted git repository.
syfeng pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git
The following commit(s) were added to refs/heads/main by this push:
new 4e36b9b75b [Relax][PyTorch] Add mul_.Tensor, max.default, min.default and pow.Scalar Op Support into Exported Program Frontend (#17843)
4e36b9b75b is described below
commit 4e36b9b75b2186e3a3f1da78c80fa056cb6a1592
Author: Deivanayaki S <[email protected]>
AuthorDate: Fri Apr 18 11:26:24 2025 +0530
[Relax][PyTorch] Add mul_.Tensor, max.default, min.default and pow.Scalar Op Support into Exported Program Frontend (#17843)
* add mappings for the missing binary and unary ops
* add test cases for the newly supported ops
---------
Co-authored-by: deivanayakisankaralingam <deiva@Deivanayaki>
---
 .../frontend/torch/exported_program_translator.py |  4 +++
 .../relax/test_frontend_from_exported_program.py  | 39 ++++++++++++++++++++++
 2 files changed, 43 insertions(+)
diff --git a/python/tvm/relax/frontend/torch/exported_program_translator.py b/python/tvm/relax/frontend/torch/exported_program_translator.py
index 9064de37f0..cecffa753f 100644
--- a/python/tvm/relax/frontend/torch/exported_program_translator.py
+++ b/python/tvm/relax/frontend/torch/exported_program_translator.py
@@ -345,11 +345,15 @@ class ExportedProgramImporter(BaseFXGraphImporter):
             ),
             "max.other": self._binary_op(relax.op.maximum, max),
             "min.other": self._binary_op(relax.op.minimum, min),
+            "max.default": self._unary_op(relax.op.max),
+            "min.default": self._unary_op(relax.op.min),
             "remainder.Tensor": self._binary_op(relax.op.mod, operator.mod),
             "remainder.Scalar": self._binary_op(relax.op.mod, operator.mod),
             "mul.Tensor": self._binary_op(relax.op.multiply, operator.mul),
+            "mul_.Tensor": self._binary_op(relax.op.multiply, operator.mul),
             "ne.Tensor": self._binary_op(relax.op.not_equal, operator.ne),
             "ne.Scalar": self._binary_op(relax.op.not_equal, operator.ne),
+            "pow.Scalar": self._binary_op(relax.op.power, operator.pow),
             "pow.Tensor_Scalar": self._binary_op(relax.op.power, operator.pow),
             "pow.Tensor_Tensor": self._binary_op(relax.op.power, operator.pow),
             "sub.Tensor": self._binary_op(relax.op.subtract, operator.sub),
diff --git a/tests/python/relax/test_frontend_from_exported_program.py b/tests/python/relax/test_frontend_from_exported_program.py
index 4c60fcd651..6cdefbb12e 100644
--- a/tests/python/relax/test_frontend_from_exported_program.py
+++ b/tests/python/relax/test_frontend_from_exported_program.py
@@ -474,6 +474,44 @@ def test_extended_unary_ops():
     verify_model(Reciprocal(), example_args, {}, expected_reciprocal)
 
+    # Returns the maximum value of all elements in the input tensor.
+    class MaxModel(Module):
+        def forward(self, input):
+            return torch.max(input)
+
+    @tvm.script.ir_module
+    class expected_max:
+        @R.function
+        def main(
+            input: R.Tensor((1, 3, 10, 10), dtype="float32")
+        ) -> R.Tuple(R.Tensor((), dtype="float32")):
+            with R.dataflow():
+                lv: R.Tensor((), dtype="float32") = R.max(input, axis=None, keepdims=False)
+                gv: R.Tuple(R.Tensor((), dtype="float32")) = (lv,)
+                R.output(gv)
+            return gv
+
+    verify_model(MaxModel(), example_args, {}, expected_max)
+
+    # Returns the minimum value of all elements in the input tensor.
+    class MinModel(Module):
+        def forward(self, input):
+            return torch.min(input)
+
+    @tvm.script.ir_module
+    class expected_min:
+        @R.function
+        def main(
+            input: R.Tensor((1, 3, 10, 10), dtype="float32")
+        ) -> R.Tuple(R.Tensor((), dtype="float32")):
+            with R.dataflow():
+                lv: R.Tensor((), dtype="float32") = R.min(input, axis=None, keepdims=False)
+                gv: R.Tuple(R.Tensor((), dtype="float32")) = (lv,)
+                R.output(gv)
+            return gv
+
+    verify_model(MinModel(), example_args, {}, expected_min)
+
 
 def test_hardtanh():
     class Hardtanh(torch.nn.Module):
@@ -804,6 +842,7 @@ operator_binary_1 = [
     (torch.ops.aten.add_, R.add),
     (operator.sub, R.subtract),
     (operator.mul, R.multiply),
+    (torch.ops.aten.mul_, R.multiply),
     (operator.truediv, R.divide),
     (operator.floordiv, R.floor_divide),
     (operator.pow, R.power),
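
A minimal end-to-end sketch of how these mappings get exercised (illustrative only: the module and variable names here are hypothetical, and torch.export may decompose or functionalize the in-place mul_ into a plain mul depending on decomposition settings):

    import torch
    from torch.export import export
    from tvm.relax.frontend.torch import from_exported_program

    class NewOps(torch.nn.Module):
        def forward(self, x):
            z = x.clone()
            z.mul_(x)                # in-place multiply
            hi = torch.max(z)        # scalar max over all elements
            lo = torch.min(z)        # scalar min over all elements
            p = torch.pow(2.0, z)    # scalar base raised to a tensor exponent
            return hi, lo, p

    example_args = (torch.randn(1, 3, 10, 10),)
    exported_program = export(NewOps().eval(), args=example_args)
    mod = from_exported_program(exported_program)
    mod.show()  # Relax IRModule using R.max, R.min, R.multiply and R.power
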