This is an automated email from the ASF dual-hosted git repository.
yongwww pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git
The following commit(s) were added to refs/heads/main by this push:
new 5ae3db2107 [Relax][PyTorch] Add stack.default and sum.default to
exported programs translator (#17814)
5ae3db2107 is described below
commit 5ae3db21077e804325835cc98a295e56147c72c4
Author: Hugo Latendresse <[email protected]>
AuthorDate: Thu Apr 10 23:45:03 2025 -0400
[Relax][PyTorch] Add stack.default and sum.default to exported programs
translator (#17814)
* stack correct
* sum correct in side script
* all pass
---
.../frontend/torch/exported_program_translator.py | 2 ++
tests/python/relax/test_from_exported_to_cuda.py | 28 ++++++++++++++++++++++
2 files changed, 30 insertions(+)
diff --git a/python/tvm/relax/frontend/torch/exported_program_translator.py b/python/tvm/relax/frontend/torch/exported_program_translator.py
index 73742f952b..875ec3b83e 100644
--- a/python/tvm/relax/frontend/torch/exported_program_translator.py
+++ b/python/tvm/relax/frontend/torch/exported_program_translator.py
@@ -381,6 +381,7 @@ class ExportedProgramImporter(BaseFXGraphImporter):
"mean.dim": self._mean,
"prod.default": self._prod,
"std.correction": self._std,
+ "sum.default": self._sum,
"sum.dim_IntList": self._sum,
"var.correction": self._var,
# search
@@ -409,6 +410,7 @@ class ExportedProgramImporter(BaseFXGraphImporter):
"split_with_sizes.default": self._split,
"squeeze.default": self._squeeze,
"squeeze.dim": self._squeeze,
+ "stack.default": self._stack,
"take.default": self._take,
"tile.default": self._tile,
"topk.default": self._topk,
diff --git a/tests/python/relax/test_from_exported_to_cuda.py b/tests/python/relax/test_from_exported_to_cuda.py
index 56ee527caf..8405f48576 100644
--- a/tests/python/relax/test_from_exported_to_cuda.py
+++ b/tests/python/relax/test_from_exported_to_cuda.py
@@ -512,5 +512,33 @@ def test_index_select(target, dev):
    assert_torch_output_vs_tvm_from_exported_to_cuda(raw_data, torch_module, target, dev)
+@tvm.testing.parametrize_targets("cuda")
+def test_stack(target, dev):
+ class StackModel(nn.Module):
+ def forward(self, x):
+ val1 = x[1, 4]
+ val2 = x[3, 2]
+ val3 = x[5, 6]
+ z = torch.stack([val1, val2, val3])
+ return z
+
+ torch_module = StackModel().eval()
+ raw_data = np.random.rand(10, 10, 10).astype("float32")
+    assert_torch_output_vs_tvm_from_exported_to_cuda(raw_data, torch_module, target, dev)
+
+
+@tvm.testing.parametrize_targets("cuda")
+def test_sum(target, dev):
+ class SumModel(nn.Module):
+ def forward(self, x):
+ new_vec = x[1, 4]
+ return new_vec.sum()
+
+ torch_module = SumModel().eval()
+
+ raw_data = np.random.rand(10, 10, 10).astype("float32")
+    assert_torch_output_vs_tvm_from_exported_to_cuda(raw_data, torch_module, target, dev)
+
+
if __name__ == "__main__":
tvm.testing.main()