This is an automated email from the ASF dual-hosted git repository.

ruihangl pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git


The following commit(s) were added to refs/heads/main by this push:
     new cf6e61b285 [Relax] Allow ingesting Upsample module from torch.export 
either using Size or Scale Factor argument (#17721)
cf6e61b285 is described below

commit cf6e61b2859fc1115bf285c79c5726f4e1433458
Author: Hugo Latendresse <[email protected]>
AuthorDate: Mon Mar 10 14:49:48 2025 -0400

    [Relax] Allow ingesting Upsample module from torch.export either using Size 
or Scale Factor argument (#17721)
    
    Torch's Upsample module can only accommodate the Size or
    Scale Factor argument but not both. Related to that limitation,
    there was previously a bug in the parsing of the arguments
    (the wrong order of arguments was assumed).
    
    This PR fixes that and adds a unit test.
---
 .../frontend/torch/exported_program_translator.py  | 41 +++++++++++++++----
 tests/python/relax/test_from_exported_to_cuda.py   | 46 +++++++++++++++++++---
 2 files changed, 74 insertions(+), 13 deletions(-)

diff --git a/python/tvm/relax/frontend/torch/exported_program_translator.py 
b/python/tvm/relax/frontend/torch/exported_program_translator.py
index e8e8706714..f3c0a64676 100644
--- a/python/tvm/relax/frontend/torch/exported_program_translator.py
+++ b/python/tvm/relax/frontend/torch/exported_program_translator.py
@@ -92,7 +92,12 @@ class ExportedProgramImporter(BaseFXGraphImporter):
         )
 
     def _upsample_impl(
-        self, x: relax.Expr, size, align_corners: bool, scale_factor, method: 
str
+        self,
+        x: relax.Expr,
+        size,
+        scale_factor,
+        method: str,
+        align_corners: bool,
     ) -> relax.Var:
         coord_trans = "align_corners" if align_corners else "half_pixel"
 
@@ -119,17 +124,39 @@ class ExportedProgramImporter(BaseFXGraphImporter):
         align_corners = (
             node.args[2] if len(node.args) > 2 else 
node.kwargs.get("align_corners", True)
         )
-        scale_factor = node.args[3] if len(node.args) > 3 else 
node.kwargs.get("scale_factor", None)
-        return self._upsample_impl(x, size, align_corners, scale_factor, 
"linear")
+        scale_factor = node.args[3] if len(node.args) > 3 else 
node.kwargs.get("scale_factor", 1)
+        return self._upsample_impl(
+            x, size=size, scale_factor=scale_factor, method="linear", 
align_corners=align_corners
+        )
 
     def _upsample_nearest2d(self, node: fx.node) -> relax.Var:
         x = self.env[node.args[0]]
         size = node.args[1] if len(node.args) > 1 else node.kwargs.get("size", 
None)
-        align_corners = (
-            node.args[2] if len(node.args) > 2 else 
node.kwargs.get("align_corners", True)
+
+        if size:
+            scale_factor = None  # Can only define size or scale_factor, not 
both
+            align_corners = (
+                node.args[2] if len(node.args) > 2 else 
node.kwargs.get("align_corners", None)
+            )
+
+        else:
+            # TODO figure out why pytorch export passes a list such as
+            # [scale_factor,scale_factor] instead of just an int for
+            # scale_factor. Using first element for now
+            scale_factor = (
+                node.args[2][0] if len(node.args) > 2 else 
node.kwargs.get("scale_factor", 1)
+            )
+            align_corners = (
+                node.args[3] if len(node.args) > 3 else 
node.kwargs.get("align_corners", None)
+            )
+
+        return self._upsample_impl(
+            x,
+            size=size,
+            scale_factor=scale_factor,
+            method="nearest_neighbor",
+            align_corners=align_corners,
         )
-        scale_factor = node.args[3] if len(node.args) > 3 else 
node.kwargs.get("scale_factor", None)
-        return self._upsample_impl(x, size, align_corners, scale_factor, 
"nearest_neighbor")
 
     ########## Manipulation ##########
 
diff --git a/tests/python/relax/test_from_exported_to_cuda.py 
b/tests/python/relax/test_from_exported_to_cuda.py
index d39bb8e9fe..69daab36a5 100644
--- a/tests/python/relax/test_from_exported_to_cuda.py
+++ b/tests/python/relax/test_from_exported_to_cuda.py
@@ -15,14 +15,14 @@
 # specific language governing permissions and limitations
 # under the License.
 
+import tvm
+from tvm import relax
+import tvm.testing
 import numpy as np
 import torch
 from torch.export import export
-
-import tvm
-import tvm.testing
-from tvm import relax
 from tvm.relax.frontend.torch import from_exported_program
+from torch.nn import Softmax, Upsample
 
 
 def assert_torch_output_vs_tvm_from_exported_to_cuda(raw_data, torch_module, 
target, dev):
@@ -42,8 +42,6 @@ def 
assert_torch_output_vs_tvm_from_exported_to_cuda(raw_data, torch_module, tar
     tvm_mod, tvm_params = relax.frontend.detach_params(mod_from_torch)
 
     relax_pipeline = 
relax.get_default_pipeline(tvm.target.Target.from_device(tvm.cuda()))
-    # TODO try pipeline below?
-    # releax_pipeline = 
relax.backend.cuda.pipeline.get_default_pipeline(target)
     ex = relax.build(tvm_mod, target=target, relax_pipeline=relax_pipeline)
     vm = relax.VirtualMachine(ex, dev)
 
@@ -57,6 +55,42 @@ def 
assert_torch_output_vs_tvm_from_exported_to_cuda(raw_data, torch_module, tar
     np.testing.assert_allclose(actual=actual, desired=desired, rtol=1e-5, 
atol=1e-5)
 
 
[email protected]_targets("cuda")
+def test_upsample_with_size(target, dev):
+    """
+    The Upsample module can be used with the size argument or the scale
+    factor argument but not both. This tests the former.
+    """
+    batch_size = 1
+    channels = 3
+    height, width = 8, 8
+
+    torch_module = Upsample(size=(64, 64), mode="nearest", 
recompute_scale_factor=None)
+
+    raw_data = np.random.rand(batch_size, channels, height, 
width).astype("float32")
+
+    assert_torch_output_vs_tvm_from_exported_to_cuda(raw_data, torch_module, 
target, dev)
+
+
[email protected]_targets("cuda")
+def test_upsample_with_scale_factor(target, dev):
+    """
+    The Upsample module can be used with the size argument or the scale
+    factor argument but not both. This tests the latter.
+    """
+    batch_size = 2
+    channels = 3
+    height, width = 32, 32
+
+    torch_module = Upsample(
+        size=None, scale_factor=7, mode="nearest", align_corners=None, 
recompute_scale_factor=True
+    )
+
+    raw_data = np.random.rand(batch_size, channels, height, 
width).astype("float32")
+
+    assert_torch_output_vs_tvm_from_exported_to_cuda(raw_data, torch_module, 
target, dev)
+
+
 @tvm.testing.parametrize_targets("cuda")
 def test_linalg_vector_norm(target, dev):
     class VectorNorm0(torch.nn.Module):

Reply via email to