This is an automated email from the ASF dual-hosted git repository.

tqchen pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git


The following commit(s) were added to refs/heads/main by this push:
     new 010089eafe [Hotfix] Fix the conflicts about ffi-related updated names (#18287)
010089eafe is described below

commit 010089eafed221fd73e6f4fb9cd582ac3d63e2e7
Author: Shushi Hong <[email protected]>
AuthorDate: Mon Sep 8 20:33:53 2025 -0400

    [Hotfix] Fix the conflicts about ffi-related updated names (#18287)
    
    * Change registration of mock softmax function
    
    * Update check_asf_header.sh
    
    Remove unnecessary blank line in check_asf_header.sh
    
    * Update check_asf_header.sh
    
    * fix
---
 python/tvm/relax/relax_to_pyfunc_converter.py        | 14 ++++++++------
 tests/python/relax/test_relax_to_pyfunc_converter.py |  2 +-
 2 files changed, 9 insertions(+), 7 deletions(-)

diff --git a/python/tvm/relax/relax_to_pyfunc_converter.py b/python/tvm/relax/relax_to_pyfunc_converter.py
index 3de27d78c8..be985f847a 100644
--- a/python/tvm/relax/relax_to_pyfunc_converter.py
+++ b/python/tvm/relax/relax_to_pyfunc_converter.py
@@ -27,6 +27,7 @@ import torch.nn.functional as F
 
 import tvm
 from tvm import relax
+from tvm.runtime import empty, from_dlpack, Tensor
 from tvm.ir import IRModule, Op
 
 
@@ -608,7 +609,7 @@ class RelaxExpressionConverter:
             for arg in converted_args:
                 if isinstance(arg, torch.Tensor):
                     # Convert PyTorch tensor to TVM NDArray via DLPack
-                    tvm_arg = tvm.nd.from_dlpack(torch.to_dlpack(arg))
+                    tvm_arg = from_dlpack(torch.to_dlpack(arg))
                     tvm_args.append(tvm_arg)
                 else:
                     tvm_args.append(arg)
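
    A minimal sketch of the renamed conversion path, assuming the from_dlpack
    import added above (tensor names here are illustrative, not from the patch):

        import torch
        from tvm.runtime import from_dlpack

        torch_tensor = torch.randn(2, 3)
        # Hand the PyTorch tensor to TVM via a DLPack capsule (zero-copy)
        tvm_tensor = from_dlpack(torch.to_dlpack(torch_tensor))
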
@@ -627,7 +628,7 @@ class RelaxExpressionConverter:
                 return f"<call_tir_error: {func_name} - Cannot determine 
output shape>"
 
             # Allocate output tensor
-            output_tensor = tvm.nd.array(tvm.nd.empty(output_shape, dtype="float32"))
+            output_tensor = empty(output_shape, dtype="float32")
             tvm_args.append(output_tensor)
 
             # Call the TIR function
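
    For the output allocation, a short sketch of the new single-call form,
    assuming the (shape, dtype) signature used above (shape is illustrative):

        from tvm.runtime import empty

        # Uninitialized float32 buffer on the default device; replaces the
        # previous tvm.nd.array(tvm.nd.empty(...)) double wrapping
        output_tensor = empty((2, 3), dtype="float32")
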
@@ -635,7 +636,7 @@ class RelaxExpressionConverter:
 
             # The result is in the output_tensor we allocated
             # Convert result back to PyTorch tensor via DLPack
-            return torch.from_dlpack(output_tensor.to_dlpack())
+            return torch.from_dlpack(output_tensor)
 
         except (RuntimeError, ValueError, TypeError) as error:
             return f"<call_tir_error: {func_name} - {error}>"
@@ -669,7 +670,7 @@ class RelaxExpressionConverter:
             for arg in converted_args:
                 if isinstance(arg, torch.Tensor):
                     # Convert PyTorch tensor to TVM NDArray via DLPack
-                    tvm_arg = tvm.nd.from_dlpack(torch.to_dlpack(arg))
+                    tvm_arg = from_dlpack(torch.to_dlpack(arg))
                     tvm_args.append(tvm_arg)
                 else:
                     tvm_args.append(arg)
@@ -678,8 +679,9 @@ class RelaxExpressionConverter:
             result = packed_function(*tvm_args)
 
             # Convert result back to PyTorch tensor via DLPack
-            if isinstance(result, tvm.nd.NDArray):
-                return torch.from_dlpack(result.to_dlpack())
+            if isinstance(result, Tensor):
+                # Convert TVM Tensor to PyTorch tensor
+                return torch.from_dlpack(result)
             else:
                 return result
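
    The type check follows the ffi rename from tvm.nd.NDArray to
    tvm.runtime.Tensor; a small self-contained sketch (the helper name is
    hypothetical):

        import torch
        from tvm.runtime import Tensor

        def to_torch(result):
            # Updated check: tvm.runtime.Tensor replaces tvm.nd.NDArray
            if isinstance(result, Tensor):
                return torch.from_dlpack(result)
            return result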
 
diff --git a/tests/python/relax/test_relax_to_pyfunc_converter.py b/tests/python/relax/test_relax_to_pyfunc_converter.py
index 6dce309315..ec37e6e77d 100644
--- a/tests/python/relax/test_relax_to_pyfunc_converter.py
+++ b/tests/python/relax/test_relax_to_pyfunc_converter.py
@@ -200,7 +200,7 @@ def create_mock_packed_function():
         return x
 
     # Register the function globally
-    tvm.register_func("my_softmax", mock_softmax)
+    tvm.register_global_func("my_softmax", mock_softmax)
 
 
 class TestRelaxToPyFuncConverter:
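
A sketch of the test change, following the usage in the hunk above (the mock
simply returns its input, as in the test):

    import tvm

    def mock_softmax(x):
        # Identity stand-in, matching the test's mock above
        return x

    # Renamed ffi entry point: register_global_func replaces register_func
    tvm.register_global_func("my_softmax", mock_softmax)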
