mbrookhart commented on a change in pull request #9421:
URL: https://github.com/apache/tvm/pull/9421#discussion_r740667148
##########
File path: tests/python/relay/test_vm.py
##########
@@ -999,6 +999,71 @@ def test_shape_func_nested_function():
compiler.lower(mod, "llvm")
+def test_storage_size_and_offset_on_cpu():
+ """Tests allocations place sizes and offsets on the CPU host even if the
rest
+ of the computation is on a different device type."""
+
+ # CPU = device type 1
+ # GPU = device type 2
+ def input():
+ return tvm.parser.fromtext(
+ """
+ #[version = "0.0.5"]
+ def @main(%a: Tensor[(5, 7), float32],
+ param_device_types=[2], result_device_type=2) {
+ add(%a, %a)
+ }
+ """
+ )
+
+ exe = relay.vm.compile(
+ input(),
+ tvm.target.Target("cuda"),
+ )
+
+ print(exe.constants)
+ print(exe.bytecode)
+
+ # This program needs two constants:
+ # - The size of the tensor's storage (first arg) to alloc_storage
+ # - The offset of the tensor within the storage (second arg) to
alloc_tensor
+ # Both should be on the CPU
+ assert not "on device of type 2" in exe.constants
+ assert "on device of type 1" in exe.constants
+
+
+def test_reshape_shape_on_cpu():
+ """Tests the argument to a reshape places the shape on the CPU host even
if the rest
+    of the computation is on a different device type."""
+
+ # CPU = device type 1
+ # GPU = device type 2
+ def input():
+ newshape = [2, 4, 2]
+ metatable = {"relay.Constant": [relay.const(newshape, dtype="int64")]}
+ return tvm.parser.fromtext(
+ """
+ #[version = "0.0.5"]
+ def @main(%x: Tensor[(2, 8), float32],
+ param_device_types=[2], result_device_type=2) {
+ reshape(%x, newshape=[2, 4, 2])
+ }
+ """
+ )
+
+ exe = relay.vm.compile(
+ input(),
+ tvm.target.Target("cuda"),
+ )
+
+ print(exe.constants)
Review comment:
Nit, remove prints in tests
##########
File path: tests/python/relay/test_vm.py
##########
@@ -999,6 +999,71 @@ def test_shape_func_nested_function():
compiler.lower(mod, "llvm")
+def test_storage_size_and_offset_on_cpu():
+ """Tests allocations place sizes and offsets on the CPU host even if the
rest
+ of the computation is on a different device type."""
+
+ # CPU = device type 1
+ # GPU = device type 2
+ def input():
+ return tvm.parser.fromtext(
+ """
+ #[version = "0.0.5"]
+ def @main(%a: Tensor[(5, 7), float32],
+ param_device_types=[2], result_device_type=2) {
+ add(%a, %a)
+ }
+ """
+ )
+
+ exe = relay.vm.compile(
+ input(),
+ tvm.target.Target("cuda"),
+ )
+
+ print(exe.constants)
Review comment:
Nit, remove prints in tests
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]