trevor-m commented on a change in pull request #6905:
URL: https://github.com/apache/incubator-tvm/pull/6905#discussion_r522510919
##########
File path: tests/python/contrib/test_tensorrt.py
##########
@@ -874,13 +960,82 @@ def test_densenet121():
run_and_verify_model("densenet121")
+def test_tensorrt_integration():
+ # Integration tests
+ test_alexnet()
+ test_resnet18_v1()
+ test_resnet18_v2()
+ test_squeezenet()
+ test_mobilenet()
+ test_mobilenet_v2()
+ test_vgg11()
+ test_densenet121()
+
+
+def test_dynamic_offload(data_shape=(1, 32, 8, 8), k_shape=(1, 32, 3, 3)):
+ """
+ This test checks for proper dynamic offloading of relay graphs. An
addition between
+ the outputs of two conv2d's is performed, one of them having all static
args whereas
+ the other has an arg with dynamic shape. It is expected for the TRT
partitioner to
+ offload the conv2d with dynamic arg to TVM while running the other in TRT.
+ """
+ x = relay.var("x", shape=(data_shape[0], data_shape[1], Any(), Any()),
dtype="float32")
+ y = relay.var("y", shape=(data_shape), dtype="float32")
+ kernel = relay.var("kernel", shape=(k_shape), dtype="float32")
+
+ def get_expected():
+ def set_func_attr(func, compile_name, symbol_name):
+ func = func.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
+ func = func.with_attr("Inline", tvm.tir.IntImm("int32", 1))
+ func = func.with_attr("Compiler", compile_name)
+ func = func.with_attr("global_symbol", symbol_name)
+ return func
+
+ # Create a nested TRT function that matches the expected output
+ mod = tvm.IRModule()
+ var1 = relay.var("tensorrt_0_i0", shape=(data_shape), dtype="float32")
+ kernel_trt = relay.var("tensorrt_0_i1", shape=(k_shape),
dtype="float32")
+ out1 = relay.nn.conv2d(var1, kernel_trt, channels=k_shape[0],
kernel_size=k_shape[2:4])
+ f1 = GlobalVar("tensorrt_0")
+ func = relay.Function([var1, kernel_trt], out1)
+ func = set_func_attr(func, "tensorrt", "tensorrt_0")
+ mod[f1] = func
+ mod = relay.transform.InferType()(mod)
+
+ # Create the main function
+ out1 = relay.nn.conv2d(x, kernel, channels=k_shape[0],
kernel_size=k_shape[2:4])
+ out = relay.add(out1, f1(y, kernel))
+ f = relay.Function([x, y, kernel], out)
+ mod["main"] = f
+ mod = relay.transform.InferType()(mod)
+ return mod
+
+ # Create relay function that will be offloaded to TRT
+ out1 = relay.nn.conv2d(x, kernel, channels=k_shape[0],
kernel_size=k_shape[2:4])
+ out2 = relay.nn.conv2d(y, kernel, channels=k_shape[0],
kernel_size=k_shape[2:4])
+ out = relay.add(out1, out2)
+ f = relay.Function([x, y, kernel], out)
+
+ # Pass the function to TRT compilation
+ mod = tvm.IRModule()
+ mod["main"] = f
+ mod = relay.transform.InferType()(mod)
+ mod_trt, config = tensorrt.partition_for_tensorrt(mod, params={})
+
+ # Get the expected relay graph and compare
+ mod_exp = get_expected()
+ tvm.ir.assert_structural_equal(mod_trt, mod_exp, map_free_vars=True)
+ return
Review comment:
Don't need this return
##########
File path: tests/python/contrib/test_tensorrt.py
##########
@@ -874,13 +960,82 @@ def test_densenet121():
run_and_verify_model("densenet121")
+def test_tensorrt_integration():
+ # Integration tests
+ test_alexnet()
+ test_resnet18_v1()
+ test_resnet18_v2()
+ test_squeezenet()
+ test_mobilenet()
+ test_mobilenet_v2()
+ test_vgg11()
+ test_densenet121()
+
+
+def test_dynamic_offload(data_shape=(1, 32, 8, 8), k_shape=(1, 32, 3, 3)):
Review comment:
Let's move the args to variables inside the test. Same for the
serialization tests.
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]