lhutton1 commented on a change in pull request #6724:
URL: https://github.com/apache/incubator-tvm/pull/6724#discussion_r510173933



##########
File path: tests/python/contrib/test_arm_compute_lib/test_dense.py
##########
@@ -215,18 +231,18 @@ def test_codegen_dense():
     np.random.seed(0)
 
     dtype = ["float32"]
-    shape = [((1, 128), (16, 128), 16), ((32, 32), (32, 32), 32), ((1, 64), 
(1, 64), 1)]
+    shape = [(1, (1, 128), (16, 128), 16), (1, (32, 32), (32, 32), 32), (0, 
(1, 64), (1, 64), 1)]
     composite = [False, True]
     trials = generate_trials([dtype, shape, composite], 3)
 
-    for dtype, (shape, weight_shape, units), composite in trials:
+    for dtype, (acl_partitions, shape, weight_shape, units), composite in 
trials:
         inputs = {"a"}
 
         args = (shape, weight_shape, units, dtype)
 
         func, params = _get_model(*args, var_names=iter(inputs), 
has_bias=composite)
         exp_codegen = _get_expected_codegen(*args, has_bias=composite)
-        verify_codegen(func, exp_codegen, 1)
+        verify_codegen(func, exp_codegen, acl_partitions, 1 - acl_partitions)

Review comment:
      Why do we have a different calculation for the number of TVM ops 
compared to the test above? Intuitively, I feel like these should be the same.

##########
File path: tests/python/contrib/test_arm_compute_lib/test_dense.py
##########
@@ -20,8 +20,8 @@
 
 import tvm
 from tvm import relay
-
-from .infrastructure import (
+from tvm import testing
+from test_arm_compute_lib.infrastructure import (

Review comment:
       Is it worth moving to this format in all other tests for consistency?

##########
File path: tests/python/contrib/test_arm_compute_lib/test_dense.py
##########
@@ -323,7 +357,7 @@ def test_codegen_qnn_dense():
             has_bias=composite,
         )
         exp_codegen = _get_expected_codegen(*args, has_bias=composite)
-        verify_codegen(func, exp_codegen, 1)
+        verify_codegen(func, exp_codegen, acl_partitions, 2 - 2 * 
acl_partitions)

Review comment:
       Similar to above

##########
File path: tests/python/contrib/test_arm_compute_lib/test_pooling.py
##########
@@ -175,7 +181,8 @@ def test_pooling():
         ["nn.avg_pool2d", fp32_dtype, (2, 2), (2, 2), (1, 1), False, False, 
(16, 16, 16)],
         ["nn.avg_pool2d", fp32_dtype, (2, 2), (2, 2), (0, 0), False, True, 
(16, 16, 16)],
         ["nn.avg_pool2d", fp32_dtype, (3, 3), (2, 2), (0, 1), True, False, 
(15, 15, 16)],
-        ["nn.avg_pool2d", uint8_dtype, (2, 2), (2, 2), (1, 1), False, True, 
(16, 16, 16)],

Review comment:
      Did this test work prior to this PR?

##########
File path: tests/python/contrib/test_arm_compute_lib/test_network.py
##########
@@ -151,7 +152,34 @@ def get_model():
     )
 
 
+# Test disabled as it requires annotate target not promote unannotated tuples 
under previous operations annotation
[email protected](reason="no way of currently testing this")
+def test_squeezenet():
+    Device.load("test_config.json")
+
+    if skip_runtime_test():

Review comment:
      Just wondering why we explicitly skip this above using 
`pytest.mark.skip` as well as skip here? If it is purely because we don't have 
the runtime in CI, then this check alone should be enough.




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to