comaniac commented on a change in pull request #6248:
URL: https://github.com/apache/incubator-tvm/pull/6248#discussion_r469518979
##########
File path: tests/python/contrib/test_arm_compute_lib/test_pooling.py
##########
@@ -74,53 +142,133 @@ def test_pooling():
device = Device()
np.random.seed(0)
- for dtype, low, high, atol, rtol in [("float32", -127, 128, 0.001, 0.001), ("uint8", 0, 255, 0, 0)]:
- for size in [(2, 2), (3, 3)]:
- for stride in [(2, 2)]:
- shape = (1, size[0] + stride[0] * 5,
- size[1] + stride[1] * 5, 16)
- pad = (0, 0)
-
- inputs = {
- "a": tvm.nd.array(np.random.uniform(low, high,
shape).astype(dtype)),
- }
-
- outputs = []
- func = _get_model(shape, dtype, relay.nn.max_pool2d, size,
- stride, pad, True, iter(inputs))
- for acl in [False, True]:
- outputs.append(build_and_run(func, inputs, 1, None, device,
- enable_acl=acl)[0])
-
- params = {
- "size": size,
- "stride": stride,
- "shape": shape,
- "pooling type": "max",
- "dtype": dtype,
- "padding": pad
- }
- verify(outputs, atol=atol, rtol=rtol, params=params)
+ typef = ["nn.max_pool2d", "nn.avg_pool2d", "nn.l2_pool2d"]
+ dtype = [("float32", -127, 128, 0.001, 0.001), ("uint8", 0, 255, 1, 0)]
+ size = [(2, 2), (3, 3)]
+ stride = [(2, 2)]
+ pad = [(0, 0), (1, 1), (0, 1)]
+ ceil_mode = [False, True]
+ count_include_pad = [False, True]
+ input_shapes = [(8, 8, 16), (9, 9, 16)]
+ trials = generate_trials([typef, dtype, size, stride, pad, ceil_mode, count_include_pad, input_shapes], 3)
Review comment:
Why change back to random workloads? If you really think it's too
tedious to list all workloads with this number of parameters, `generate_trials`
should at least be deterministic. For example, it could enumerate all
combinations and prune invalid workloads.
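Something along these lines would keep the coverage while staying deterministic. This is only a minimal sketch; the helper name, argument layout, and the single pruning rule are illustrative, not code from this PR:
```python
from itertools import product


def generate_all_trials(parameter_lists):
    """Deterministically enumerate every combination of the parameter lists
    and prune combinations that are known to be unsupported."""
    trials = []
    for typef, dtype, size, stride, pad, ceil_mode, count_include_pad, shape in \
            product(*parameter_lists):
        # Example pruning rule: L2 pooling is not currently supported for uint8.
        if typef == "nn.l2_pool2d" and dtype[0] == "uint8":
            continue
        trials.append((typef, dtype, size, stride, pad,
                       ceil_mode, count_include_pad, shape))
    return trials
```
With that, the test loop could also drop its in-loop `continue`, since invalid combinations never reach it.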
##########
File path: python/tvm/relay/op/contrib/arm_compute_lib.py
##########
@@ -98,6 +99,45 @@ def qnn_conv_pattern():
pattern, wildcard(), wildcard(), is_constant(), is_constant())
return pattern
+ def avg_pool2d_pattern():
+ """Create a uint8 avg_pool2d pattern.
+
+ Returns
+ -------
+ pattern : dataflow_pattern.AltPattern
+ Denotes the avg_pool2d pattern.
+ """
+ pattern = is_op('cast')(wildcard())
+ pattern = is_op('nn.avg_pool2d')(pattern)
+ pattern = is_op('cast')(pattern)
+ return pattern
+
+ def global_avg_pool2d_pattern():
+ """Create a uint8 global_avg_pool2d pattern.
+
+ Returns
+ -------
+ pattern : dataflow_pattern.AltPattern
+ Denotes the global_avg_pool2d pattern.
+ """
+ pattern = is_op('cast')(wildcard())
+ pattern = is_op('nn.global_avg_pool2d')(pattern)
+ pattern = is_op('cast')(pattern)
+ return pattern
Review comment:
Seems like the underlying process for these two patterns is the same
(`CreateCompositeAvgPool2DJSONNode`), so I feel you can merge them into one
pattern, like:
```python
pattern = is_op('cast')(wildcard())
pattern = is_op('nn.avg_pool2d')(pattern) | is_op('nn.global_avg_pool2d')(pattern)
pattern = is_op('cast')(pattern)
```
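For reference, a self-contained version of that merged helper could look like the sketch below. The function name and docstring are assumptions; the body just combines the two pattern functions from this diff with the reviewer's `|` suggestion:
```python
from tvm.relay.dataflow_pattern import is_op, wildcard


def avg_pool2d_pattern():
    """Create a uint8 avg_pool2d / global_avg_pool2d pattern."""
    # Both variants share the cast -> pool -> cast structure, so an AltPattern
    # in the middle covers either pooling op.
    pattern = is_op('cast')(wildcard())
    pattern = is_op('nn.avg_pool2d')(pattern) | is_op('nn.global_avg_pool2d')(pattern)
    pattern = is_op('cast')(pattern)
    return pattern
```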
##########
File path: tests/python/contrib/test_arm_compute_lib/infrastructure.py
##########
@@ -129,9 +129,20 @@ def build_module(mod, target, params=None, enable_acl=True, tvm_ops=0, acl_parti
def build_and_run(mod, inputs, outputs, params, device, enable_acl=True, no_runs=1,
- tvm_ops=0, acl_partitions=1):
+ tvm_ops=0, acl_partitions=1, config=None):
"""Build and run the relay module."""
- lib = build_module(mod, device.target, params, enable_acl, tvm_ops, acl_partitions)
+ if not config:
Review comment:
```suggestion
if config is None:
```
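The distinction matters because an explicitly passed empty dict is falsy. A minimal illustration with a hypothetical `build` helper and a hypothetical default config, not code from this PR:
```python
def build(config=None):
    # `config is None` only substitutes the default when no argument was given;
    # `not config` would also overwrite an explicitly passed empty dict.
    if config is None:
        config = {"debug": False}  # hypothetical default config
    return config


print(build({}))    # -> {}: the caller's empty dict is kept
print(build(None))  # -> {'debug': False}: the default is used
```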
##########
File path: tests/python/contrib/test_arm_compute_lib/test_pooling.py
##########
@@ -74,53 +142,133 @@ def test_pooling():
device = Device()
np.random.seed(0)
- for dtype, low, high, atol, rtol in [("float32", -127, 128, 0.001, 0.001), ("uint8", 0, 255, 0, 0)]:
- for size in [(2, 2), (3, 3)]:
- for stride in [(2, 2)]:
- shape = (1, size[0] + stride[0] * 5,
- size[1] + stride[1] * 5, 16)
- pad = (0, 0)
-
- inputs = {
- "a": tvm.nd.array(np.random.uniform(low, high,
shape).astype(dtype)),
- }
-
- outputs = []
- func = _get_model(shape, dtype, relay.nn.max_pool2d, size,
- stride, pad, True, iter(inputs))
- for acl in [False, True]:
- outputs.append(build_and_run(func, inputs, 1, None, device,
- enable_acl=acl)[0])
-
- params = {
- "size": size,
- "stride": stride,
- "shape": shape,
- "pooling type": "max",
- "dtype": dtype,
- "padding": pad
- }
- verify(outputs, atol=atol, rtol=rtol, params=params)
+ typef = ["nn.max_pool2d", "nn.avg_pool2d", "nn.l2_pool2d"]
+ dtype = [("float32", -127, 128, 0.001, 0.001), ("uint8", 0, 255, 1, 0)]
+ size = [(2, 2), (3, 3)]
+ stride = [(2, 2)]
+ pad = [(0, 0), (1, 1), (0, 1)]
+ ceil_mode = [False, True]
+ count_include_pad = [False, True]
+ input_shapes = [(8, 8, 16), (9, 9, 16)]
+ trials = generate_trials([typef, dtype, size, stride, pad, ceil_mode, count_include_pad, input_shapes], 3)
+
+ for typef, (dtype, low, high, atol, rtol), size, stride, pad, ceil_mode, count_include_pad, \
+ input_shape in trials:
+
+ # L2 pooling not currently supported for uint8
+ if typef == "nn.l2_pool2d" and dtype == "uint8":
+ continue
Review comment:
That's why I don't suggest random workloads. You wouldn't need this check
if all workloads were hard-coded.
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]