This is an automated email from the ASF dual-hosted git repository.
bgawrych pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git
The following commit(s) were added to refs/heads/master by this push:
new ded6096126 [FEATURE] Add pytest with benchmarking operator (#21088)
ded6096126 is described below
commit ded609612623fedcfb5fcc0366c5ee524b4a989b
Author: AdamGrabowski <[email protected]>
AuthorDate: Fri Jul 15 09:57:57 2022 +0200
[FEATURE] Add pytest with benchmarking operator (#21088)
* Add benchmarking operator pytest using benchmark_utils
* Unify quote mark use in benchmark
---
CONTRIBUTORS.md | 1 +
benchmark/opperf/rules/default_params.py | 8 +-
.../opperf/utils/benchmark_operators_pytest.py | 110 +++++++++++++++++++++
benchmark/opperf/utils/benchmark_utils.py | 25 ++++-
benchmark/opperf/utils/ndarray_utils.py | 6 +-
5 files changed, 141 insertions(+), 9 deletions(-)
diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md
index 792ab3f4d2..c214571224 100644
--- a/CONTRIBUTORS.md
+++ b/CONTRIBUTORS.md
@@ -300,6 +300,7 @@ List of Contributors
* [Maria Boerner](https://github.com/mariaboerner1987)
* [Zhenghui Jin](https://github.com/barry-jin)
* [Dominika Jedynak](https://github.com/DominikaJedynak)
+* [Adam Grabowski](https://github.com/agrabows)
Label Bot
---------
diff --git a/benchmark/opperf/rules/default_params.py
b/benchmark/opperf/rules/default_params.py
index 0474eea08f..69a22dfcac 100644
--- a/benchmark/opperf/rules/default_params.py
+++ b/benchmark/opperf/rules/default_params.py
@@ -779,10 +779,10 @@ PARAMS_OF_TYPE_NDARRAY = ["lhs", "rhs", "data", "base",
"exp", "sample",
"x", "condition", "a", "index", "raveL_data",
"label", "grid",
"A", "B", "C", "r1", "r2", "rois", "lrs", "wds",
"weights_sum_sq",
"grads_sum_sq", "mhs", "data1", "data2", "loc",
"parameters", "state",
- "state_cell"]
+ "state_cell", "tensor", "arrays", "mask",
"running_mean", "running_var"]
-PARAMS_OF_TYPE_NP_ARRAY = ["x1", "x2", "prototype", "object", "a", "b",
"fill_value", "array", "x", "arr",
+PARAMS_OF_TYPE_NP_ARRAY = ["x1", "x2", "prototype", "source_array", "object",
"a", "b", "fill_value", "array", "x", "arr",
"values", "ary", "seq", "arrays", "tup", "indices",
"m", "ar", "q", "p", "condition",
- "arys", "v", "A", "xp", "fp", "data", "mask",
"gamma", "beta", "running_mean",
- "running_var", "weight", "index", "lhs", "rhs"]
+ "arys", "v", "A", "xp", "fp", "data", "gamma",
"beta", "running_mean", "moving_mean", "moving_var",
+ "running_var", "weight", "index", "lhs", "rhs",
"parameters", "state", "mask", "bias"]
diff --git a/benchmark/opperf/utils/benchmark_operators_pytest.py
b/benchmark/opperf/utils/benchmark_operators_pytest.py
new file mode 100644
index 0000000000..2d61fca7ca
--- /dev/null
+++ b/benchmark/opperf/utils/benchmark_operators_pytest.py
@@ -0,0 +1,110 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import mxnet as mx
+import pytest
+
+from benchmark.opperf.utils.benchmark_utils import run_benchmark_operator
+
# Benchmark cases per operator. Each entry maps an operator name to a list of
# (input_shape, metadata) tuples, ordered small -> large; these parametrize the
# pytest below via generate_test_cases()/generate_test_ids().
test_cases = {
    "reshape": [((128, 128, 128), {"newshape": (128, 256, -1)}),
                ((256, 256, 256), {"newshape": (256, 512, -1)}),
                ((512, 512, 512), {"newshape": (512, 1024, -1)})],
    "swapaxes": [((64, 128, 64), {"axis1": 1, "axis2": 2}),
                 ((128, 256, 128), {"axis1": 1, "axis2": 2}),
                 ((256, 512, 256), {"axis1": 1, "axis2": 2})],
    "activation": [((128, 128, 128), {"actType": "relu"}),
                   ((256, 256, 256), {"actType": "relu"}),
                   ((512, 512, 512), {"actType": "relu"})],
    "batch_norm": [((128, 128, 128), {}),
                   ((256, 256, 256), {}),
                   ((512, 512, 512), {})],
    "convolution": [((16, 16, 16, 16, 16), {"numFilter": 8, "kernel": (3, 3, 3)}),
                    ((32, 32, 16, 16, 16), {"numFilter": 16, "kernel": (5, 5, 5)}),
                    ((32, 32, 32, 32, 32), {"numFilter": 16, "kernel": (7, 7, 7)})],
    "add": [((128, 128, 128), {}),
            ((256, 256, 256), {}),
            ((512, 512, 512), {})],
    "masked_softmax": [((128, 128, 128), {}),
                       ((256, 256, 256), {}),
                       ((512, 512, 512), {})],
    "slice": [((128, 128, 128), {"begin": (32, 32, 32), "end": (-32, -32, -32)}),
              ((256, 256, 256), {"begin": (64, 64, 64), "end": (-64, -64, -64)}),
              ((512, 512, 512), {"begin": (96, 96, 96), "end": (-96, -96, -96)})],
    "fully_connected": [((20, 20, 20, 20), {"numHidden": 30}),
                        ((60, 60, 60, 60), {"numHidden": 60}),
                        ((90, 90, 90, 90), {"numHidden": 90})],
    "batch_dot": [((10, 10, 10), {"matrix1": (20, 30), "matrix2": (30, 40)}),
                  ((20, 20, 20), {"matrix1": (40, 50), "matrix2": (50, 60)}),
                  ((40, 40, 40), {"matrix1": (60, 70), "matrix2": (70, 80)})],
}
+
def generate_test_cases(case_map=None):
    """Flatten a case mapping into pytest ``argvalues``.

    Parameters
    ----------
    case_map : dict, optional
        Mapping of operator name -> list of ``(shape, params)`` tuples.
        Defaults to the module-level ``test_cases``.

    Returns
    -------
    list of tuple
        One ``(op_name, shape, params)`` tuple per benchmark case, in the
        mapping's iteration order (must stay in sync with
        ``generate_test_ids``).
    """
    if case_map is None:
        case_map = test_cases
    return [(op_name, shape, params)
            for op_name, cases in case_map.items()
            for shape, params in cases]
+
def generate_test_ids(case_map=None):
    """Build human-readable pytest ids, parallel to ``generate_test_cases``.

    Each id has the form ``op-shape_AxBxC`` followed, when the case carries
    parameters, by ``-params_key_value`` segments where tuple values are
    rendered as ``AxB`` and scalar values via ``str``.

    Parameters
    ----------
    case_map : dict, optional
        Mapping of operator name -> list of ``(shape, params)`` tuples.
        Defaults to the module-level ``test_cases``.

    Returns
    -------
    list of str
        One id per case, in the same order as ``generate_test_cases``.
    """
    if case_map is None:
        case_map = test_cases

    def _fmt(value):
        # Tuples (shapes, kernels, slices) render as "AxBxC"; scalars as-is.
        if isinstance(value, tuple):
            return "x".join(str(v) for v in value)
        return str(value)

    test_ids = []
    for op_name, cases in case_map.items():
        for shape, params in cases:
            test_id = op_name + "-shape_" + _fmt(shape)
            if params:
                test_id += "-params"
                for key, value in params.items():
                    test_id += "_" + str(key) + "_" + _fmt(value)
            test_ids.append(test_id)
    return test_ids
+
# Per-operator factories for the extra inputs run_benchmark_operator needs
# beyond the main data tensor. Each factory receives the case's (shape, meta)
# pair and returns a kwargs dict; both nd-style and np-style argument names are
# emitted (e.g. "axis1"/"dim1") so the same dict serves every module.
generate_inputs = {
    "reshape": lambda shape, meta: {"newshape": meta["newshape"],
                                    "shape": meta["newshape"]},
    "swapaxes": lambda shape, meta: {"axis1": meta["axis1"], "axis2": meta["axis2"],
                                     "dim1": meta["axis1"], "dim2": meta["axis2"]},
    "activation": lambda shape, meta: {"act_type": meta["actType"]},
    # Per-channel stats are 1-D vectors sized by the channel axis (shape[1]).
    "batch_norm": lambda shape, meta: {"gamma": (shape[1],), "beta": (shape[1],),
                                       "running_mean": (shape[1],),
                                       "running_var": (shape[1],),
                                       "moving_mean": (shape[1],),
                                       "moving_var": (shape[1],)},
    "convolution": lambda shape, meta: {"weight": (meta["numFilter"], shape[1]) + meta["kernel"],
                                        "kernel": meta["kernel"],
                                        "bias": (meta["numFilter"],),
                                        "num_filter": meta["numFilter"]},
    # NOTE(review): round() on an mx ndarray relies on ndarray.__round__ to
    # binarize the random mask — confirm it round-trips element-wise.
    "masked_softmax": lambda shape, meta: {"mask": mx.np.array(round(mx.np.random.rand(*shape)),
                                                               dtype="bool")},
    "fully_connected": lambda shape, meta: {"weight": (meta["numHidden"], shape[-1]),
                                            "bias": (meta["numHidden"],),
                                            "num_hidden": meta["numHidden"],
                                            "flatten": False},
    "batch_dot": lambda shape, meta: {"lhs": shape + meta["matrix1"],
                                      "a": shape + meta["matrix1"],
                                      "rhs": shape + meta["matrix2"],
                                      "b": shape + meta["matrix2"]},
    "slice": lambda shape, meta: {"begin": meta["begin"], "end": meta["end"]},
}
+
@pytest.mark.parametrize(argnames=("op_name, shape, params"),
                         argvalues=generate_test_cases(), ids=generate_test_ids())
def test(op_name, shape, params):
    """Benchmark one operator case across the mx.nd / mx.np / mx.npx modules."""
    # Operators without a generate_inputs entry need no extra tensors.
    make_inputs = generate_inputs.get(op_name)
    additional_inputs = make_inputs(shape, params) if make_inputs is not None else {}
    run_benchmark_operator(name=op_name, size=shape,
                           additional_inputs=additional_inputs, profiler="python")
diff --git a/benchmark/opperf/utils/benchmark_utils.py
b/benchmark/opperf/utils/benchmark_utils.py
index 38a1c15a61..fc756f42b4 100644
--- a/benchmark/opperf/utils/benchmark_utils.py
+++ b/benchmark/opperf/utils/benchmark_utils.py
@@ -101,6 +101,25 @@ def get_mx_np_ndarray(ctx, in_tensor, dtype, initializer,
attach_grad=True):
tensor.wait_to_read()
return tensor
def adjust_op_name(module, name):
    """Translate an operator name between mx.nd and mx.np/mx.npx conventions.

    Only the four operators below differ in spelling (CamelCase in mx.nd,
    snake_case in the numpy-style namespaces); translation is applied only
    when the spelling given does not belong to the target module's style and
    the counterpart namespace actually exposes the name. Any other name is
    returned unchanged.
    """
    # snake_case (np/npx spelling) -> CamelCase (nd spelling); the reverse
    # map is derived so the two can never drift apart.
    np_to_nd_func = {
        "batch_norm": "BatchNorm",
        "fully_connected": "FullyConnected",
        "activation": "Activation",
        "convolution": "Convolution",
    }
    nd_to_np_func = {nd_name: np_name for np_name, nd_name in np_to_nd_func.items()}

    if (module == mx.nd and name in np_to_nd_func
            and (hasattr(mx.np, name) or hasattr(mx.npx, name))):
        return np_to_nd_func[name]
    if module in (mx.np, mx.npx) and name in nd_to_np_func and hasattr(mx.nd, name):
        return nd_to_np_func[name]
    return name
+
def parse_input_ndarray(input_dict):
"""Parse input for ndarray and extract array shape for better readability
@@ -242,9 +261,10 @@ def run_benchmark_operator(name, size = (128,128),
additional_inputs = {},
modules = [mx.nd, mx.np, mx.npx]
responses = []
for module in modules:
+ name = adjust_op_name(module, name)
if hasattr(module, name):
function = getattr(module, name)
- args = inspect.getargspec(function).args
+ args = inspect.signature(function).parameters.keys()
inputs = {}
for arg in args:
if arg in additional_inputs.keys():
@@ -252,8 +272,7 @@ def run_benchmark_operator(name, size = (128,128),
additional_inputs = {},
elif arg in arg_list[module]:
inputs.update({arg:size})
res = run_performance_test(function, run_backward=run_backward,
dtype=dtype, ctx=ctx,
- inputs=[inputs],
- warmup=warmup, runs=runs, profiler=profiler)
+ inputs=[inputs], warmup=warmup,
runs=runs, profiler=profiler)
responses.append(res)
else:
responses.append(str(module.__name__) + " does not have operator "
+ name)
diff --git a/benchmark/opperf/utils/ndarray_utils.py
b/benchmark/opperf/utils/ndarray_utils.py
index 3f5dda8f03..3ea3517deb 100644
--- a/benchmark/opperf/utils/ndarray_utils.py
+++ b/benchmark/opperf/utils/ndarray_utils.py
@@ -131,9 +131,11 @@ def get_mx_ndarray(ctx, in_tensor, dtype, initializer,
attach_grad=True):
elif isinstance(in_tensor, list):
tensor = nd.array(in_tensor, ctx=ctx, dtype=dtype)
elif isinstance(in_tensor, np.ndarray):
- tensor = nd.array(in_tensor, ctx=ctx, dtype=dtype)
+ tensor = nd.array(in_tensor)
+ elif isinstance(in_tensor, mx.np.ndarray):
+ tensor = in_tensor.as_nd_ndarray()
elif isinstance(in_tensor, nd.NDArray):
- tensor = in_tensor.as_in_context(ctx).astype(dtype=dtype)
+ tensor = in_tensor.as_in_context(ctx)
else:
raise ValueError("Invalid input type for creating input tensor. Input
can be tuple() of shape or Numpy Array or"
" MXNet NDArray. Given - ", in_tensor)