This is an automated email from the ASF dual-hosted git repository.
haibin pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git
The following commit(s) were added to refs/heads/master by this push:
new 2193819 [MXNET-788] Fix for issue #11733 pooling op test (#12067)
2193819 is described below
commit 2193819d40792d0526118819b991111e7ac4162d
Author: Sam Skalicky <[email protected]>
AuthorDate: Sun Aug 12 12:43:19 2018 -0700
[MXNET-788] Fix for issue #11733 pooling op test (#12067)
* added support to check_consistency function to generate random numbers
for a specific datatype (i.e. fp16)
this ensures that for tests that compare results among different
precisions, data is generated in the least precise type and cast to the
most precise
changed test_pooling_with_type test case to specify fp16 precision for
random input data
renamed the 2nd test_pooling_with_type function to test_pooling_with_type2
so it doesn't redefine the first and both are tested
fixed equation formatting issue in pooling operator description
Added myself to the contributors readme file
* updated from latest in master (had old version of the file)
* shortened lines per lint spec
* renamed default_type argument to rand_type for clarity
updated function docstring with argument description
removed rand_type setting for non-max pooling tests
* cleaned up check_consistency function docstring
---
CONTRIBUTORS.md | 1 +
python/mxnet/test_utils.py | 17 +++++++++++------
src/operator/nn/pooling.cc | 3 +--
tests/python/gpu/test_operator_gpu.py | 21 ++++++++-------------
4 files changed, 21 insertions(+), 21 deletions(-)
diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md
index b04e4a3..6bc97bb 100644
--- a/CONTRIBUTORS.md
+++ b/CONTRIBUTORS.md
@@ -176,3 +176,4 @@ List of Contributors
* [Kou Ding](https://github.com/chinakook)
* [Istvan Fehervari](https://github.com/ifeherva)
* [Aaron Markham](https://github.com/aaronmarkham)
+* [Sam Skalicky](https://github.com/samskalicky)
diff --git a/python/mxnet/test_utils.py b/python/mxnet/test_utils.py
index e963d15..69d916e 100644
--- a/python/mxnet/test_utils.py
+++ b/python/mxnet/test_utils.py
@@ -479,10 +479,8 @@ def assert_almost_equal(a, b, rtol=None, atol=None,
names=('a', 'b'), equal_nan=
"""
rtol = get_rtol(rtol)
atol = get_atol(atol)
-
if almost_equal(a, b, rtol, atol, equal_nan=equal_nan):
return
-
index, rel = find_max_violation(a, b, rtol, atol)
np.set_printoptions(threshold=4, suppress=True)
msg = npt.build_err_msg([a, b],
@@ -1203,10 +1201,10 @@ def check_speed(sym, location=None, ctx=None, N=20,
grad_req=None, typ="whole",
else:
raise ValueError('typ can only be "whole" or "forward".')
-
def check_consistency(sym, ctx_list, scale=1.0, grad_req='write',
arg_params=None, aux_params=None, tol=None,
- raise_on_err=True, ground_truth=None, equal_nan=False,
use_uniform=False):
+ raise_on_err=True, ground_truth=None, equal_nan=False,
+ use_uniform=False, rand_type=np.float64):
"""Check symbol gives the same output for different running context
Parameters
@@ -1223,6 +1221,11 @@ def check_consistency(sym, ctx_list, scale=1.0,
grad_req='write',
Optional, When flag set to true,
random input data generated follows uniform distribution,
not normal distribution
+ rand_type: np.dtype
+ casts the randomly generated data to this type
+ Optional, when input data is passed via arg_params,
+ defaults to np.float64 (numpy float default)
+
Examples
--------
>>> # create the symbol
@@ -1283,9 +1286,11 @@ def check_consistency(sym, ctx_list, scale=1.0,
grad_req='write',
for n, arr in exe_list[0].arg_dict.items():
if n not in arg_params:
if use_uniform:
- arg_params[n] = np.random.uniform(low=-0.92, high=0.92,
size=arr.shape)
+ arg_params[n] = np.random.uniform(low=-0.92, high=0.92,
+
size=arr.shape).astype(rand_type)
else:
- arg_params[n] = np.random.normal(size=arr.shape, scale=scale)
+ arg_params[n] = np.random.normal(size=arr.shape,
+ scale=scale).astype(rand_type)
for n, arr in exe_list[0].aux_dict.items():
if n not in aux_params:
aux_params[n] = 0
diff --git a/src/operator/nn/pooling.cc b/src/operator/nn/pooling.cc
index 9b6996d..2380f0f 100644
--- a/src/operator/nn/pooling.cc
+++ b/src/operator/nn/pooling.cc
@@ -377,8 +377,7 @@ We can see that Lp pooling stands between those two, in
practice the most common
For each window ``X``, the mathematical expression for Lp pooling is:
-..math::
- f(X) = \sqrt{p}{\sum\limits_{x \in X} x^p}
+:math:`f(X) = \sqrt[p]{\sum_{x}^{X} x^p}`
)code" ADD_FILELINE)
.set_num_inputs(1)
diff --git a/tests/python/gpu/test_operator_gpu.py
b/tests/python/gpu/test_operator_gpu.py
index 89df63e..5612b0a 100644
--- a/tests/python/gpu/test_operator_gpu.py
+++ b/tests/python/gpu/test_operator_gpu.py
@@ -614,13 +614,13 @@ def test_pooling_with_type():
{'ctx': mx.cpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict':
{'pool_data': np.float64}},
{'ctx': mx.cpu(0), 'pool_data': (2, 2, 10, 10), 'type_dict':
{'pool_data': np.float32}}]
sym = mx.sym.Pooling(kernel=(3,3), pool_type='max',
pooling_convention='valid', name='pool')
- check_consistency(sym, ctx_list)
+ check_consistency(sym, ctx_list, rand_type=np.float16)
sym = mx.sym.Pooling(kernel=(3,3), pool_type='max',
pooling_convention='full', name='pool')
- check_consistency(sym, ctx_list)
+ check_consistency(sym, ctx_list, rand_type=np.float16)
sym = mx.sym.Pooling(kernel=(300,300), pool_type='max', global_pool=True,
name='pool')
- check_consistency(sym, ctx_list)
+ check_consistency(sym, ctx_list, rand_type=np.float16)
@with_seed()
@@ -765,11 +765,8 @@ def test_spatial_transformer_with_type():
check_consistency(sym, ctx_list, grad_req="add")
-# Checking max pooling consistency over the data sets of different float types
is problematic
-# as one max value in a float32 data set may not be the max value in a float16
data set.
-# This function will not be called.
-@with_seed(1234)
-def test_pooling_with_type():
+@with_seed()
+def test_pooling_with_type2():
ctx_list = [{'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict':
{'pool_data': np.float64}},
{'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict':
{'pool_data': np.float32}},
{'ctx': mx.gpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict':
{'pool_data': np.float16}},
@@ -777,19 +774,17 @@ def test_pooling_with_type():
{'ctx': mx.cpu(0), 'pool_data': (10, 2, 10, 10), 'type_dict':
{'pool_data': np.float32}}]
sym = mx.sym.Pooling(name='pool', kernel=(3,3), stride=(2,2),
pool_type='max')
- check_consistency(sym, ctx_list)
+ check_consistency(sym, ctx_list, rand_type=np.float16)
sym = mx.sym.Pooling(name='pool', kernel=(3,3), pad=(1,1), pool_type='avg')
check_consistency(sym, ctx_list)
- # this is unstable
- # sym = mx.sym.Pooling(name='pool', kernel=(5,5), pad=(2,2),
pool_type='max')
- # check_consistency(sym, ctx_list)
+ sym = mx.sym.Pooling(name='pool', kernel=(5,5), pad=(2,2), pool_type='max')
+ check_consistency(sym, ctx_list, rand_type=np.float16)
sym = mx.sym.Pooling(name='pool', kernel=(3,3), pad=(1,1), pool_type='sum')
check_consistency(sym, ctx_list)
-
@unittest.skip("Flaky test
https://github.com/apache/incubator-mxnet/issues/11517")
@with_seed()
def test_pooling_versions():