This is an automated email from the ASF dual-hosted git repository.
reminisce pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git
The following commit(s) were added to refs/heads/master by this push:
new 2fd4720 fix acc_type_switch macro with extra tests (#14773)
2fd4720 is described below
commit 2fd4720342f87be80f280176ffbecb8f37e49b73
Author: Hao Jin <[email protected]>
AuthorDate: Tue Apr 23 20:23:09 2019 -0700
fix acc_type_switch macro with extra tests (#14773)
---
src/operator/mxnet_op.h | 4 ++++
src/operator/tensor/broadcast_reduce-inl.cuh | 13 +++++++++++
src/operator/tensor/broadcast_reduce-inl.h | 13 +++++++++++
tests/python/unittest/test_operator.py | 35 +++++++++++++++++-----------
4 files changed, 52 insertions(+), 13 deletions(-)
diff --git a/src/operator/mxnet_op.h b/src/operator/mxnet_op.h
index e331255..f17b708 100644
--- a/src/operator/mxnet_op.h
+++ b/src/operator/mxnet_op.h
@@ -335,24 +335,28 @@ inline int get_num_threads<cpu>(const int N) {
{ \
typedef uint8_t DType; \
typedef uint32_t AType; \
+ {__VA_ARGS__} \
} \
break; \
case mshadow::kInt8: \
{ \
typedef int8_t DType; \
typedef int32_t AType; \
+ {__VA_ARGS__} \
} \
break; \
case mshadow::kInt32: \
{ \
typedef int32_t DType; \
typedef int64_t AType; \
+ {__VA_ARGS__} \
} \
break; \
case mshadow::kInt64: \
{ \
typedef int64_t DType; \
typedef int64_t AType; \
+ {__VA_ARGS__} \
} \
break; \
default: \
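For context: the hunk above adds the missing {__VA_ARGS__} expansion to the integer cases of MXNET_ACC_TYPE_SWITCH, which previously defined DType/AType but never emitted the caller's body, so the switch silently did nothing for those types. A minimal standalone sketch of that failure mode and the fix (illustrative macro names, not the actual MXNet macro):

    #include <cstdint>
    #include <iostream>

    // Broken variant: the case defines the typedefs but omits {__VA_ARGS__},
    // so the caller's body never runs for this type flag.
    #define BROKEN_TYPE_SWITCH(flag, DType, AType, ...) \
      switch (flag) {                                    \
        case 0: {                                        \
          typedef int8_t DType;                          \
          typedef int32_t AType;                         \
        } break;                                         \
        default: break;                                  \
      }

    // Fixed variant: the body is expanded inside the case, as in the patch above.
    #define FIXED_TYPE_SWITCH(flag, DType, AType, ...)  \
      switch (flag) {                                    \
        case 0: {                                        \
          typedef int8_t DType;                          \
          typedef int32_t AType;                         \
          {__VA_ARGS__}                                  \
        } break;                                         \
        default: break;                                  \
      }

    int main() {
      BROKEN_TYPE_SWITCH(0, DType, AType, { std::cout << "never printed\n"; });
      FIXED_TYPE_SWITCH(0, DType, AType, { std::cout << "int8 data, int32 accumulator\n"; });
      return 0;
    }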
diff --git a/src/operator/tensor/broadcast_reduce-inl.cuh b/src/operator/tensor/broadcast_reduce-inl.cuh
index 54db350..1b0127a 100644
--- a/src/operator/tensor/broadcast_reduce-inl.cuh
+++ b/src/operator/tensor/broadcast_reduce-inl.cuh
@@ -617,6 +617,8 @@ void Reduce(Stream<gpu> *s, const TBlob& small, const OpReqType req,
ReduceImplConfig<ndim> config =
ConfigureReduceImpl<ndim, DType>(small.shape_, big.shape_, NULL, NULL);
if (safe_acc) {
+ // TODO(haojin2): Use real-only type switch for Windows temporarily due to CI issues.
+#ifndef _WIN32
MXNET_ACC_TYPE_SWITCH(mshadow::DataType<DType>::kFlag, DataType, AType, {
typedef typename std::conditional<safe_acc, AType, DataType>::type AccType;
MSHADOW_TYPE_SWITCH(small.type_flag_, OType, {
@@ -626,6 +628,17 @@ void Reduce(Stream<gpu> *s, const TBlob& small, const OpReqType req,
stream, small, req, big, workspace, config);
});
});
+#else
+ MXNET_REAL_ACC_TYPE_SWITCH(mshadow::DataType<DType>::kFlag, DataType, AType, {
+ typedef typename std::conditional<safe_acc, AType, DataType>::type AccType;
+ MSHADOW_TYPE_SWITCH(small.type_flag_, OType, {
+ typedef typename std::conditional<safe_acc, OType, DataType>::type OutType;
+ config = ConfigureReduceImpl<ndim, AccType>(small.shape_, big.shape_, NULL, NULL);
+ ReduceImpl<Reducer, ndim, AccType, DataType, OutType, OP>(
+ stream, small, req, big, workspace, config);
+ });
+ });
+#endif
} else {
ReduceImpl<Reducer, ndim, DType, DType, DType, OP>(stream, small, req, big, workspace, config);
}
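Both branches above rely on the same std::conditional pattern: when safe accumulation is requested, intermediate values are held in the wider AType, otherwise in the data type itself. A small self-contained sketch of that selection (illustrative helper, not MXNet's Reduce signature):

    #include <cstdint>
    #include <type_traits>

    // AccType resolves to AType when SafeAcc is true, otherwise to DType itself,
    // mirroring the std::conditional<safe_acc, AType, DataType> lines in the patch.
    template <bool SafeAcc, typename DType, typename AType>
    struct AccTypeOf {
      typedef typename std::conditional<SafeAcc, AType, DType>::type type;
    };

    // Matches the pairs wired into the switch: int8 accumulates in int32, and so on.
    static_assert(std::is_same<AccTypeOf<true, int8_t, int32_t>::type, int32_t>::value,
                  "safe accumulation uses the wider type");
    static_assert(std::is_same<AccTypeOf<false, int8_t, int32_t>::type, int8_t>::value,
                  "without safe_acc the data type is used as-is");

    int main() { return 0; }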
diff --git a/src/operator/tensor/broadcast_reduce-inl.h b/src/operator/tensor/broadcast_reduce-inl.h
index be589c4..d107e89 100644
--- a/src/operator/tensor/broadcast_reduce-inl.h
+++ b/src/operator/tensor/broadcast_reduce-inl.h
@@ -239,6 +239,8 @@ void Reduce(Stream<cpu>* s, const TBlob& small, const OpReqType req,
N, M, req == kAddTo, big.dptr<DType>(), small.dptr<DType>(),
big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride);
} else {
+ // TODO(haojin2): Use real-only type switch for Windows temporarily due to CI issues.
+#ifndef _WIN32
MXNET_ACC_TYPE_SWITCH(mshadow::DataType<DType>::kFlag, DataType, AType, {
typedef typename std::conditional<safe_acc, AType, DataType>::type AccType;
MSHADOW_TYPE_SWITCH(small.type_flag_, OType, {
@@ -248,6 +250,17 @@ void Reduce(Stream<cpu>* s, const TBlob& small, const OpReqType req,
big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride);
});
});
+#else
+ MXNET_REAL_ACC_TYPE_SWITCH(mshadow::DataType<DType>::kFlag, DataType, AType, {
+ typedef typename std::conditional<safe_acc, AType, DataType>::type AccType;
+ MSHADOW_TYPE_SWITCH(small.type_flag_, OType, {
+ typedef typename std::conditional<safe_acc, OType, DataType>::type OutType;
+ seq_reduce_compute<Reducer, ndim, AccType, DataType, OutType, OP>(
+ N, M, req == kAddTo, big.dptr<DataType>(), small.dptr<OutType>(),
+ big.shape_.get<ndim>(), small.shape_.get<ndim>(), rshape, rstride);
+ });
+ });
+#endif
}
}
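The reason the accumulation type is widened at all (uint8 -> uint32, int8 -> int32, int32/int64 -> int64 in the switch above) is that summing many narrow integers in their own type can overflow, while the wider accumulator keeps the exact result. A tiny illustration of the difference, not MXNet code:

    #include <cstdint>
    #include <iostream>
    #include <vector>

    int main() {
      std::vector<int8_t> data(1000, 100);  // exact sum is 100000
      int8_t narrow = 0;                    // accumulating in the data type
      int32_t wide = 0;                     // accumulating in the wider type
      for (int8_t v : data) {
        narrow = static_cast<int8_t>(narrow + v);  // typically wraps long before the end
        wide += v;
      }
      std::cout << "int8 accumulator:  " << static_cast<int>(narrow) << "\n";
      std::cout << "int32 accumulator: " << wide << "\n";  // prints 100000
      return 0;
    }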
diff --git a/tests/python/unittest/test_operator.py b/tests/python/unittest/test_operator.py
index 5d34b12..eb10b3b 100644
--- a/tests/python/unittest/test_operator.py
+++ b/tests/python/unittest/test_operator.py
@@ -3419,12 +3419,19 @@ def test_norm():
in_data_dim = random_sample([4,5,6], 1)[0]
in_shape = rand_shape_nd(in_data_dim, dim=5)
epsilon = 1e-3
- acc_type = {np.float16: np.float32, np.float32: np.float32, np.float64: np.float64}
+ acc_type = {np.float16: np.float32, np.float32: np.float32, np.float64: np.float64,
+ np.int32: np.int32, np.int64: np.int64}
+ is_windows = sys.platform.startswith('win')
for order in [1, 2]:
- for dtype in [np.float16, np.float32, np.float64]:
+ for dtype in [np.float16, np.float32, np.float64, np.int32, np.int64]:
for i in range(in_data_dim):
- for out_dtype in ['float32', 'float64']:
+ for out_dtype in ['float32', 'float64', 'int32', 'int64']:
+ if (dtype == np.int32 or dtype == np.int64) and ('int' not in out_dtype or is_windows):
+     continue
+ if dtype != np.int32 and dtype != np.int64 and 'int' in out_dtype:
+     continue
backward_dtype = np.float32 if out_dtype == 'float32' else np.float64
+ skip_backward = 'int' in out_dtype
print(order, dtype, i, out_dtype, in_shape)
in_data = np.random.uniform(-1, 1, in_shape).astype(acc_type[dtype])
in_data[abs(in_data) < epsilon] = 2 * epsilon
@@ -3433,13 +3440,14 @@ def test_norm():
npy_out_backward = np.sign(in_data) if order is 1 else in_data/npy_out
check_symbolic_forward(norm_sym, [in_data.astype(dtype)], [npy_out.astype(out_dtype)],
    rtol=1e-3, atol=1e-5, ctx=ctx)
- check_symbolic_backward(norm_sym, [in_data.astype(dtype)],
-     [np.ones(npy_out.shape).astype(out_dtype)],
-     [npy_out_backward], rtol=1e-3, atol=1e-5, ctx=ctx,
-     dtype=backward_dtype)
+ if not skip_backward:
+     check_symbolic_backward(norm_sym, [in_data.astype(dtype)],
+         [np.ones(npy_out.shape).astype(out_dtype)],
+         [npy_out_backward], rtol=1e-3, atol=1e-5, ctx=ctx,
+         dtype=backward_dtype)
# Disable numeric gradient https://github.com/apache/incubator-mxnet/issues/11509
# check gradient
- if dtype is not np.float16:
+ if dtype is not np.float16 and not skip_backward:
check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon,
    rtol=1e-1, atol=1e-3, dtype=backward_dtype)
if i < in_data_dim-1:
@@ -3449,12 +3457,13 @@ def test_norm():
check_symbolic_forward(norm_sym, [in_data], [npy_out.astype(dtype)],
    rtol=1e-3 if dtype is np.float16 else 1e-3,
    atol=1e-5 if dtype is np.float16 else 1e-5, ctx=ctx)
- check_symbolic_backward(norm_sym, [in_data],
-     [np.ones(npy_out.shape).astype(out_dtype)],
-     [npy_out_backward.astype(out_dtype)],
-     rtol=1e-3, atol=1e-5, ctx=ctx, dtype=backward_dtype)
+ if not skip_backward:
+     check_symbolic_backward(norm_sym, [in_data],
+         [np.ones(npy_out.shape).astype(out_dtype)],
+         [npy_out_backward.astype(out_dtype)],
+         rtol=1e-3, atol=1e-5, ctx=ctx, dtype=backward_dtype)
# check gradient
- if dtype is not np.float16:
+ if dtype is not np.float16 and not skip_backward:
check_numeric_gradient(norm_sym, [in_data], numeric_eps=epsilon,
    rtol=1e-1, atol=1e-3, dtype=backward_dtype)