luobao-intel closed pull request #11627: Fall back when sparse arrays are
passed to MKLDNN-enabled operators
URL: https://github.com/apache/incubator-mxnet/pull/11627
This is a PR merged from a forked repository.
As GitHub hides the original diff on merge, it is displayed below for
the sake of provenance:
As this is a foreign pull request (from a fork), the diff is supplied
below (as it won't show otherwise due to GitHub magic):
diff --git a/src/operator/nn/activation.cc b/src/operator/nn/activation.cc
index d723bbe62d7..c0977df2c28 100644
--- a/src/operator/nn/activation.cc
+++ b/src/operator/nn/activation.cc
@@ -103,12 +103,6 @@ inline static bool ActivationStorageType(const
nnvm::NodeAttrs& attrs,
bool ret = ElemwiseStorageType<1, 1, false, false, false>(attrs, dev_mask,
dispatch_mode,
in_attrs,
out_attrs);
-#if MXNET_USE_MKLDNN == 1
- const ActivationParam& param = nnvm::get<ActivationParam>(attrs.parsed);
- if (dev_mask == mshadow::cpu::kDevMask && SupportMKLDNNAct(param)) {
- *dispatch_mode = DispatchMode::kFComputeEx;
- }
-#endif
return ret;
}
@@ -139,11 +133,6 @@ inline static bool BackwardActStorageType(const
nnvm::NodeAttrs& attrs,
in_attrs, out_attrs);
#endif
CHECK_EQ(out_attrs->size(), 1U);
-#if MXNET_USE_MKLDNN == 1
- if (dev_mask == mshadow::cpu::kDevMask && SupportMKLDNNAct(param)) {
- *dispatch_mode = DispatchMode::kFComputeEx;
- }
-#endif
return ret;
}
diff --git a/src/operator/nn/convolution-inl.h
b/src/operator/nn/convolution-inl.h
index d40abaf1fd6..c504b4a687d 100644
--- a/src/operator/nn/convolution-inl.h
+++ b/src/operator/nn/convolution-inl.h
@@ -399,6 +399,7 @@ void ConvolutionCompute(const nnvm::NodeAttrs& attrs,
const OpContext& ctx, const std::vector<TBlob>& inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
+ LOG(INFO) << "This is ConvolutionCompute";
const ConvolutionParam& param = nnvm::get<ConvolutionParam>(attrs.parsed);
MSHADOW_REAL_TYPE_SWITCH(inputs[conv::kData].type_flag_, DType, {
ConvolutionOp<xpu, DType> op;
@@ -412,6 +413,8 @@ void ConvolutionGradCompute(const nnvm::NodeAttrs& attrs,
const OpContext& ctx, const std::vector<TBlob>&
inputs,
const std::vector<OpReqType>& req,
const std::vector<TBlob>& outputs) {
+
+ LOG(INFO) << "This is ConvolutionGradCompute";
const ConvolutionParam& param = nnvm::get<ConvolutionParam>(attrs.parsed);
std::vector<TBlob> in_data(inputs.begin() + 1, inputs.end());
const TBlob &out_grad = inputs[0];
diff --git a/src/operator/nn/convolution.cc b/src/operator/nn/convolution.cc
index ef70ccd6ec1..ffebd3ded3e 100644
--- a/src/operator/nn/convolution.cc
+++ b/src/operator/nn/convolution.cc
@@ -25,6 +25,8 @@
*/
#include "./convolution-inl.h"
+#include "../operator_common.h"
+#include "../../common/utils.h"
#include "../elemwise_op_common.h"
#include "./mkldnn/mkldnn_ops-inl.h"
#include "./mkldnn/mkldnn_base-inl.h"
@@ -54,7 +56,8 @@ static void ConvolutionComputeExCPU(const nnvm::NodeAttrs&
attrs,
const std::vector<NDArray>& inputs,
const std::vector<OpReqType>& req,
const std::vector<NDArray>& outputs) {
- const ConvolutionParam& params = nnvm::get<ConvolutionParam>(attrs.parsed);
+ LOG(INFO) << "This is ConvolutionComputeExCPU";
+ const ConvolutionParam& params = nnvm::get<ConvolutionParam>(attrs.parsed);
if (SupportMKLDNNConv(params, inputs[0])) {
MKLDNN_OPCHECK_INIT(false, outputs.size(), inputs, outputs);
MKLDNNConvolutionForward(attrs, ctx, inputs, req, outputs);
@@ -69,6 +72,7 @@ static void ConvolutionGradComputeExCPU(const
nnvm::NodeAttrs& attrs,
const std::vector<NDArray>& inputs,
const std::vector<OpReqType>& req,
const std::vector<NDArray>& outputs) {
+ LOG(INFO) << "This is ConvolutionGradComputeExCPU";
const ConvolutionParam& params = nnvm::get<ConvolutionParam>(attrs.parsed);
if (SupportMKLDNNConv(params, inputs[0])) {
MKLDNN_OPCHECK_INIT(true, outputs.size(), inputs, outputs);
@@ -305,8 +309,16 @@ inline static bool ConvStorageType(const nnvm::NodeAttrs&
attrs,
else
#endif
wanted_mode = DispatchMode::kFCompute;
- return storage_type_assign(out_attrs, mxnet::kDefaultStorage,
- dispatch_mode, wanted_mode);
+
+ bool dispatched = false;
+ if (!dispatched && common::ContainsOnlyStorage(*in_attrs, kDefaultStorage)){
+ dispatched = op::storage_type_assign(out_attrs, mxnet::kDefaultStorage,
dispatch_mode, wanted_mode);
+ }
+ if (!dispatched){
+ dispatched = op::dispatch_fallback(out_attrs, dispatch_mode);
+ }
+
+ return dispatched;
}
inline static bool BackwardConvStorageType(const nnvm::NodeAttrs& attrs,
@@ -327,8 +339,15 @@ inline static bool BackwardConvStorageType(const
nnvm::NodeAttrs& attrs,
else
#endif
wanted_mode = DispatchMode::kFCompute;
- return storage_type_assign(out_attrs, mxnet::kDefaultStorage,
- dispatch_mode, wanted_mode);
+ bool dispatched = false;
+ if (!dispatched && common::ContainsOnlyStorage(*in_attrs, kDefaultStorage)){
+ dispatched = op::storage_type_assign(out_attrs, mxnet::kDefaultStorage,
dispatch_mode, wanted_mode);
+ }
+ if (!dispatched){
+ dispatched = op::dispatch_fallback(out_attrs, dispatch_mode);
+ }
+
+ return dispatched;
}
void ConvolutionParamParser(nnvm::NodeAttrs* attrs) {
diff --git a/src/operator/nn/deconvolution.cc b/src/operator/nn/deconvolution.cc
index 9e0a70121bf..edd0b5ea67e 100644
--- a/src/operator/nn/deconvolution.cc
+++ b/src/operator/nn/deconvolution.cc
@@ -27,6 +27,8 @@
#include "./deconvolution-inl.h"
#include "./mkldnn/mkldnn_ops-inl.h"
#include "./mkldnn/mkldnn_base-inl.h"
+#include "../operator_common.h"
+#include "../../common/utils.h"
namespace mxnet {
namespace op {
@@ -273,8 +275,15 @@ inline static bool DeconvStorageType(const
nnvm::NodeAttrs& attrs,
else
#endif
wanted_mode = DispatchMode::kFCompute;
- return storage_type_assign(out_attrs, mxnet::kDefaultStorage,
- dispatch_mode, wanted_mode);
+
+ bool dispatched = false;
+ if (!dispatched && common::ContainsOnlyStorage(*in_attrs, kDefaultStorage)){
+ dispatched = op::storage_type_assign(out_attrs, mxnet::kDefaultStorage,
dispatch_mode, wanted_mode);
+ }
+ if (!dispatched){
+ dispatched = op::dispatch_fallback(out_attrs, dispatch_mode);
+ }
+ return dispatched;
}
inline static bool BackwardDeconvStorageType(const nnvm::NodeAttrs& attrs,
@@ -294,8 +303,15 @@ inline static bool BackwardDeconvStorageType(const
nnvm::NodeAttrs& attrs,
else
#endif
wanted_mode = DispatchMode::kFCompute;
- return storage_type_assign(out_attrs, mxnet::kDefaultStorage,
- dispatch_mode, wanted_mode);
+
+ bool dispatched = false;
+ if (!dispatched && common::ContainsOnlyStorage(*in_attrs, kDefaultStorage)){
+ dispatched = op::storage_type_assign(out_attrs, mxnet::kDefaultStorage,
dispatch_mode, wanted_mode);
+ }
+ if (!dispatched){
+ dispatched = op::dispatch_fallback(out_attrs, dispatch_mode);
+ }
+ return dispatched;
}
#if MXNET_USE_MKLDNN == 1
diff --git a/src/operator/nn/lrn.cc b/src/operator/nn/lrn.cc
index 6b3d7c81837..b09a108a6a1 100644
--- a/src/operator/nn/lrn.cc
+++ b/src/operator/nn/lrn.cc
@@ -26,6 +26,7 @@
#include "./lrn-inl.h"
#include "../operator_common.h"
+#include "../../common/utils.h"
#if MXNET_USE_MKLDNN == 1
#include "./mkldnn/mkldnn_lrn-inl.h"
#endif
@@ -87,16 +88,22 @@ bool LRNForwardInferStorageType(const nnvm::NodeAttrs&
attrs,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs) {
CHECK(!in_attrs->empty());
+ DispatchMode wanted_mode;
#if MXNET_USE_MKLDNN == 1
- if (dev_mask == mshadow::cpu::kDevMask) {
- storage_type_assign(out_attrs, mxnet::kDefaultStorage,
- dispatch_mode, DispatchMode::kFComputeEx);
- return true;
- }
+ if (dev_mask == mshadow::cpu::kDevMask)
+ wanted_mode = DispatchMode::kFComputeEx;
+ else
#endif
- storage_type_assign(out_attrs, mxnet::kDefaultStorage,
- dispatch_mode, DispatchMode::kFCompute);
- return true;
+ wanted_mode = DispatchMode::kFCompute;
+
+ bool dispatched = false;
+ if (!dispatched && common::ContainsOnlyStorage(*in_attrs, kDefaultStorage)){
+ dispatched = op::storage_type_assign(out_attrs, mxnet::kDefaultStorage,
dispatch_mode, wanted_mode);
+ }
+ if (!dispatched){
+ dispatched = op::dispatch_fallback(out_attrs, dispatch_mode);
+ }
+ return dispatched;
}
bool LRNBackwardInferStorageType(const nnvm::NodeAttrs& attrs,
@@ -105,16 +112,22 @@ bool LRNBackwardInferStorageType(const nnvm::NodeAttrs&
attrs,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs) {
CHECK(!in_attrs->empty());
+ DispatchMode wanted_mode;
#if MXNET_USE_MKLDNN == 1
- if (dev_mask == mshadow::cpu::kDevMask) {
- storage_type_assign(out_attrs, mxnet::kDefaultStorage,
- dispatch_mode, DispatchMode::kFComputeEx);
- return true;
- }
+ if (dev_mask == mshadow::cpu::kDevMask)
+ wanted_mode = DispatchMode::kFComputeEx;
+ else
#endif
- storage_type_assign(out_attrs, mxnet::kDefaultStorage,
- dispatch_mode, DispatchMode::kFCompute);
- return true;
+ wanted_mode = DispatchMode::kFCompute;
+
+ bool dispatched = false;
+ if (!dispatched && common::ContainsOnlyStorage(*in_attrs, kDefaultStorage)){
+ dispatched = op::storage_type_assign(out_attrs, mxnet::kDefaultStorage,
dispatch_mode, wanted_mode);
+ }
+ if (!dispatched){
+ dispatched = op::dispatch_fallback(out_attrs, dispatch_mode);
+ }
+ return dispatched;
}
#if MXNET_USE_MKLDNN == 1
diff --git a/src/operator/nn/pooling.cc b/src/operator/nn/pooling.cc
index 3200a515d6f..8d672620e67 100644
--- a/src/operator/nn/pooling.cc
+++ b/src/operator/nn/pooling.cc
@@ -25,6 +25,8 @@
*/
#include "../elemwise_op_common.h"
#include "./pooling-inl.h"
+#include "../operator_common.h"
+#include "../../common/utils.h"
#if MXNET_USE_NNPACK == 1
#include "../nnpack/nnpack_pooling-inl.h"
#endif // MXNET_USE_NNPACK
@@ -277,17 +279,26 @@ inline static bool PoolingStorageType(const
nnvm::NodeAttrs &attrs,
std::vector<int> *out_attrs) {
CHECK_EQ(in_attrs->size(), 1);
+ DispatchMode wanted_mode;
+
#if MXNET_USE_MKLDNN == 1
const PoolingParam &param = nnvm::get<PoolingParam>(attrs.parsed);
- if (dev_mask == mshadow::cpu::kDevMask && SupportMKLDNNPooling(param)) {
- return storage_type_assign(out_attrs, mxnet::kDefaultStorage,
- dispatch_mode, DispatchMode::kFComputeEx);
- }
-#else
- CHECK_EQ(out_attrs->size(), 1);
+ if (dev_mask == mshadow::cpu::kDevMask && SupportMKLDNNPooling(param))
+ wanted_mode = DispatchMode::kFComputeEx;
+ else
#endif
- return storage_type_assign(out_attrs, mxnet::kDefaultStorage,
- dispatch_mode, DispatchMode::kFCompute);
+ wanted_mode = DispatchMode::kFCompute;
+
+ CHECK_EQ(out_attrs->size(), 1);
+ bool dispatched = false;
+ if (!dispatched && common::ContainsOnlyStorage(*in_attrs, kDefaultStorage)){
+ dispatched = op::storage_type_assign(out_attrs, mxnet::kDefaultStorage,
dispatch_mode, wanted_mode);
+ }
+ if (!dispatched){
+ dispatched = op::dispatch_fallback(out_attrs, dispatch_mode);
+ }
+
+ return dispatched;
}
inline static bool BackwardPoolingStorageType(const nnvm::NodeAttrs &attrs,
@@ -299,16 +310,25 @@ inline static bool BackwardPoolingStorageType(const
nnvm::NodeAttrs &attrs,
CHECK_EQ(in_attrs->size(), GetNumBackInputs(param));
CHECK_EQ(out_attrs->size(), 1);
+ DispatchMode wanted_mode;
+
#if MXNET_USE_MKLDNN == 1
- if (dev_mask == mshadow::cpu::kDevMask && SupportMKLDNNPooling(param)) {
- return storage_type_assign(out_attrs, mxnet::kDefaultStorage,
- dispatch_mode, DispatchMode::kFComputeEx);
- }
-#else
- CHECK_EQ(in_attrs->size(), 3);
+ if (dev_mask == mshadow::cpu::kDevMask && SupportMKLDNNPooling(param))
+ wanted_mode = DispatchMode::kFComputeEx;
+ else
#endif
- return storage_type_assign(out_attrs, mxnet::kDefaultStorage,
- dispatch_mode, DispatchMode::kFCompute);
+ wanted_mode = DispatchMode::kFCompute;
+
+ CHECK_EQ(in_attrs->size(), 3);
+ bool dispatched = false;
+ if (!dispatched && common::ContainsOnlyStorage(*in_attrs, kDefaultStorage)){
+ dispatched = op::storage_type_assign(out_attrs, mxnet::kDefaultStorage,
dispatch_mode, wanted_mode);
+ }
+ if (!dispatched){
+ dispatched = op::dispatch_fallback(out_attrs, dispatch_mode);
+ }
+
+ return dispatched;
}
DMLC_REGISTER_PARAMETER(PoolingParam);
diff --git a/src/operator/nn/softmax.cc b/src/operator/nn/softmax.cc
index e9b104f1286..78e22b318a6 100644
--- a/src/operator/nn/softmax.cc
+++ b/src/operator/nn/softmax.cc
@@ -27,6 +27,8 @@
#include "../tensor/elemwise_binary_op.h"
#include "mkldnn/mkldnn_base-inl.h"
#include "mkldnn/mkldnn_ops-inl.h"
+#include "../operator_common.h"
+#include "../../common/utils.h"
namespace mxnet {
namespace op {
@@ -38,7 +40,7 @@ static void SoftmaxComputeExCPU(const nnvm::NodeAttrs& attrs,
const std::vector<NDArray>& inputs,
const std::vector<OpReqType>& req,
const std::vector<NDArray>& outputs) {
- // It seems MKLDNN softmax doesn't support training.
+// It seems MKLDNN softmax doesn't support training.
if (SupportMKLDNN(inputs[0]) && !ctx.is_train) {
MKLDNN_OPCHECK_INIT(false, outputs.size(), inputs, outputs);
MKLDNNSoftmaxForward(attrs, ctx, inputs[0], req[0], outputs[0]);
@@ -49,7 +51,6 @@ static void SoftmaxComputeExCPU(const nnvm::NodeAttrs& attrs,
FallBackCompute(SoftmaxCompute<cpu, mxnet_op::softmax_fwd>, attrs, ctx,
inputs, req, outputs);
}
-#endif
inline static bool SoftmaxStorageType(const nnvm::NodeAttrs& attrs,
const int dev_mask,
@@ -67,9 +68,18 @@ inline static bool SoftmaxStorageType(const nnvm::NodeAttrs&
attrs,
else
#endif
wanted_mode = DispatchMode::kFCompute;
- return storage_type_assign(out_attrs,
static_cast<NDArrayStorageType>((*in_attrs)[0]),
- dispatch_mode, wanted_mode);
+
+ bool dispatched = false;
+ if (!dispatched && common::ContainsOnlyStorage(*in_attrs, kDefaultStorage)){
+ dispatched = op::storage_type_assign(out_attrs, mxnet::kDefaultStorage,
dispatch_mode, wanted_mode);
+ }
+ if (!dispatched){
+ dispatched = op::dispatch_fallback(out_attrs, dispatch_mode);
+ }
+
+ return dispatched;
}
+#endif
MXNET_OPERATOR_REGISTER_UNARY(softmax)
.describe(R"code(Applies the softmax function.
diff --git a/tests/python/mkl/test_mkldnn.py b/tests/python/mkl/test_mkldnn.py
index dad1bd7d615..f9d29437548 100644
--- a/tests/python/mkl/test_mkldnn.py
+++ b/tests/python/mkl/test_mkldnn.py
@@ -240,5 +240,141 @@ def check_batchnorm_training(stype):
for stype in stypes:
check_batchnorm_training(stype)
+@with_seed()
+def test_softmax():
+ def check_softmax_training(stype):
+ for shape in [(2, 3), (2, 3, 2, 2)]:
+ data_tmp = np.random.normal(-0.1, 0.1, size=shape)
+
+ data = mx.symbol.Variable('data', stype=stype)
+ in_location = [mx.nd.array(data_tmp).tostype(stype)]
+
+ test = mx.symbol.softmax(data, axis=-1)
+ check_numeric_gradient(test, in_location,
numeric_eps=1e-2,rtol=0.16, atol=1e-4)
+
+ stypes = ['row_sparse', 'default']
+ for stype in stypes:
+ check_softmax_training(stype)
+
+@with_seed()
+def test_SoftmaxOutput():
+ def check_SoftmaxOutput_training(stype):
+ for shape in [(10, 3)]:
+ data_tmp = np.random.normal(-0.1, 0.1, size=shape)
+ label_tmp = np.random.randint(2,size=shape[0])
+
+ data = mx.symbol.Variable('data', stype=stype)
+ label = mx.symbol.Variable('label',stype=stype)
+
+ in_location =
[mx.nd.array(data_tmp).tostype(stype),mx.nd.array(label_tmp).tostype(stype)]
+
+ test = mx.symbol.SoftmaxOutput(data,label)
+ check_numeric_gradient(test, in_location,
numeric_eps=1e-2,rtol=0.16, atol=1e-2)
+
+ stypes = ['row_sparse', 'default']
+ for stype in stypes:
+ check_SoftmaxOutput_training(stype)
+
+@with_seed()
+def test_pooling():
+ def check_pooling_training(stype):
+ for shape in [(3, 3, 10),(3, 3, 20, 20)]:
+ data_tmp = np.random.normal(-0.1,0.1, size=shape)
+ data = mx.symbol.Variable('data', stype=stype)
+ in_location = [mx.nd.array(data_tmp).tostype(stype)]
+
+ if np.array(shape).shape[0] == 3:
+ test = mx.symbol.Pooling(data=data, kernel=(3,), stride=(2),
pool_type='avg')
+ elif np.array(shape).shape[0] == 4:
+ test = mx.symbol.Pooling(data=data, kernel=(3, 3), stride=(2,
2), pool_type='avg')
+ else:
+ return 0
+ # check_numeric_gradient(test, in_location,
numeric_eps=1e-3,rtol=1e-5, atol=1e-6)
+ check_numeric_gradient(test, in_location,
numeric_eps=1e-2,rtol=0.16, atol=1e-2)
+
+ stypes = ['row_sparse', 'default']
+ for stype in stypes:
+ check_pooling_training(stype)
+
+@with_seed()
+def test_activation():
+ def check_activation_training(stype):
+ for shape in [(2, 3, 3), (2, 3, 2, 2)]:
+ data_tmp = np.random.normal(-0.1, 1, size=shape)
+
+ data = mx.symbol.Variable('data', stype=stype)
+ in_location = [mx.nd.array(data_tmp).tostype(stype)]
+
+ test = mx.symbol.Activation(data, act_type="relu")
+ check_numeric_gradient(test, in_location,
numeric_eps=1e-2,rtol=0.16, atol=1e-2)
+
+ stypes = ['row_sparse', 'default']
+ for stype in stypes:
+ check_activation_training(stype)
+
+def test_convolution():
+ def check_convolution_training(stype):
+ for shape in [(3, 3, 10),(3, 3, 10, 10)]:
+ data_tmp = np.random.randint(256, size=shape)
+ data = mx.symbol.Variable('data', stype=stype)
+
+ if np.array(shape).shape[0] == 3:
+ test = mx.symbol.Convolution(data=data, kernel=(3,),
stride=(2), num_filter=4)
+ weight_tmp = np.random.normal(-0.1, 0.1, size=(4, 3, 3))
+
+ elif np.array(shape).shape[0] == 4:
+ test = mx.symbol.Convolution(data=data, kernel=(3, 3),
stride=(2, 2), num_filter=4)
+ weight_tmp = np.random.normal(-0.1, 0.1, size=(4,3,3,3))
+ else:
+ return 0
+ bias_tmp = np.random.normal(0.1, 0.1, size=(4,))
+ in_location =
[mx.nd.array(data_tmp).tostype(stype),mx.nd.array(weight_tmp).tostype(stype),
mx.nd.array(bias_tmp).tostype(stype)]
+ # check_numeric_gradient(test, in_location,
numeric_eps=1e-2,rtol=1e-5, atol=1e-6)
+ check_numeric_gradient(test, in_location,
numeric_eps=1e-2,rtol=0.16, atol=1e-2)
+
+ stypes = ['row_sparse', 'default']
+ for stype in stypes:
+ check_convolution_training(stype)
+
+def test_Deconvolution():
+ def check_Deconvolution_training(stype):
+ for shape in [(3, 3, 10),(3, 3, 10, 10)]:
+ data_tmp = np.random.randint(256, size=shape)
+ data = mx.symbol.Variable('data', stype=stype)
+
+ if np.array(shape).shape[0] == 3:
+ test = mx.symbol.Deconvolution(data=data, kernel=(3,),
stride=(2), num_filter=4)
+ weight_tmp = np.random.normal(-0.1, 0.1, size=(3, 4, 3))
+
+ elif np.array(shape).shape[0] == 4:
+ test = mx.symbol.Deconvolution(data=data, kernel=(3, 3),
stride=(2, 2), num_filter=4)
+ weight_tmp = np.random.normal(-0.1, 0.1, size=(3,4,3,3))
+ else:
+ return 0
+ bias_tmp = np.random.normal(0.1, 0.1, size=(4,))
+ in_location =
[mx.nd.array(data_tmp).tostype(stype),mx.nd.array(weight_tmp).tostype(stype),
mx.nd.array(bias_tmp).tostype(stype)]
+ # check_numeric_gradient(test, in_location,
numeric_eps=1e-2,rtol=1e-5, atol=1e-6)
+ check_numeric_gradient(test, in_location,
numeric_eps=1e-2,rtol=0.16, atol=1e-2)
+
+ stypes = ['row_sparse', 'default']
+ for stype in stypes:
+ check_Deconvolution_training(stype)
+
+@with_seed()
+def test_LRN():
+ def check_LRN_training(stype):
+ for shape in [(3, 4, 5, 5)]:
+ data_tmp = np.random.normal(-0.1, 0.1, size=shape)
+ data = mx.symbol.Variable('data', stype=stype)
+ in_location = [mx.nd.array(data_tmp).tostype(stype)]
+
+ test = mx.symbol.LRN(data,nsize=3)
+ # check_numeric_gradient(test, in_location,
numeric_eps=1e-2,rtol=1e-5, atol=1e-6)
+ check_numeric_gradient(test, in_location,
numeric_eps=1e-2,rtol=0.16, atol=1e-2)
+
+ stypes = ['row_sparse', 'default']
+ for stype in stypes:
+ check_LRN_training(stype)
+
if __name__ == '__main__':
test_mkldnn_install()
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
With regards,
Apache Git Services