This is an automated email from the ASF dual-hosted git repository.

jxie pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/master by this push:
     new 92286c9  Test/mkl dnn act (#11026)
92286c9 is described below

commit 92286c9106dd63d2bfd062f9abb0e53b071a46e4
Author: Alexander Zai <[email protected]>
AuthorDate: Tue May 29 17:48:33 2018 -0700

    Test/mkl dnn act (#11026)
    
    * add mkl act unit test
    
    * fix operator name
    
    * use custom ndarray init
    
    * func missing param
    
    * add init fn for act operator test
    
    * remove extra white space
    
    * fix fetch relu operator
    
    * fix get relu operator name
    
    * add assert abs in verify fn
    
    * remove unused operator
    
    * cast blob ptr to float
    
    * use parsed param
    
    * use attr_parser
    
    * fix header order
    
    * update test fn name
    
    * use relu fn
    
    * add kFComputeEx dispatch
    
    * init posneg mklarray
    
    * fix generating rnd pos neg ints
    
    * output arrays are rnd generated
    
    * test that getinputarrays creates view and mkldnn arrays
    
    * add more output types
    
    * fix typo
    
    * fix gettestinput test
    
    * create arrattr struct to display arr info
    
    * refactor initarray
    
    * print arr description in verify fn
    
    * use long int string interpolation
    
    * fix alias params
    
    * iterate over dims
    
    * print c_str
    
    * print output info
    
    * improve print message
    
    * improve print
    
    * fix new lines in output
    
    * refactor print messages
    
    * fix typos
    
    * fix lint issues
    
    * fix rebase
    
    * pass ndarray as ptr
    
    * store copy of ndarray in attrs obj
    
    * fix rem inits
    
    * fix dispatch size
    
    * move print earlier
    
    * use createmkldnnmem helper fun
    
    * fix lint
    
    * refactor if else statement
    
    * use buffer ndarray
    
    * fix spacing
    
    * fix refactor
    
    * revert sum refactor
    
    * use fallback compute
    
    * fix typo
    
    * fix lint
    
    * use fallbackcompute fn for act operator
    
    * convert activation impl funcs to fcompute std
    
    * remove unused var
    
    * move unused variable
    
    * fix indent
---
 src/operator/nn/activation-inl.h     |  44 +++---
 src/operator/nn/activation.cc        |   6 +-
 src/operator/nn/mkldnn/mkldnn_act.cc |  16 ++-
 tests/cpp/operator/mkldnn.cc         | 267 +++++++++++++++++++++++------------
 4 files changed, 213 insertions(+), 120 deletions(-)

diff --git a/src/operator/nn/activation-inl.h b/src/operator/nn/activation-inl.h
index a9f6dbe..e6f8915 100644
--- a/src/operator/nn/activation-inl.h
+++ b/src/operator/nn/activation-inl.h
@@ -120,59 +120,62 @@ void ActivationBackward(const OpContext &ctx, const TBlob &out_grad,
 }
 
 template<typename xpu>
-void ActivationComputeImpl(const ActivationParam &param, const OpContext &ctx,
-                           const TBlob &input, OpReqType req, const TBlob &output) {
+void ActivationComputeImpl(const nnvm::NodeAttrs& attrs, const OpContext &ctx,
+                           const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req,
+                           const std::vector<TBlob>& outputs) {
+  const ActivationParam& param = nnvm::get<ActivationParam>(attrs.parsed);
   switch (param.act_type) {
     case activation::kReLU:
       ActivationForward<xpu, mshadow_op::relu, mshadow_op::relu_grad>(
-          ctx, input, req, output);
+          ctx, inputs[0], req[0], outputs[0]);
       break;
     case activation::kSigmoid:
       ActivationForward<xpu, mshadow_op::sigmoid, mshadow_op::sigmoid_grad>(
-          ctx, input, req, output);
+          ctx, inputs[0], req[0], outputs[0]);
       break;
     case activation::kTanh:
       ActivationForward<xpu, mshadow_op::tanh, mshadow_op::tanh_grad>(
-          ctx, input, req, output);
+          ctx, inputs[0], req[0], outputs[0]);
       break;
     case activation::kSoftReLU:
       ActivationForward<xpu, mshadow_op::softrelu, mshadow_op::softrelu_grad>(
-          ctx, input, req, output);
+          ctx, inputs[0], req[0], outputs[0]);
       break;
     case activation::kSoftSign:
       ActivationForward<xpu, mshadow_op::softsign, mshadow_op::softsign_grad>(
-              ctx, input, req, output);
-          break;
+          ctx, inputs[0], req[0], outputs[0]);
+      break;
     default:
       LOG(FATAL) << "unknown activation type";
   }
 }
 
 template<typename xpu>
-void ActivationGradComputeImpl(const ActivationParam &param, const OpContext &ctx,
-                               const TBlob &out_grad, const TBlob &out_data,
-                               OpReqType req, const TBlob &output) {
+void ActivationGradComputeImpl(const nnvm::NodeAttrs& attrs, const OpContext &ctx,
+                           const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req,
+                           const std::vector<TBlob>& outputs) {
+  const ActivationParam& param = nnvm::get<ActivationParam>(attrs.parsed);
   switch (param.act_type) {
     case activation::kReLU:
       ActivationBackward<xpu, mshadow_op::relu, mshadow_op::relu_grad>(
-          ctx, out_grad, out_data, req, output);
+          ctx, inputs[0], inputs[1], req[0], outputs[0]);
       break;
     case activation::kSigmoid:
       ActivationBackward<xpu, mshadow_op::sigmoid, mshadow_op::sigmoid_grad>(
-          ctx, out_grad, out_data, req, output);
+          ctx, inputs[0], inputs[1], req[0], outputs[0]);
       break;
     case activation::kTanh:
       ActivationBackward<xpu, mshadow_op::tanh, mshadow_op::tanh_grad>(
-          ctx, out_grad, out_data, req, output);
+          ctx, inputs[0], inputs[1], req[0], outputs[0]);
       break;
     case activation::kSoftReLU:
       ActivationBackward<xpu, mshadow_op::softrelu, mshadow_op::softrelu_grad>(
-          ctx, out_grad, out_data, req, output);
+          ctx, inputs[0], inputs[1], req[0], outputs[0]);
       break;
     case activation::kSoftSign:
       ActivationBackward<xpu, mshadow_op::softsign, mshadow_op::softsign_grad>(
-              ctx, out_grad, out_data, req, output);
-          break;
+          ctx, inputs[0], inputs[1], req[0], outputs[0]);
+      break;
     default:
       LOG(FATAL) << "unknown activation type";
   }
@@ -186,8 +189,7 @@ void ActivationCompute(const nnvm::NodeAttrs& attrs,
                        const std::vector<TBlob>& outputs) {
   CHECK_EQ(inputs.size(), 1U);
   CHECK_EQ(outputs.size(), 1U);
-  const ActivationParam& param = nnvm::get<ActivationParam>(attrs.parsed);
-  ActivationComputeImpl<xpu>(param, ctx, inputs[0], req[0], outputs[0]);
+  ActivationComputeImpl<xpu>(attrs, ctx, inputs, req, outputs);
 }
 
 template<typename xpu>
@@ -196,8 +198,8 @@ void ActivationGradCompute(const nnvm::NodeAttrs& attrs,
                            const std::vector<TBlob>& inputs,
                            const std::vector<OpReqType>& req,
                            const std::vector<TBlob>& outputs) {
-  const ActivationParam& param = nnvm::get<ActivationParam>(attrs.parsed);
 #if (MXNET_USE_CUDNN == 1 || MXNET_USE_MKLDNN == 1)
+  const ActivationParam& param = nnvm::get<ActivationParam>(attrs.parsed);
   bool relu = param.act_type == activation::kReLU;
   CHECK_EQ(inputs.size(), relu ? 2U : 3U);
 #else
@@ -205,7 +207,7 @@ void ActivationGradCompute(const nnvm::NodeAttrs& attrs,
 #endif
   CHECK_EQ(outputs.size(), 1U);
   CHECK_EQ(req.size(), 1U);
-  ActivationGradComputeImpl<xpu>(param, ctx, inputs[0], inputs[1], req[0], outputs[0]);
+  ActivationGradComputeImpl<xpu>(attrs, ctx, inputs, req, outputs);
 }
 
 }  // namespace op
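
Note on the activation-inl.h hunks above: both Impl functions now follow
MXNet's standard FCompute signature, which is what lets activation.cc (next
file) hand them to FallBackCompute. For readers unfamiliar with the
convention, a minimal sketch follows; the operator name is a placeholder,
not code from this commit:

    // FCompute convention: a kernel receives the parsed node attributes, an
    // execution context, and parallel vectors of input blobs, write-request
    // modes (kWriteTo, kWriteInplace, kAddTo, ...), and output blobs.
    template<typename xpu>
    void SomeOpCompute(const nnvm::NodeAttrs& attrs, const OpContext& ctx,
                       const std::vector<TBlob>& inputs,
                       const std::vector<OpReqType>& req,
                       const std::vector<TBlob>& outputs);
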
diff --git a/src/operator/nn/activation.cc b/src/operator/nn/activation.cc
index 595b891..d723bbe 100644
--- a/src/operator/nn/activation.cc
+++ b/src/operator/nn/activation.cc
@@ -62,7 +62,6 @@ static void ActivationComputeExCPU(const nnvm::NodeAttrs& attrs,
                                    const std::vector<NDArray>& inputs,
                                    const std::vector<OpReqType>& req,
                                    const std::vector<NDArray>& outputs) {
-  const ActivationParam& param = nnvm::get<ActivationParam>(attrs.parsed);
   CHECK_EQ(inputs.size(), 1U);
   CHECK_EQ(outputs.size(), 1U);
   if (SupportMKLDNN(inputs[0])) {
@@ -71,7 +70,7 @@ static void ActivationComputeExCPU(const nnvm::NodeAttrs& attrs,
     MKLDNN_OPCHECK_RUN(ActivationCompute<cpu>, attrs, ctx, inputs, req, outputs);
     return;
   }
-  ActivationComputeImpl<cpu>(param, ctx, inputs[0].data(), req[0], outputs[0].data());
+  FallBackCompute(ActivationComputeImpl<cpu>, attrs, ctx, inputs, req, outputs);
 }
 
 void ActivationGradComputeExCPU(const nnvm::NodeAttrs& attrs,
@@ -90,8 +89,7 @@ void ActivationGradComputeExCPU(const nnvm::NodeAttrs& attrs,
      MKLDNN_OPCHECK_RUN(ActivationGradCompute<cpu>, attrs, ctx, inputs, req, outputs);
     return;
   }
-  ActivationGradComputeImpl<cpu>(param, ctx, inputs[0].data(), inputs[1].data(),
-                                 req[0], outputs[0].data());
+  FallBackCompute(ActivationGradComputeImpl<cpu>, attrs, ctx, inputs, req, outputs);
 }
 #endif
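
For context on the activation.cc hunks above: FallBackCompute (declared in
mkldnn_base-inl.h) is the reason the Impl functions needed FCompute-style
signatures. A hedged sketch of its role, assuming typical behavior rather
than quoting its implementation: reorder any MKLDNN-layout inputs back to
the default layout, unwrap the NDArrays into TBlobs, and invoke the plain
CPU kernel.

    // Illustrative sketch only; the real helper lives in
    // src/operator/nn/mkldnn/mkldnn_base-inl.h.
    template<typename FCompute>
    void FallBackComputeSketch(FCompute fn, const nnvm::NodeAttrs& attrs,
                               const OpContext& ctx,
                               const std::vector<NDArray>& inputs,
                               const std::vector<OpReqType>& req,
                               const std::vector<NDArray>& outputs) {
      std::vector<NDArray> tmps;                 // keeps reordered copies alive
      std::vector<TBlob> in_blobs, out_blobs;
      for (const NDArray& in : inputs) {
        if (in.IsMKLDNNData()) {
          tmps.push_back(in.Reorder2Default());  // back to default layout
          in_blobs.push_back(tmps.back().data());
        } else {
          in_blobs.push_back(in.data());
        }
      }
      for (const NDArray& out : outputs)
        out_blobs.push_back(out.data());
      fn(attrs, ctx, in_blobs, req, out_blobs);  // plain CPU kernel
    }
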
 
diff --git a/src/operator/nn/mkldnn/mkldnn_act.cc b/src/operator/nn/mkldnn/mkldnn_act.cc
index 50e742d..a278456 100644
--- a/src/operator/nn/mkldnn/mkldnn_act.cc
+++ b/src/operator/nn/mkldnn/mkldnn_act.cc
@@ -159,13 +159,19 @@ void MKLDNNActivationForward(const nnvm::NodeAttrs& attrs, const OpContext &ctx,
                              const NDArray &in_data, const OpReqType &req,
                              const NDArray &out_data) {
   const ActivationParam& param = nnvm::get<ActivationParam>(attrs.parsed);
-  auto input_mem = in_data.GetMKLDNNData();
-  MKLDNNActForward &fwd = GetActForward(param, ctx, in_data, *input_mem);
-  auto out_mem = const_cast<NDArray &>(out_data).CreateMKLDNNData(
-      fwd.fwd_pd.dst_primitive_desc());
-  fwd.SetNewMem(*input_mem, *out_mem);
+
+  NDArray in_buffer = in_data;
+  if (in_data.IsView() && in_data.IsMKLDNNData())
+    in_buffer = in_data.Reorder2Default();
+
+  auto input_mem = in_buffer.GetMKLDNNData();
+  MKLDNNActForward &fwd = GetActForward(param, ctx, in_buffer, *input_mem);
+  auto out_mem = CreateMKLDNNMem(out_data, fwd.fwd_pd.dst_primitive_desc(),
+                                 req);
+  fwd.SetNewMem(*input_mem, *out_mem.second);
   MKLDNNStream *stream = MKLDNNStream::Get();
   stream->RegisterPrim(fwd.GetFwd());
+  CommitOutput(out_data, out_mem);
   stream->Submit();
 }
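
The mkldnn_act.cc hunk above replaces direct CreateMKLDNNData usage with the
CreateMKLDNNMem/CommitOutput pair. The point of the idiom: when req is
in-place or accumulating, the primitive may not be able to write straight
into out_data, so CreateMKLDNNMem can return a temporary buffer and
CommitOutput writes it back after the primitive runs. A condensed
restatement of the pattern, with interpretive comments that are not in the
source:

    auto out_mem = CreateMKLDNNMem(out_data, fwd.fwd_pd.dst_primitive_desc(),
                                   req);            // .second: memory to write into
    fwd.SetNewMem(*input_mem, *out_mem.second);     // bind primitive inputs/outputs
    MKLDNNStream::Get()->RegisterPrim(fwd.GetFwd());  // queue the relu primitive
    CommitOutput(out_data, out_mem);                // copy/add back if a temp was used
    MKLDNNStream::Get()->Submit();                  // run everything queued
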
 
diff --git a/tests/cpp/operator/mkldnn.cc b/tests/cpp/operator/mkldnn.cc
index 1d29219..76872d5 100644
--- a/tests/cpp/operator/mkldnn.cc
+++ b/tests/cpp/operator/mkldnn.cc
@@ -25,6 +25,8 @@
 
 #if MXNET_USE_MKLDNN == 1
 
+#include <cmath>
+#include <climits>
 #include "gtest/gtest.h"
 #include "mxnet/imperative.h"
 #include "../../src/operator/nn/mkldnn/mkldnn_base-inl.h"
@@ -89,34 +91,43 @@ TEST(MKLDNN_UTIL_FUNC, MemFormat) {
 }
 
 // Init arrays with the default layout.
-static void InitArray(NDArray *arr, bool is_rand = false) {
+static void InitDefaultArray(NDArray *arr, bool is_rand = false) {
   const TBlob &blob = arr->data();
   mshadow::default_real_t *data = blob.dptr<mshadow::default_real_t>();
   size_t size = blob.Size();
-  if (is_rand) {
-    for (size_t i = 0; i < size; i++)
+  for (size_t i = 0; i < size; i++) {
+    if (is_rand) {
       data[i] = std::rand();
-  } else {
-    for (size_t i = 0; i < size; i++)
+    } else {
       data[i] = i;
+    }
   }
 }
 
-// Init arrays with the specified layout.
-static void InitMKLDNNArray(NDArray *arr, const mkldnn::memory::primitive_desc &pd,
-                            bool is_rand = false) {
+// Init arrays with negative and positive values
+static void InitNegPosArray(NDArray *arr, bool is_rand = false) {
   const TBlob &blob = arr->data();
   mshadow::default_real_t *data = blob.dptr<mshadow::default_real_t>();
-  size_t size = blob.Size();
-  if (is_rand) {
-    for (size_t i = 0; i < size; i++)
-      data[i] = std::rand();
-  } else {
-    for (size_t i = 0; i < size; i++)
-      data[i] = i;
-  }
-  arr->MKLDNNDataReorderAsync(pd);
-  arr->WaitToRead();
+  int size = blob.Size();
+
+  for (int i = 0; i < size; i++)
+    if (is_rand) {
+      data[i] = std::rand() - INT_MAX / 2;
+    } else {
+      int shift = size >> 1;
+      data[i] = i - shift;
+    }
+}
+
+using InitFunc = std::function<void (NDArray *arr, bool is_rand)>;
+using VerifyFunc = std::function<void (const std::vector<NDArray *> &in_arrs, const NDArray &arr)>;
+
+// Init arrays with the specified layout.
+static void InitMKLDNNArray(NDArray *arr, const mkldnn::memory::primitive_desc &pd,
+                            InitFunc init_fn, bool is_rand = false) {
+    init_fn(arr, is_rand);
+    arr->MKLDNNDataReorderAsync(pd);
+    arr->WaitToRead();
 }
 
 static void VerifyDefMem(const mkldnn::memory &mem) {
@@ -290,7 +301,7 @@ TEST(MKLDNN_NDArray, GetDataReorder) {
   // Reorder from the default to any other layout.
   for (auto s : shapes) {
     NDArray arr(s, Context());
-    InitArray(&arr);
+    InitDefaultArray(&arr);
     for (auto pd : pds) {
       if (s.Size() == pd.get_size() / sizeof(mshadow::default_real_t)) {
         const mkldnn::memory *mem = arr.GetMKLDNNDataReorder(pd);
@@ -322,7 +333,7 @@ TEST(MKLDNN_NDArray, GetDataReorder) {
         for (int i = 0; i < from_pd.desc().data.ndims; i++)
           printf("%d, ", from_pd.desc().data.dims[i]);
         printf("), format: %d\n", from_pd.desc().data.format);
-        InitMKLDNNArray(&arr, from_pd);
+        InitMKLDNNArray(&arr, from_pd, InitDefaultArray);
         for (auto to_pd : pds) {
           if (to_pd.get_size() / sizeof(mshadow::default_real_t) == s.Size()) {
             const mkldnn::memory *mem = arr.GetMKLDNNDataReorder(to_pd);
@@ -344,6 +355,12 @@ TEST(MKLDNN_NDArray, GetDataReorder) {
   }
 }
 
+struct NDArrayAttrs {
+  NDArray arr;
+  std::string desc;
+  NDArrayAttrs(NDArray arr, std::string desc) : arr(arr), desc(desc) {}
+};
+
 struct OpAttrs {
   nnvm::NodeAttrs attrs;
   std::vector<DispatchMode> dispatches;
@@ -358,6 +375,17 @@ OpAttrs GetCopyOp() {
   return attrs;
 }
 
+OpAttrs GetReluOp() {
+  OpAttrs attrs;
+  attrs.attrs.op = Op::Get("Activation");
+  attrs.attrs.dict.insert({"act_type", "relu"});
+  attrs.attrs.op->attr_parser(&attrs.attrs);
+  attrs.dispatches.resize(2);
+  attrs.dispatches[0] = DispatchMode::kFCompute;
+  attrs.dispatches[1] = DispatchMode::kFComputeEx;
+  return attrs;
+}
+
 OpAttrs GetLeakyReluOp() {
   OpAttrs attrs;
   attrs.attrs.op = Op::Get("LeakyReLU");
@@ -366,6 +394,7 @@ OpAttrs GetLeakyReluOp() {
   return attrs;
 }
 
+
 OpAttrs GetSumOp() {
   OpAttrs attrs;
   attrs.attrs.op = Op::Get("elemwise_add");
@@ -396,32 +425,54 @@ OpAttrs GetSumOp() {
  *    reordered to 5 dimensions.
  *
  */
-std::vector<NDArray> GetTestInputArrays() {
+std::vector<NDArrayAttrs> GetTestInputArrays(InitFunc init_fn) {
   TestArrayShapes tas = GetTestArrayShapes();
   std::vector<nnvm::TShape> shapes = tas.shapes;
   std::vector<mkldnn::memory::primitive_desc> pds = tas.pds;
 
-  std::vector<NDArray> in_arrs;
+  std::vector<NDArrayAttrs> in_arrs;
   for (auto shape : shapes) {
-    in_arrs.emplace_back(shape, Context());
-    InitArray(&in_arrs.back());
+    // Type 1.
+    NDArray arr(shape, Context());
+    in_arrs.emplace_back(arr, "Normal NDArray");
+    init_fn(&in_arrs.back().arr, false);
     for (auto pd : pds) {
       if (shape.Size() != pd.get_size() / sizeof(mshadow::default_real_t))
         continue;
 
-      in_arrs.emplace_back(shape, Context());
-      InitMKLDNNArray(&in_arrs.back(), pd);
+      // Type 2, 3.
+      arr = NDArray(shape, Context());
+      in_arrs.emplace_back(arr, "MKLDNN NDArray");
+      InitMKLDNNArray(&in_arrs.back().arr, pd, init_fn);
 
-      // Get a sliced version.
-      NDArray arr(shape, Context());
-      InitMKLDNNArray(&arr, pd);
-      arr = arr.Slice(1, arr.shape()[0] - 1);
-      in_arrs.emplace_back(arr);
+      // Type 4, 5, 6.
+      arr = NDArray(shape, Context());
+      InitMKLDNNArray(&arr, pd, init_fn);
+      in_arrs.emplace_back(arr.Slice(1, arr.shape()[0] - 1), "Reshaped MKLDNN NDArray");
     }
   }
   return in_arrs;
 }
 
+TEST(MKLDNN_NDArray, GetTestInputArrays) {
+  std::vector<NDArrayAttrs> in_arrs = GetTestInputArrays(InitDefaultArray);
+  int mkldnn_count = 0, mkldnn_view_count = 0;
+  for (auto arr : in_arrs) {
+    if (arr.arr.IsView() && arr.arr.IsMKLDNNData()) {
+      mkldnn_view_count++;
+      continue;
+    }
+
+    if (arr.arr.IsMKLDNNData()) {
+      mkldnn_count++;
+      continue;
+    }
+  }
+
+  EXPECT_GT(mkldnn_view_count, 0);
+  EXPECT_GT(mkldnn_count, 0);
+}
+
 /*
  * We want to get a few types of NDArrays for testing:
  * 1. Normal NDArray
@@ -440,19 +491,21 @@ std::vector<NDArray> GetTestInputArrays() {
  * 8. Reused NDArray with MKLDNN layout.
  * 9. Reused NDArray with MKLDNN layout of different dimensions.
  */
-std::vector<NDArray> GetTestOutputArrays(const TShape &shape,
-                                         const std::vector<mkldnn::memory::primitive_desc> &pds) {
-  std::vector<NDArray> in_arrs;
+std::vector<NDArrayAttrs> GetTestOutputArrays(const TShape &shape,
+                                         const std::vector<mkldnn::memory::primitive_desc> &pds,
+                                         const InitFunc init_fn) {
+  std::vector<NDArrayAttrs> in_arrs;
   // Type 1.
-  in_arrs.emplace_back(shape, Context());
-  InitArray(&in_arrs.back(), true);
+  NDArray arr(shape, Context());
+  in_arrs.emplace_back(arr, "Normal NDArray");
+  init_fn(&in_arrs.back().arr, true);
 
   // Type 4.
   TShape tmp_shape = shape;
   tmp_shape[0] = shape[0] * 2;
   NDArray arr0(tmp_shape, Context());
-  InitArray(&arr0, true);
-  in_arrs.emplace_back(arr0.Slice(1, shape[0] + 1));
+  init_fn(&arr0, true);
+  in_arrs.emplace_back(arr0.Slice(1, shape[0] + 1), "Reshaped NDArray");
 
   // Type 5.
   // Get a reused version.
@@ -460,31 +513,33 @@ std::vector<NDArray> GetTestOutputArrays(const TShape &shape,
   s[0] = shape.Size();
   NDArray arr1(s, Context());
   arr1 = arr1.AsArray(shape, arr1.dtype());
-  InitArray(&arr1, true);
-  in_arrs.emplace_back(arr1);
+  init_fn(&arr1, true);
+  in_arrs.emplace_back(arr1, "Reused NDArray");
 
   // Type 6.
   s[0] = shape.Size() * GetTypeSize(mshadow::default_type_flag);
   NDArray arr2(s, Context(), true, mshadow::kUint8);
   arr2 = arr2.AsArray(shape, mshadow::default_type_flag);
-  InitArray(&arr2, true);
-  in_arrs.emplace_back(arr2);
+  init_fn(&arr2, true);
+  in_arrs.emplace_back(arr2, "Reused NDArray with diff data type");
 
   // Type 7
   s[0] = shape.Size() * GetTypeSize(mshadow::default_type_flag) * 2;
   NDArray arr3(s, Context(), true, mshadow::kUint8);
   tmp_shape[0] = shape[0] * 2;
   arr3 = arr3.AsArray(tmp_shape, mshadow::default_type_flag);
-  InitArray(&arr3, true);
-  in_arrs.emplace_back(arr3.Slice(1, shape[0] + 1));
+  init_fn(&arr3, true);
+  in_arrs.emplace_back(arr3.Slice(1, shape[0] + 1), "Reused+Reshaped NDArray");
+
 
   for (auto pd : pds) {
     if (shape.Size() != pd.get_size() / sizeof(mshadow::default_real_t))
       continue;
 
     // Type 2, 3.
-    in_arrs.emplace_back(shape, Context());
-    InitMKLDNNArray(&in_arrs.back(), pd, true);
+    arr = NDArray(shape, Context());
+    in_arrs.emplace_back(arr, "MKLDNN NDArray");
+    InitMKLDNNArray(&in_arrs.back().arr, pd, init_fn, true);
 
     // Type 8, 9.
     // Get a reused version.
@@ -492,14 +547,12 @@ std::vector<NDArray> GetTestOutputArrays(const TShape &shape,
     s[0] = shape.Size();
     NDArray arr = NDArray(s, Context());
     arr = arr.AsArray(shape, arr.dtype());
-    InitMKLDNNArray(&arr, pd, true);
-    in_arrs.emplace_back(arr);
+    InitMKLDNNArray(&arr, pd, init_fn, true);
+    in_arrs.emplace_back(arr, "Reused MKLDNN NDArray");
   }
   return in_arrs;
 }
 
-using VerifyFunc = std::function<void (const std::vector<NDArray *> &in_arrs, const NDArray &arr)>;
-
 void VerifyCopyResult(const std::vector<NDArray *> &in_arrs, const NDArray &arr) {
   NDArray tmp1 = in_arrs[0]->Reorder2Default();
   NDArray tmp2 = arr.Reorder2Default();
@@ -510,6 +563,19 @@ void VerifyCopyResult(const std::vector<NDArray *> &in_arrs, const NDArray &arr)
                    tmp1.shape().Size() * sizeof(mshadow::default_real_t)), 0);
 }
 
+void VerifyActResult(const std::vector<NDArray *> &in_arrs, const NDArray &arr) {
+  NDArray tmp1 = in_arrs[0]->Reorder2Default();
+  NDArray tmp2 = arr.Reorder2Default();
+  TBlob blob1 = tmp1.data();
+  TBlob blob2 = tmp2.data();
+  mshadow::default_real_t *d1 = static_cast<mshadow::default_real_t*>(blob1.dptr_);
+  mshadow::default_real_t *d2 = static_cast<mshadow::default_real_t*>(blob2.dptr_);
+  EXPECT_EQ(tmp1.shape().Size(), tmp2.shape().Size());
+  for (size_t i = 0; i < tmp1.shape().Size(); i++) {
+    EXPECT_EQ(std::fmax(d1[i], 0), d2[i]);
+  }
+}
+
 void VerifySumResult(const std::vector<NDArray *> &in_arrs, const NDArray &arr) {
   NDArray in1 = in_arrs[0]->Reorder2Default();
   NDArray in2 = in_arrs[1]->Reorder2Default();
@@ -524,27 +590,41 @@ void VerifySumResult(const std::vector<NDArray *> &in_arrs, const NDArray &arr)
     EXPECT_EQ(d1[i] + d2[i], o[i]);
 }
 
+void PrintVerifyMsg(const NDArrayAttrs &arr1, const NDArrayAttrs &arr2) {
+  TShape t1 = arr1.arr.shape();
+  TShape t2 = arr2.arr.shape();
+
+  printf("Verifying: %s (", arr1.desc.c_str());
+  for (size_t i = 0; i < t1.ndim(); i++)
+    printf("%ld, ", t1[i]);
+  printf(") with %s (", arr2.desc.c_str());
+  for (size_t i = 0; i < t2.ndim(); i++)
+    printf("%ld, ", t2[i]);
+  printf(")\n");
+}
+
 TEST(MKLDNN_NDArray, CopyFrom) {
   TestArrayShapes tas = GetTestArrayShapes();
   std::vector<mkldnn::memory::primitive_desc> pds = tas.pds;
 
-  std::vector<NDArray> in_arrs = GetTestInputArrays();
+  std::vector<NDArrayAttrs> in_arrs = GetTestInputArrays(InitDefaultArray);
   for (auto in_arr : in_arrs) {
-    std::vector<NDArray> out_arrs = GetTestOutputArrays(in_arr.shape(), pds);
+    std::vector<NDArrayAttrs> out_arrs = GetTestOutputArrays(in_arr.arr.shape(), pds,
+        InitDefaultArray);
     for (auto out_arr : out_arrs) {
-      if (in_arr.IsMKLDNNData() && in_arr.IsView())
-        in_arr = in_arr.Reorder2Default();
-      const mkldnn::memory *mem = in_arr.GetMKLDNNData();
-      out_arr.CopyFrom(*mem);
+      if (in_arr.arr.IsMKLDNNData() && in_arr.arr.IsView())
+        in_arr.arr = in_arr.arr.Reorder2Default();
+      const mkldnn::memory *mem = in_arr.arr.GetMKLDNNData();
+      out_arr.arr.CopyFrom(*mem);
       MKLDNNStream::Get()->Submit();
       std::vector<NDArray *> inputs(1);
-      inputs[0] = &in_arr;
-      VerifyCopyResult(inputs, out_arr);
+      inputs[0] = &in_arr.arr;
+      VerifyCopyResult(inputs, out_arr.arr);
     }
   }
 }
 
-void TestUnaryOp(const OpAttrs &attrs, VerifyFunc verify_fn) {
+void TestUnaryOp(const OpAttrs &attrs, InitFunc init_fn, VerifyFunc verify_fn) {
   std::vector<NDArray*> inputs(1);
   std::vector<NDArray*> outputs(1);
   std::vector<OpReqType> req(1);
@@ -553,38 +633,40 @@ void TestUnaryOp(const OpAttrs &attrs, VerifyFunc verify_fn) {
   TestArrayShapes tas = GetTestArrayShapes();
   std::vector<mkldnn::memory::primitive_desc> pds = tas.pds;
 
-  std::vector<NDArray> in_arrs = GetTestInputArrays();
+  std::vector<NDArrayAttrs> in_arrs = GetTestInputArrays(init_fn);
   for (auto in_arr : in_arrs) {
     for (auto dispatch : dispatches) {
-      std::vector<NDArray> out_arrs = GetTestOutputArrays(in_arr.shape(), pds);
+      std::vector<NDArrayAttrs> out_arrs = GetTestOutputArrays(in_arr.arr.shape(), pds, init_fn);
       for (auto out_arr : out_arrs) {
         req[0] = kWriteTo;
-        inputs[0] = &in_arr;
-        outputs[0] = &out_arr;
+        inputs[0] = &in_arr.arr;
+        outputs[0] = &out_arr.arr;
+        PrintVerifyMsg(in_arr, out_arr);
         Imperative::Get()->InvokeOp(Context(), attrs.attrs, inputs,
                                    outputs, req, dispatch, mxnet::OpStatePtr());
-        out_arr.WaitToRead();
-        verify_fn(inputs, out_arr);
+        out_arr.arr.WaitToRead();
+        verify_fn(inputs, *outputs[0]);
       }
     }
   }
 
   for (auto dispatch : dispatches) {
-    in_arrs = GetTestInputArrays();
+    in_arrs = GetTestInputArrays(init_fn);
     for (auto arr : in_arrs) {
       // If the array is a view, we shouldn't write data to it.
-      if (arr.IsView())
+      if (arr.arr.IsView())
         continue;
 
-      NDArray orig = arr.Copy(arr.ctx());
+      NDArrayAttrs orig(arr.arr.Copy(arr.arr.ctx()), "InPlace Copy");
       req[0] = kWriteInplace;
-      inputs[0] = &arr;
-      outputs[0] = &arr;
+      inputs[0] = &arr.arr;
+      outputs[0] = &arr.arr;
+      PrintVerifyMsg(orig, arr);
       Imperative::Get()->InvokeOp(Context(), attrs.attrs, inputs, outputs, req,
                                   dispatch, mxnet::OpStatePtr());
-      arr.WaitToRead();
-      inputs[0] = &orig;
-      verify_fn(inputs, arr);
+      arr.arr.WaitToRead();
+      inputs[0] = &orig.arr;
+      verify_fn(inputs, *outputs[0]);
     }
   }
 }
@@ -598,51 +680,56 @@ void TestBinaryOp(const OpAttrs &attrs, VerifyFunc verify_fn) {
   TestArrayShapes tas = GetTestArrayShapes();
   std::vector<mkldnn::memory::primitive_desc> pds = tas.pds;
 
-  std::vector<NDArray> in_arrs = GetTestInputArrays();
+  std::vector<NDArrayAttrs> in_arrs = GetTestInputArrays(InitDefaultArray);
   for (auto in_arr1 : in_arrs) {
     for (auto dispatch : dispatches) {
-      std::vector<NDArray> out_arrs = GetTestOutputArrays(in_arr1.shape(), pds);
+      std::vector<NDArrayAttrs> out_arrs = GetTestOutputArrays(in_arr1.arr.shape(), pds,
+          InitDefaultArray);
       for (auto out_arr : out_arrs) {
         req[0] = kWriteTo;
-        inputs[0] = &in_arr1;
-        inputs[1] = &in_arr1;
-        outputs[0] = &out_arr;
+        inputs[0] = &in_arr1.arr;
+        inputs[1] = &in_arr1.arr;
+        outputs[0] = &out_arr.arr;
         Imperative::Get()->InvokeOp(Context(), attrs.attrs, inputs,
                                    outputs, req, dispatch, mxnet::OpStatePtr());
-        out_arr.WaitToRead();
-        verify_fn(inputs, out_arr);
+        out_arr.arr.WaitToRead();
+        verify_fn(inputs, out_arr.arr);
       }
     }
   }
 
   for (auto dispatch : dispatches) {
-    in_arrs = GetTestInputArrays();
+    in_arrs = GetTestInputArrays(InitDefaultArray);
     for (auto arr : in_arrs) {
       // If the array is a view, we shouldn't write data to it.
-      if (arr.IsView())
+      if (arr.arr.IsView())
         continue;
 
-      NDArray orig = arr.Copy(arr.ctx());
+      NDArray orig = arr.arr.Copy(arr.arr.ctx());
       req[0] = kWriteInplace;
-      inputs[0] = &arr;
-      inputs[1] = &arr;
-      outputs[0] = &arr;
+      inputs[0] = &arr.arr;
+      inputs[1] = &arr.arr;
+      outputs[0] = &arr.arr;
       Imperative::Get()->InvokeOp(Context(), attrs.attrs, inputs, outputs, req,
                                   dispatch, mxnet::OpStatePtr());
-      arr.WaitToRead();
-      std::vector<NDArray *> orig_inputs(2);
+      arr.arr.WaitToRead();
+      std::vector<NDArray*> orig_inputs(2);
       orig_inputs[0] = &orig;
       orig_inputs[1] = &orig;
-      verify_fn(orig_inputs, arr);
+      verify_fn(orig_inputs, arr.arr);
     }
   }
 }
 
 TEST(IMPERATIVE, UnaryOp) {
   OpAttrs attrs = GetCopyOp();
-  TestUnaryOp(attrs, VerifyCopyResult);
+  TestUnaryOp(attrs, InitDefaultArray, VerifyCopyResult);
 }
 
+TEST(IMPERATIVE, ActOp) {
+  OpAttrs attrs = GetReluOp();
+  TestUnaryOp(attrs, InitNegPosArray, VerifyActResult);
+}
 
 TEST(IMPERATIVE, BinaryOp) {
   OpAttrs attrs = GetSumOp();

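For anyone extending these tests: the TestUnaryOp change makes the
initializer pluggable, so wiring up another unary operator takes an op
getter, an init function, and a verify function. A hypothetical example
(GetSigmoidOp and VerifySigmoidResult are illustrative names that do not
exist in this commit):

    TEST(IMPERATIVE, SigmoidOp) {
      // Built the same way as GetReluOp, but with act_type=sigmoid.
      OpAttrs attrs = GetSigmoidOp();                            // hypothetical
      TestUnaryOp(attrs, InitNegPosArray, VerifySigmoidResult);  // hypothetical
    }
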
-- 
To stop receiving notification emails like this one, please contact
[email protected].
