This is an automated email from the ASF dual-hosted git repository.

patriczhao pushed a commit to branch mkldnn-v1.0
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/mkldnn-v1.0 by this push:
     new 3706ece  add mkldnn softmax_output (#16222)
3706ece is described below

commit 3706eceb5ea14f46f65ff308d7c891c56f42541b
Author: rongzha1 <[email protected]>
AuthorDate: Tue Oct 1 21:25:09 2019 +0800

    add mkldnn softmax_output (#16222)
---
 src/operator/nn/mkldnn/mkldnn_ops-inl.h         | 13 +++++----
 src/operator/nn/mkldnn/mkldnn_softmax_output.cc | 39 +++++--------------------
 src/operator/softmax_output.cc                  |  7 +++--
 3 files changed, 19 insertions(+), 40 deletions(-)

diff --git a/src/operator/nn/mkldnn/mkldnn_ops-inl.h b/src/operator/nn/mkldnn/mkldnn_ops-inl.h
index 3713098..793aad7 100644
--- a/src/operator/nn/mkldnn/mkldnn_ops-inl.h
+++ b/src/operator/nn/mkldnn/mkldnn_ops-inl.h
@@ -44,12 +44,6 @@ namespace mxnet {
 namespace op {
 
 #if MXNET_USE_MKLDNN == 1
-/* For softmax_output */
-void MKLDNNSoftmaxOutputForward(const nnvm::NodeAttrs& attrs, const OpContext &ctx,
-                                const std::vector<NDArray> &in_data,
-                                const std::vector<OpReqType> &req,
-                                const std::vector<NDArray> &out_data);
-
 /* For sum */
 void MKLDNNSumForward(const nnvm::NodeAttrs& attrs, const OpContext &ctx,
                       const std::vector<NDArray> &inputs, const OpReqType &req,
@@ -121,6 +115,7 @@ void MKLDNNActivationForward(const nnvm::NodeAttrs& attrs, const OpContext &ctx,
 void MKLDNNActivationBackward(const nnvm::NodeAttrs& attrs, const OpContext &ctx,
                               const NDArray &out_grad, const NDArray &in_data,
                               const OpReqType &req, const NDArray &in_grad);
+
 void MKLDNNLeakyReluForward(const nnvm::NodeAttrs& attrs, const OpContext &ctx,
                             const NDArray &in_data, const OpReqType &req,
                             const NDArray &out_data);
@@ -133,6 +128,12 @@ void MKLDNNSoftmaxForward(const nnvm::NodeAttrs& attrs, const OpContext &ctx,
                           const NDArray &in_data, const OpReqType &req,
                           const NDArray &out_data);
 
+/* For softmax_output */
+void MKLDNNSoftmaxOutputForward(const nnvm::NodeAttrs& attrs, const OpContext &ctx,
+                                const std::vector<NDArray> &in_data,
+                                const std::vector<OpReqType> &req,
+                                const std::vector<NDArray> &out_data);
+
 void MKLDNNSum(const mkldnn::memory &arr1, const mkldnn::memory &arr2,
                const mkldnn::memory &out);
 
diff --git a/src/operator/nn/mkldnn/mkldnn_softmax_output.cc b/src/operator/nn/mkldnn/mkldnn_softmax_output.cc
index ae34fe6..981969b 100644
--- a/src/operator/nn/mkldnn/mkldnn_softmax_output.cc
+++ b/src/operator/nn/mkldnn/mkldnn_softmax_output.cc
@@ -23,19 +23,17 @@
  * \author Zhang Rong A
 */
 
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_MKLDNN == 100
 #include "../../softmax_output-inl.h"
 #include "./mkldnn_ops-inl.h"
 #include "./mkldnn_base-inl.h"
-
 namespace mxnet {
 namespace op {
 
 static mkldnn::softmax_forward::primitive_desc GetSoftmaxOutputFwdDescImpl(
                const SoftmaxOutputParam& param, bool is_train,
                const int axis, const mkldnn::memory &input_mem) {
-  mkldnn::memory::primitive_desc data_mpd = input_mem.get_primitive_desc();
-  mkldnn::memory::desc data_md = data_mpd.desc();
+  mkldnn::memory::desc data_md = input_mem.get_desc();
   auto cpu_engine = CpuEngine::Get()->get_engine();
   auto prop = is_train ? mkldnn::prop_kind::forward_training
                        : mkldnn::prop_kind::forward_scoring;
@@ -47,8 +45,6 @@ typedef ParamOpSign<SoftmaxOutputParam> MKLDNNSoftmaxOuputSignature;
 
 class MKLDNNSoftmaxOutputFwd {
   std::shared_ptr<mkldnn::softmax_forward> fwd_;
-  std::shared_ptr<mkldnn::memory> data_;
-  std::shared_ptr<mkldnn::memory> out_;
 
  public:
   const mkldnn::softmax_forward::primitive_desc fwd_pd;
@@ -56,29 +52,10 @@ class MKLDNNSoftmaxOutputFwd {
   MKLDNNSoftmaxOutputFwd(const SoftmaxOutputParam& param, bool is_train,
                          const int axis, const mkldnn::memory &mem): fwd_pd(
                         GetSoftmaxOutputFwdDescImpl(param, is_train, axis, mem)) {
+    fwd_ = std::make_shared<mkldnn::softmax_forward>(fwd_pd);
   }
 
-  void SetNewMem(const mkldnn::memory &data, const mkldnn::memory &output) {
-    if (this->data_ == nullptr)
-      this->data_ = std::shared_ptr<mkldnn::memory>(new mkldnn::memory(
-        data.get_primitive_desc(), data.get_data_handle()));
-    else
-      this->data_->set_data_handle(data.get_data_handle());
-
-    if (this->out_ == nullptr)
-      this->out_ = std::shared_ptr<mkldnn::memory>(new mkldnn::memory(
-        output.get_primitive_desc(), output.get_data_handle()));
-    else
-      this->out_->set_data_handle(output.get_data_handle());
-
-    if (this->fwd_ == nullptr) {
-      this->fwd_ = std::shared_ptr<mkldnn::softmax_forward>(
-        new mkldnn::softmax_forward(fwd_pd, mkldnn::primitive::at(*this->data_),
-        *this->out_));
-    }
-  }
-
-  const mkldnn::softmax_forward &GetFwd() const {
+  const inline mkldnn::softmax_forward &GetFwd() const {
     return *fwd_;
   }
 };
@@ -129,17 +106,17 @@ void MKLDNNSoftmaxOutputForward(const nnvm::NodeAttrs& attrs,
 
   auto input_mem = idata.GetMKLDNNData();
   auto out_mem = CreateMKLDNNMem(out_data[softmaxout_enum::kOut],
-                                 input_mem->get_primitive_desc(), req[softmaxout_enum::kOut]);
+                                 input_mem->get_desc(), req[softmaxout_enum::kOut]);
 
   MKLDNNSoftmaxOutputFwd &fwd = GetSoftmaxOutputForward(param, ctx, idata);
-  fwd.SetNewMem(*input_mem, *out_mem.second);
 
   MKLDNNStream *stream = MKLDNNStream::Get();
-  stream->RegisterPrim(fwd.GetFwd());
-
+  stream->RegisterPrimArgs(fwd.GetFwd(),
+                           {{MKLDNN_ARG_SRC, *input_mem}, {MKLDNN_ARG_DST, *out_mem.second}});
   CommitOutput(out_data[softmaxout_enum::kOut], out_mem);
   stream->Submit();
 }
 }   // namespace op
 }   // namespace mxnet
 #endif
+
diff --git a/src/operator/softmax_output.cc b/src/operator/softmax_output.cc
index 548225f..3f69d21 100644
--- a/src/operator/softmax_output.cc
+++ b/src/operator/softmax_output.cc
@@ -24,8 +24,9 @@
  * \author Bing Xu, Zhang Rong A
 */
 #include "./softmax_output-inl.h"
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_MKLDNN == 100
 #include "./nn/mkldnn/mkldnn_ops-inl.h"
+#include "./nn/mkldnn/mkldnn_base-inl.h"
 #endif
 namespace mxnet {
 namespace op {
@@ -121,7 +122,7 @@ static bool SoftmaxOutputShape(const nnvm::NodeAttrs& attrs,
   return true;
 }
 
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_MKLDNN == 100
 inline static bool SoftmaxOutputStorageType(const nnvm::NodeAttrs& attrs,
                                             const int dev_mask,
                                             DispatchMode* dispatch_mode,
@@ -231,7 +232,7 @@ NNVM_REGISTER_OP(SoftmaxOutput)
 .set_num_inputs(2)
 .set_num_outputs(1)
 .set_attr_parser(ParamParser<SoftmaxOutputParam>)
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_MKLDNN == 100
 .set_attr<FInferStorageType>("FInferStorageType", SoftmaxOutputStorageType)
 .set_attr<bool>("TIsMKLDNN", true)
 .set_attr<FComputeEx>("FComputeEx<cpu>", SoftmaxOutputComputeExCPU)

Reply via email to