[GitHub] TaoLv commented on a change in pull request #10317: [MXNET-264] Improve performance of MKLDNN in small batch sizes.

2018-04-05 Thread GitBox
TaoLv commented on a change in pull request #10317: [MXNET-264] Improve 
performance of MKLDNN in small batch sizes.
URL: https://github.com/apache/incubator-mxnet/pull/10317#discussion_r179377502
 
 

 ##
 File path: src/ndarray/ndarray.cc
 ##
 @@ -353,23 +340,19 @@ bool NDArray::Chunk::IsDefault() const {
   // format.
   if (mkl_mem_ == nullptr)
 return true;
-  auto desc = mkl_mem_->get_primitive_desc().desc();
-  return desc.data.format == GetDefaultFormat(desc);
+  return !mkl_mem_->IsMKLDNN();
 }
 
 void NDArray::Chunk::Reorder2Default() {
   if (mkl_mem_ == nullptr)
 return;
 
-  auto format = GetDefaultFormat(mkl_mem_->get_primitive_desc().desc());
-  CHECK(format != mkl_mem_->get_primitive_desc().desc().data.format);
+  auto format = mkl_mem_->GetDefaultFormat();
+  CHECK(format != mkl_mem_->GetFormat());
 
 Review comment:
   CHECK_NE()


This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] TaoLv commented on a change in pull request #10317: [MXNET-264] Improve performance of MKLDNN in small batch sizes.

2018-04-05 Thread GitBox
TaoLv commented on a change in pull request #10317: [MXNET-264] Improve 
performance of MKLDNN in small batch sizes.
URL: https://github.com/apache/incubator-mxnet/pull/10317#discussion_r179379623
 
 

 ##
 File path: src/operator/nn/mkldnn/mkldnn_base-inl.h
 ##
 @@ -334,11 +334,104 @@ const mkldnn::memory *GetWeights(const NDArray &arr,
  const mkldnn::memory::primitive_desc 
&target_pd,
  int num_groups);
 
-mkldnn_memory_format_t GetDefaultFormat(mkldnn::memory::desc desc);
+mkldnn_memory_format_t GetDefaultFormat(const mkldnn::memory::desc &desc);
 mkldnn_memory_format_t GetDefaultFormat(int num_dims);
 mkldnn::memory::primitive_desc GetPrimitiveDesc(mkldnn::memory::primitive_desc 
pd,
 mkldnn_memory_format_t format);
 
+static inline bool same_shape(const TShape &shape, const mkldnn_dims_t dims, 
int ndims) {
+  if (shape.ndim() != (size_t)ndims)
+return false;
+  for (int i = 0; i < ndims; i++)
+if (shape[i] != dims[i])
+  return false;
+  return true;
+}
+
+static inline bool same_shape(const TShape &shape, int dtype,
+  const mkldnn::memory::desc &desc) {
+  return same_shape(shape, desc.data.dims, desc.data.ndims)
+  && get_mkldnn_type(dtype) == desc.data.data_type;
+}
+
+/*
+ * There is a large overhead of getting mkldnn::memory::primitive_desc from
+ * mkldnn::memory. This class is created to cache the metadata of mkldnn memory
+ * to provide a much more lightweight method to access them.
+ */
+class MKLDNNMemory {
+  std::shared_ptr<mkldnn::memory> mem;
+  mkldnn::memory::desc desc;
+  size_t size;  // The number of bytes.
+
+ public:
+  MKLDNNMemory(mkldnn::memory::primitive_desc pd, void *addr): desc(pd.desc()) 
{
+mem.reset(new mkldnn::memory(pd, addr));
+size = pd.get_size();
+  }
+
+  explicit MKLDNNMemory(std::shared_ptr<mkldnn::memory> mem): desc(
+  mem->get_primitive_desc().desc()) {
+this->mem = mem;
+auto pd = mem->get_primitive_desc();
+size = pd.get_size();
+  }
+
+  void SetDataHandle(void *handle) {
+mem->set_data_handle(handle);
+  }
+
+  void *GetDataHandle() const {
+return mem->get_data_handle();
+  }
+
+  std::shared_ptr<mkldnn::memory> GetMem() const {
+return mem;
+  }
+
+  mkldnn::memory *GetRaw() const {
+return mem.get();
+  }
+
+  size_t GetSize() const {
+return size;
+  }
+
+  mkldnn::memory::primitive_desc GetPrimitiveDesc() const {
+return mem->get_primitive_desc();
+  }
+
+  mkldnn::memory::primitive_desc GetPrimitiveDesc(mkldnn_memory_format_t 
format) const {
+return mxnet::GetPrimitiveDesc(mem->get_primitive_desc(), format);
+  }
+
+  mkldnn_memory_format_t GetDefaultFormat() const {
+return mxnet::GetDefaultFormat(desc);
+  }
+
+  mkldnn_memory_format_t GetFormat() const {
+return desc.data.format;
+  }
+
+  bool IsMKLDNN() const {
+return GetFormat() != GetDefaultFormat();
+  }
+
+  bool SameFormat(mkldnn::memory::primitive_desc pd) const {
+return mem->get_primitive_desc() == pd;
+  }
+
+  bool SameFormat(const TShape &shape, int dtype) const {
+return same_shape(shape, dtype, desc);
+  }
+
+  void ReorderTo(mkldnn::memory *other) const {
+std::vector<mkldnn::primitive> net;
+net.push_back(mkldnn::reorder(*mem, *other));
+mkldnn::stream(mkldnn::stream::kind::eager).submit(net).wait();
 
 Review comment:
   Why not use MKLDNNStream here?


This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] TaoLv commented on a change in pull request #10317: [MXNET-264] Improve performance of MKLDNN in small batch sizes.

2018-04-05 Thread GitBox
TaoLv commented on a change in pull request #10317: [MXNET-264] Improve 
performance of MKLDNN in small batch sizes.
URL: https://github.com/apache/incubator-mxnet/pull/10317#discussion_r179380591
 
 

 ##
 File path: src/operator/nn/mkldnn/mkldnn_concat.cc
 ##
 @@ -39,18 +100,20 @@ void MKLDNNConcatForward(const nnvm::NodeAttrs& attrs, 
const OpContext &ctx,
   int num_in_data = param.num_args;
   int concat_dim = param.dim;
   std::vector<mkldnn::memory::desc> data_md;
-  std::vector data_mem;
+  std::vector data_mem;
+  data_md.reserve(num_in_data);
+  data_mem.reserve(num_in_data);
   for (int i =0; i < num_in_data; i++) {
   auto tmp_mem = in_data[i].GetMKLDNNData();
 
 Review comment:
   Please help fix the indents here.


This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] TaoLv commented on a change in pull request #10317: [MXNET-264] Improve performance of MKLDNN in small batch sizes.

2018-04-05 Thread GitBox
TaoLv commented on a change in pull request #10317: [MXNET-264] Improve 
performance of MKLDNN in small batch sizes.
URL: https://github.com/apache/incubator-mxnet/pull/10317#discussion_r179380216
 
 

 ##
 File path: src/operator/nn/mkldnn/mkldnn_concat.cc
 ##
 @@ -30,6 +30,67 @@
 namespace mxnet {
 namespace op {
 
+class MKLDNNCcForward {
 
 Review comment:
   Is 'Cc' a short name for concat? Please find a more proper name for this 
class.


This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] TaoLv commented on a change in pull request #10317: [MXNET-264] Improve performance of MKLDNN in small batch sizes.

2018-04-05 Thread GitBox
TaoLv commented on a change in pull request #10317: [MXNET-264] Improve 
performance of MKLDNN in small batch sizes.
URL: https://github.com/apache/incubator-mxnet/pull/10317#discussion_r179379302
 
 

 ##
 File path: src/operator/nn/mkldnn/mkldnn_base-inl.h
 ##
 @@ -334,11 +334,104 @@ const mkldnn::memory *GetWeights(const NDArray &arr,
  const mkldnn::memory::primitive_desc 
&target_pd,
  int num_groups);
 
-mkldnn_memory_format_t GetDefaultFormat(mkldnn::memory::desc desc);
+mkldnn_memory_format_t GetDefaultFormat(const mkldnn::memory::desc &desc);
 mkldnn_memory_format_t GetDefaultFormat(int num_dims);
 mkldnn::memory::primitive_desc GetPrimitiveDesc(mkldnn::memory::primitive_desc 
pd,
 mkldnn_memory_format_t format);
 
+static inline bool same_shape(const TShape &shape, const mkldnn_dims_t dims, 
int ndims) {
+  if (shape.ndim() != (size_t)ndims)
+return false;
+  for (int i = 0; i < ndims; i++)
+if (shape[i] != dims[i])
+  return false;
+  return true;
+}
+
+static inline bool same_shape(const TShape &shape, int dtype,
+  const mkldnn::memory::desc &desc) {
+  return same_shape(shape, desc.data.dims, desc.data.ndims)
+  && get_mkldnn_type(dtype) == desc.data.data_type;
+}
+
+/*
+ * There is a large overhead of getting mkldnn::memory::primitive_desc from
+ * mkldnn::memory. This class is created to cache the metadata of mkldnn memory
+ * to provide a much more lightweight method to access them.
+ */
+class MKLDNNMemory {
+  std::shared_ptr<mkldnn::memory> mem;
+  mkldnn::memory::desc desc;
+  size_t size;  // The number of bytes.
+
+ public:
+  MKLDNNMemory(mkldnn::memory::primitive_desc pd, void *addr): desc(pd.desc()) 
{
+mem.reset(new mkldnn::memory(pd, addr));
+size = pd.get_size();
+  }
+
+  explicit MKLDNNMemory(std::shared_ptr<mkldnn::memory> mem): desc(
+  mem->get_primitive_desc().desc()) {
+this->mem = mem;
+auto pd = mem->get_primitive_desc();
+size = pd.get_size();
+  }
+
+  void SetDataHandle(void *handle) {
+mem->set_data_handle(handle);
+  }
+
+  void *GetDataHandle() const {
+return mem->get_data_handle();
+  }
+
+  std::shared_ptr<mkldnn::memory> GetMem() const {
+return mem;
+  }
+
+  mkldnn::memory *GetRaw() const {
+return mem.get();
+  }
+
+  size_t GetSize() const {
+return size;
+  }
+
+  mkldnn::memory::primitive_desc GetPrimitiveDesc() const {
+return mem->get_primitive_desc();
+  }
+
+  mkldnn::memory::primitive_desc GetPrimitiveDesc(mkldnn_memory_format_t 
format) const {
+return mxnet::GetPrimitiveDesc(mem->get_primitive_desc(), format);
+  }
+
+  mkldnn_memory_format_t GetDefaultFormat() const {
+return mxnet::GetDefaultFormat(desc);
+  }
+
+  mkldnn_memory_format_t GetFormat() const {
+return desc.data.format;
+  }
+
+  bool IsMKLDNN() const {
+return GetFormat() != GetDefaultFormat();
+  }
+
+  bool SameFormat(mkldnn::memory::primitive_desc pd) const {
 
 Review comment:
   nit: maybe `HaveSameFormat` is a better name.


This is an automated message from the Apache Git Service.
To respond to the message, please log on GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services