bgawrych commented on a change in pull request #20825:
URL: https://github.com/apache/incubator-mxnet/pull/20825#discussion_r805844929



##########
File path: src/operator/contrib/adaptive_avg_pooling.cc
##########
@@ -238,18 +277,45 @@ void AdaptiveAvgPoolComputeExCPU(const nnvm::NodeAttrs& attrs,
   oneDNN doesn't support adaptive pooling.
   Fallback is needed when padding is not equal 0;
   */
-  const PoolingParam& param = nnvm::get<PoolingParam>(attrs.parsed);
   if (SupportDNNL(inputs[0]) && SupportDNNLAveragePooling(inputs[0], outputs[0])) {
-    const NDArray* workspace = nullptr;
     DNNL_OPCHECK_INIT(false, 1, inputs, outputs);
-    DNNLPoolingCompute(ctx, param, inputs[0], req[0], outputs[0], workspace, true);
+    DNNLRun(DNNLPoolingCompute, attrs, ctx, inputs, req, outputs);
     DNNL_OPCHECK_RUN(PoolingCompute<cpu>, attrs, ctx, inputs, req, outputs);
     return;
   }
   FallBackCompute(AdaptiveAvgPoolOpForward<cpu>, attrs, ctx, inputs, req, outputs);
 }
 #endif
 
+inline static bool AdaptivePoolingStorageType(const nnvm::NodeAttrs& attrs,
+                                              const int dev_mask,
+                                              DispatchMode* dispatch_mode,
+                                              std::vector<int>* in_attrs,
+                                              std::vector<int>* out_attrs) {
+  CHECK_EQ(in_attrs->size(), 1);
+  bool dispatched = false;
+#if MXNET_USE_ONEDNN == 1
+  if (!dispatched) {
+    dispatched = DNNLStorageType(attrs, dev_mask, true, dispatch_mode, in_attrs, out_attrs);
+  }
+  if (!DNNLEnvSet()) {
+    *dispatch_mode = DispatchMode::kFComputeFallback;
+  }
+#else
+  for (int& v : *in_attrs)
+    if (v == -1)
+      v = kDefaultStorage;
+  if (!dispatched && common::ContainsOnlyStorage(*in_attrs, kDefaultStorage)) {
+    dispatched =
+        storage_type_assign(out_attrs, kDefaultStorage, dispatch_mode, DispatchMode::kFCompute);
+  }
+  if (!dispatched) {
+    dispatched = dispatch_fallback(out_attrs, dispatch_mode);
+  }
+#endif
+  return dispatched;
+}

Review comment:
       Why are you using this function? Isn't a call to DNNLStorageType enough, as in other ops (e.g. src/operator/nn/pooling.cc:355)?
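
For reference, a minimal sketch of the simpler pattern the comment points to (signature copied from the diff above; whether the non-oneDNN branch is still needed is the open question):

```cpp
// Hedged sketch, not the PR's code: delegate storage-type inference entirely
// to DNNLStorageType, as pooling.cc does, instead of open-coding the fallback.
inline static bool AdaptivePoolingStorageType(const nnvm::NodeAttrs& attrs,
                                              const int dev_mask,
                                              DispatchMode* dispatch_mode,
                                              std::vector<int>* in_attrs,
                                              std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1);
  return DNNLStorageType(attrs, dev_mask, true, dispatch_mode, in_attrs, out_attrs);
}
```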

##########
File path: src/operator/contrib/adaptive_avg_pooling.cc
##########
@@ -169,6 +171,67 @@ void AdaptiveAvgPoolUpdateOutput(mshadow::Stream<cpu>* s,
   }
 }
 
+#if MXNET_USE_ONEDNN == 1
+bool SupportDNNLAveragePooling(const NDArray& in_data, const NDArray& out_data) {
+  for (int64_t idx = 2; idx < in_data.shape().ndim(); ++idx) {
+    const int s1 = in_data.shape()[idx];
+    const int s2 = out_data.shape()[idx];
+    if (s2 == 0) {
+      return false;
+    }
+    if (s1 % s2 != 0) {
+      return false;
+    }
+  }
+  const int IH         = in_data.shape()[2];
+  const int IW         = in_data.shape()[3];
+  const int OH         = out_data.shape()[2];
+  const int OW         = out_data.shape()[3];
+  const int strides_H  = ((IH << 1) / OH) - (IH / OH);
+  const int strides_W  = ((IW << 1) / OW) - (IW / OW);
+  const int kernel_H   = DIV_ROUND_UP((IH << 1) / OH, 1) - (IH / OH);
+  const int kernel_W   = DIV_ROUND_UP((IW << 1) / OW, 1) - (IW / OW);
+  const int pad_l_top  = (strides_H * (OH - 1) + kernel_H - IH) / 2;
+  const int pad_l_left = (strides_W * (OW - 1) + kernel_W - IW) / 2;
+  return pad_l_top == 0 && pad_l_left == 0;
+}
+
+void AdaptiveAvgPoolOpBackwardExCPU(const nnvm::NodeAttrs& attrs,
+                                    const OpContext& ctx,
+                                    const std::vector<NDArray>& inputs,
+                                    const std::vector<OpReqType>& req,
+                                    const std::vector<NDArray>& outputs) {
+  // Pooling does not currently support working with views
+  if (inputs[0].IsView() || outputs[0].IsView()) {
+    FallBackCompute(AdaptiveAvgPoolOpBackward<cpu>, attrs, ctx, inputs, req, outputs);
+    return;
+  }

Review comment:
       This is probably no longer necessary, since you're using the DNNLRun function.
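
As background for the SupportDNNLAveragePooling check in this hunk: adaptive average pooling reduces to a regular pooling with stride = kernel = IH / OH and zero padding whenever each spatial input dimension is a multiple of the corresponding output dimension, which is what the derived pad_l_top / pad_l_left == 0 condition verifies. A standalone numeric check (hypothetical values, not from the PR):

```cpp
#include <cstdio>

int main() {
  const int IH = 32, OH = 8;  // example input/output heights, IH % OH == 0
  const int stride = ((IH << 1) / OH) - (IH / OH);           // 8 - 4 = 4
  const int kernel = stride;                                 // same value by the diff's formulas
  const int pad    = (stride * (OH - 1) + kernel - IH) / 2;  // (4*7 + 4 - 32) / 2 = 0
  std::printf("stride=%d kernel=%d pad=%d\n", stride, kernel, pad);
  return 0;  // pad == 0 -> the oneDNN average-pooling path is usable
}
```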

##########
File path: src/operator/contrib/adaptive_avg_pooling.cc
##########
@@ -262,13 +328,14 @@ The pooling kernel and stride sizes are automatically chosen for desired output
   (N x C x height x width) for any input (NCHW).
 
 )code" ADD_FILELINE)
-    .set_attr_parser(ParamParser<PoolingParam>)
+    .set_attr_parser(PoolingParamParser)
     .set_num_inputs(1)
     .set_num_outputs(1)
     .set_attr<mxnet::FInferShape>("FInferShape", AdaptiveAvgPoolOpInferShape)
     .set_attr<FCompute>("FCompute<cpu>", AdaptiveAvgPoolOpForward<cpu>)
     .set_attr<nnvm::FGradient>("FGradient",
                                ElemwiseGradUseNone{"_backward_contrib_AdaptiveAvgPooling2D"})
+    .set_attr<FInferStorageType>("FInferStorageType", AdaptivePoolingStorageType)

Review comment:
       This can be put inside the MXNET_USE_ONEDNN compilation scope.
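
i.e., something along these lines (sketch of the suggested guard, matching the #if style already used in this file):

```cpp
#if MXNET_USE_ONEDNN == 1
    .set_attr<FInferStorageType>("FInferStorageType", AdaptivePoolingStorageType)
#endif
```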

##########
File path: src/operator/nn/pooling.cc
##########
@@ -294,7 +295,6 @@ void PoolingComputeExCPU(const nnvm::NodeAttrs& attrs,
                          const std::vector<OpReqType>& req,
                          const std::vector<NDArray>& outputs) {
   const PoolingParam& param = nnvm::get<PoolingParam>(attrs.parsed);
-  const NDArray* workspace  = nullptr;
 
   // Pooling does not currently support working with views
   if (inputs[0].IsView() || outputs[0].IsView()) {

Review comment:
       Same as above
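
A rough sketch of the simplification implied by both comments, assuming DNNLRun itself handles view inputs/outputs (the support-check name here is illustrative and may differ from what pooling.cc actually calls):

```cpp
void PoolingComputeExCPU(const nnvm::NodeAttrs& attrs,
                         const OpContext& ctx,
                         const std::vector<NDArray>& inputs,
                         const std::vector<OpReqType>& req,
                         const std::vector<NDArray>& outputs) {
  const PoolingParam& param = nnvm::get<PoolingParam>(attrs.parsed);
  if (SupportDNNL(inputs[0]) && SupportDNNLPooling(param)) {  // illustrative check
    DNNL_OPCHECK_INIT(false, 1, inputs, outputs);
    // No explicit IsView() test: DNNLRun is assumed to fall back when needed.
    DNNLRun(DNNLPoolingCompute, attrs, ctx, inputs, req, outputs);
    DNNL_OPCHECK_RUN(PoolingCompute<cpu>, attrs, ctx, inputs, req, outputs);
    return;
  }
  FallBackCompute(PoolingCompute<cpu>, attrs, ctx, inputs, req, outputs);
}
```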

##########
File path: src/operator/contrib/adaptive_avg_pooling.cc
##########
@@ -277,10 +344,35 @@ The pooling kernel and stride sizes are automatically chosen for desired output
     .add_arguments(PoolingParam::__FIELDS__());
 
 NNVM_REGISTER_OP(_backward_contrib_AdaptiveAvgPooling2D)
-    .set_attr_parser(ParamParser<PoolingParam>)
+    .set_attr_parser(PoolingParamParser)
     .set_num_inputs(1)
     .set_num_outputs(1)
     .set_attr<nnvm::TIsBackward>("TIsBackward", true)
+#if MXNET_USE_ONEDNN == 1
+    .set_attr<FInferStorageType>("FInferStorageType", BackwardAdaptivePoolingStorageType)
+    // Different backend requires different FInplaceOption
+    .set_attr<nnvm::FInplaceOption>("FInplaceOption",
+                                    [](const NodeAttrs& attrs) {
+                                      const PoolingParam& param =
+                                          nnvm::get<PoolingParam>(attrs.parsed);
+                                      if (DNNLRequireWorkspace(param) && param.IsAdaptivePooling())
+                                        return std::vector<std::pair<int, int>>{{1, 0}};
+                                      return std::vector<std::pair<int, int>>();
+                                    })
+    .set_attr<FResourceRequest>("FResourceRequest",
+                                [](const NodeAttrs& n) {
+                                  return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
+                                })
+#else
+    .set_attr<nnvm::FInplaceOption>("FInplaceOption",
+                                    [](const NodeAttrs& attrs) {
+                                      return std::vector<std::pair<int, int>>();
+                                    })
+#endif
+#if MXNET_USE_ONEDNN == 1
+    .set_attr<bool>("TIsDNNL", true)
+    .set_attr<FComputeEx>("FComputeEx<cpu>", AdaptiveAvgPoolOpBackwardExCPU)
+#endif

Review comment:
       Use a single MXNET_USE_ONEDNN condition instead of two consecutive ones.
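
Concretely, the two guards could be collapsed into one (sketch, reusing the registrations from the hunk above):

```cpp
#if MXNET_USE_ONEDNN == 1
    .set_attr<FInferStorageType>("FInferStorageType", BackwardAdaptivePoolingStorageType)
    // Different backends require different FInplaceOption
    .set_attr<nnvm::FInplaceOption>("FInplaceOption",
                                    [](const NodeAttrs& attrs) {
                                      const PoolingParam& param =
                                          nnvm::get<PoolingParam>(attrs.parsed);
                                      if (DNNLRequireWorkspace(param) && param.IsAdaptivePooling())
                                        return std::vector<std::pair<int, int>>{{1, 0}};
                                      return std::vector<std::pair<int, int>>();
                                    })
    .set_attr<FResourceRequest>("FResourceRequest",
                                [](const NodeAttrs& n) {
                                  return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
                                })
    .set_attr<bool>("TIsDNNL", true)
    .set_attr<FComputeEx>("FComputeEx<cpu>", AdaptiveAvgPoolOpBackwardExCPU)
#else
    .set_attr<nnvm::FInplaceOption>("FInplaceOption",
                                    [](const NodeAttrs& attrs) {
                                      return std::vector<std::pair<int, int>>();
                                    })
#endif
```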



