This is an automated email from the ASF dual-hosted git repository.

bgawrych pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/master by this push:
     new bdcf137  Make convolution operator fully work with oneDNN v2.4+ (#20847)
bdcf137 is described below

commit bdcf1370ab8f76d66aee1599fbd021a3660fdd66
Author: bartekkuncer <[email protected]>
AuthorDate: Tue Feb 1 12:15:02 2022 +0100

    Make convolution operator fully work with oneDNN v2.4+ (#20847)
    
    * Restore full functionality to convolution
    
    * Update src/operator/nn/dnnl/dnnl_convolution.cc
    
    Co-authored-by: bgawrych <[email protected]>
    
    Co-authored-by: bgawrych <[email protected]>
---
 src/operator/nn/dnnl/dnnl_convolution.cc       | 16 ++++++++++++----
 tests/python/dnnl/subgraphs/subgraph_common.py |  5 +----
 2 files changed, 13 insertions(+), 8 deletions(-)

diff --git a/src/operator/nn/dnnl/dnnl_convolution.cc b/src/operator/nn/dnnl/dnnl_convolution.cc
index 072c157..d38009a 100644
--- a/src/operator/nn/dnnl/dnnl_convolution.cc
+++ b/src/operator/nn/dnnl/dnnl_convolution.cc
@@ -118,10 +118,18 @@ std::shared_ptr<dnnl::convolution_forward::primitive_desc> GetConvFwdImpl(
          // suboptimal kernel for computation that has the expected memory size requirements
          auto conv_pd =
              std::make_shared<dnnl::convolution_forward::primitive_desc>(desc, attr, engine);
-          while (conv_pd->dst_desc().get_size() != GetArraySize(output) ||
-                 conv_pd->src_desc().get_size() != GetArraySize(data) ||
-                 (!param.dnnl_param.quantized &&
-                  conv_pd->weights_desc().get_size() != GetArraySize(weights))) {
+          while (
+              conv_pd->dst_desc().get_size() != GetArraySize(output) ||
+              conv_pd->src_desc().get_size() != GetArraySize(data) ||
+              (!param.dnnl_param.quantized &&
+               conv_pd->weights_desc().get_size() != GetArraySize(weights)) ||
+              // With the upgrade of oneDNN to version 2.4+
+              // tests/python/dnnl/subgraphs/test_conv_subgraph.py::test_pos_conv_add[True-data_shape1]
+              // started failing. Switching away from primitive with weight dnnl::format_tag
+              // ABcd4b16a4b in order to temporarily fix the issue until full fix arrives.
+              // Tracking issue: https://github.com/apache/incubator-mxnet/issues/20826.
+              (param.dnnl_param.quantized && conv_pd->weights_desc().dims()[1] < 4 &&
+               conv_pd->weights_desc().data.padded_dims[1] == 16)) {
            // next_impl() will visit desc and engine, please make sure they are still alive here.
            CHECK(conv_pd->next_impl()) << "No convolution implementation for this request.";
           }
diff --git a/tests/python/dnnl/subgraphs/subgraph_common.py b/tests/python/dnnl/subgraphs/subgraph_common.py
index b3bf5b0..be2adb9 100644
--- a/tests/python/dnnl/subgraphs/subgraph_common.py
+++ b/tests/python/dnnl/subgraphs/subgraph_common.py
@@ -42,10 +42,7 @@ config =  {
   }
 }
 
-DATA_SHAPE=[(64, 4, 10, 10), (4, 4, 24, 24), (1, 16, 32, 32)]
-# Second shape has been temporairly changed from (4, 3, 24, 24) to (4, 4, 24, 24) due to
-# a bug regarding conv+sum fuse with the amount of input channels < 4. It will be reverted
-# as soon as the problem is fixed. Issue: https://github.com/apache/incubator-mxnet/issues/20826.
+DATA_SHAPE=[(64, 4, 10, 10), (4, 3, 24, 24), (1, 16, 32, 32)]
 
 # Helpers
 class RELU6(nn.HybridBlock):
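
For context, here is a minimal standalone sketch (not part of the patch) of the oneDNN
implementation-iteration pattern the C++ hunk above relies on: the first primitive_desc
returned by oneDNN is queried for its memory descriptor sizes, and next_impl() advances to
the next candidate implementation until one with the expected size is found. The helper name
PickConvPd and the expected_dst_size parameter are illustrative placeholders, not MXNet code.

#include <memory>
#include <stdexcept>
#include "dnnl.hpp"

// Iterate over oneDNN's candidate convolution implementations until one whose
// destination memory descriptor has the expected size is selected.
std::shared_ptr<dnnl::convolution_forward::primitive_desc> PickConvPd(
    const dnnl::convolution_forward::desc& desc,
    const dnnl::primitive_attr& attr,
    const dnnl::engine& engine,
    size_t expected_dst_size) {
  auto conv_pd =
      std::make_shared<dnnl::convolution_forward::primitive_desc>(desc, attr, engine);
  // next_impl() returns false when oneDNN has no further implementation to offer.
  while (conv_pd->dst_desc().get_size() != expected_dst_size) {
    if (!conv_pd->next_impl()) {
      throw std::runtime_error("No convolution implementation for this request.");
    }
  }
  return conv_pd;
}

The patch extends the loop condition in GetConvFwdImpl with the quantized case of fewer than
4 input channels and padded_dims[1] == 16, so the problematic ABcd4b16a4b weight layout is
skipped until the tracking issue above is resolved, which in turn allows the second DATA_SHAPE
entry in subgraph_common.py to be reverted to (4, 3, 24, 24).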
