This is an automated email from the ASF dual-hosted git repository.

bgawrych pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/master by this push:
     new 90d9054b7b [submodule] Upgrade oneDNN to v2.6 (#20982)
90d9054b7b is described below

commit 90d9054b7b3f8d38fd693278d440524e5815f59d
Author: bartekkuncer <[email protected]>
AuthorDate: Tue Apr 19 19:23:42 2022 +0200

    [submodule] Upgrade oneDNN to v2.6 (#20982)
---
 3rdparty/onednn                          |  2 +-
 src/operator/nn/dnnl/dnnl_convolution.cc | 64 ++++++++++++++------------------
 tests/cpp/operator/dnnl_test.cc          |  2 +-
 tools/dependencies/README.md             |  2 +-
 4 files changed, 31 insertions(+), 39 deletions(-)

diff --git a/3rdparty/onednn b/3rdparty/onednn
index 9a35435c18..52b5f107dd 160000
--- a/3rdparty/onednn
+++ b/3rdparty/onednn
@@ -1 +1 @@
-Subproject commit 9a35435c18722ff17a48fb60bceac42bfdf78754
+Subproject commit 52b5f107dd9cf10910aaa19cb47f3abf9b349815
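
(Not part of the commit: to double-check which oneDNN the build actually links after this submodule bump, one can query dnnl_version() from the oneDNN C API. The minimal sketch below is an editor's addition, assumes the 3rdparty/onednn headers are on the include path, and should print 2.6.x after this change.)

    #include <iostream>
    #include <dnnl.h>  // oneDNN C API; declares dnnl_version() and dnnl_version_t

    int main() {
      // dnnl_version() reports the version of the oneDNN library actually linked,
      // which should read 2.6.x after this submodule upgrade.
      const dnnl_version_t* v = dnnl_version();
      std::cout << "oneDNN " << v->major << "." << v->minor << "." << v->patch << std::endl;
      return 0;
    }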
diff --git a/src/operator/nn/dnnl/dnnl_convolution.cc b/src/operator/nn/dnnl/dnnl_convolution.cc
index f28f273d3b..60c65ffa47 100644
--- a/src/operator/nn/dnnl/dnnl_convolution.cc
+++ b/src/operator/nn/dnnl/dnnl_convolution.cc
@@ -108,42 +108,34 @@ std::shared_ptr<dnnl::convolution_forward::primitive_desc> GetConvFwdImpl(
     int mask = (param.requantize_scales.size() > 1) ? 2 : 0;
     attr.set_output_scales(mask, param.requantize_scales);
   }
-  auto GetConvFwdPd = [&param, &data, &weights, &output, &attr](
-                          const dnnl::convolution_forward::desc& desc) {
-    auto engine = CpuEngine::Get()->get_engine();
-    try {
-      // DNNL introduced padded formats since 0.15 which require more memory
-      // compared to the actual size of the tensor. Currently, DNNL operators
-      // still reuse memory from memory planning, so here we need to select a
-      // suboptimal kernel for computation that has the expected memory size requirements
-      auto conv_pd =
-          std::make_shared<dnnl::convolution_forward::primitive_desc>(desc, attr, engine);
-      while (
-          conv_pd->dst_desc().get_size() != GetArraySize(output) ||
-          conv_pd->src_desc().get_size() != GetArraySize(data) ||
-          (!param.dnnl_param.quantized &&
-           conv_pd->weights_desc().get_size() != GetArraySize(weights)) ||
-          // With the upgrade of oneDNN to version 2.4+
-          // tests/python/dnnl/subgraphs/test_conv_subgraph.py::test_pos_conv_add[True-data_shape1]
-          // started failing. Switching away from blocking weights in order to temporarily fix
-          // the issue until full fix arrives. Tracking issue:
-          // https://github.com/apache/incubator-mxnet/issues/20826.
-          (param.dnnl_param.quantized && conv_pd->weights_desc().dims()[1] < 4 &&
-           conv_pd->weights_desc().data.padded_dims[1] != conv_pd->weights_desc().dims()[1])) {
-        // next_impl() will visit desc and engine, please make sure they are still alive here.
-        CHECK(conv_pd->next_impl()) << "No convolution implementation for this request.";
-      }
-      return conv_pd;
-    } catch (dnnl::error& e) {
-      if (e.status == dnnl_unimplemented && param.dnnl_param.quantized) {
-        LOG(ERROR) << "AVX512-BW support or Intel(R) MKL dependency is "
-                      "required for int8 convolution";
-      } else {
-        LOG(ERROR) << e.message;
-      }
-      throw;
-    }
-  };
+  auto GetConvFwdPd =
+      [&param, &data, &weights, &output, &attr](const dnnl::convolution_forward::desc& desc) {
+        auto engine = CpuEngine::Get()->get_engine();
+        try {
+          // DNNL introduced padded formats since 0.15 which require more memory
+          // compared to the actual size of the tensor. Currently, DNNL operators
+          // still reuse memory from memory planning, so here we need to select a
+          // suboptimal kernel for computation that has the expected memory size requirements
+          auto conv_pd =
+              std::make_shared<dnnl::convolution_forward::primitive_desc>(desc, attr, engine);
+          while (conv_pd->dst_desc().get_size() != GetArraySize(output) ||
+                 conv_pd->src_desc().get_size() != GetArraySize(data) ||
+                 (!param.dnnl_param.quantized &&
+                  conv_pd->weights_desc().get_size() != GetArraySize(weights))) {
+            // next_impl() will visit desc and engine, please make sure they are still alive here.
+            CHECK(conv_pd->next_impl()) << "No convolution implementation for this request.";
+          }
+          return conv_pd;
+        } catch (dnnl::error& e) {
+          if (e.status == dnnl_unimplemented && param.dnnl_param.quantized) {
+            LOG(ERROR) << "AVX512-BW support or Intel(R) MKL dependency is "
+                          "required for int8 convolution";
+          } else {
+            LOG(ERROR) << e.message;
+          }
+          throw;
+        }
+      };
 
   if (param.conv_param.dilate.ndim() == 0 && bias_md_ptr == nullptr) {
     dnnl::convolution_forward::desc desc(prop,
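
(Editor's note on the hunk above: oneDNN keeps a list of candidate implementations per primitive descriptor, and next_impl() advances through it. GetConvFwdPd keeps advancing while the candidate's src/weights/dst memory sizes do not match MXNet's preallocated arrays. A standalone sketch of that next_impl() pattern, with hypothetical shapes and not taken from MXNet, assuming the oneDNN v2.x C++ API:)

    #include <iostream>
    #include <dnnl.hpp>

    // Hypothetical example: enumerate the implementations oneDNN offers for a
    // small f32 convolution. The patch above uses the same next_impl() call,
    // but loops only while the candidate's memory sizes differ from the
    // preallocated NDArrays.
    int main() {
      dnnl::engine engine(dnnl::engine::kind::cpu, 0);
      dnnl::memory::desc src({1, 16, 28, 28}, dnnl::memory::data_type::f32,
                             dnnl::memory::format_tag::any);
      dnnl::memory::desc wei({32, 16, 3, 3}, dnnl::memory::data_type::f32,
                             dnnl::memory::format_tag::any);
      dnnl::memory::desc dst({1, 32, 26, 26}, dnnl::memory::data_type::f32,
                             dnnl::memory::format_tag::any);
      dnnl::convolution_forward::desc desc(dnnl::prop_kind::forward_inference,
                                           dnnl::algorithm::convolution_direct,
                                           src, wei, dst,
                                           /*strides=*/{1, 1},
                                           /*padding_l=*/{0, 0},
                                           /*padding_r=*/{0, 0});
      dnnl::convolution_forward::primitive_desc pd(desc, engine);
      // impl_info_str() names the chosen kernel (e.g. "jit:avx2");
      // next_impl() returns false once the implementation list is exhausted.
      do {
        std::cout << pd.impl_info_str() << "\n";
      } while (pd.next_impl());
      return 0;
    }

(The size checks live inside the loop condition in the real code because MXNet plans memory before oneDNN picks a kernel, as the in-code comment about memory planning explains.)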
diff --git a/tests/cpp/operator/dnnl_test.cc b/tests/cpp/operator/dnnl_test.cc
index 2874df1947..a420c2a7b9 100644
--- a/tests/cpp/operator/dnnl_test.cc
+++ b/tests/cpp/operator/dnnl_test.cc
@@ -101,7 +101,7 @@ static void VerifyDefMem(const dnnl::memory& mem) {
 
 TEST(DNNL_UTIL_FUNC, MemFormat) {
   // Check whether the number of format is correct.
-  CHECK_EQ(dnnl_format_tag_last, 503);
+  CHECK_EQ(dnnl_format_tag_last, 514);
   CHECK_EQ(dnnl_nchw, 5);
   CHECK_EQ(dnnl_oihw, 5);
 }
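
(Editor's note: the 503 -> 514 bump above reflects new memory format tags added to oneDNN's dnnl_format_tag enum between 2.5.2 and 2.6. A quick way to confirm the expected value against whatever oneDNN is installed, assuming its headers are on the include path:)

    #include <iostream>
    #include <dnnl.h>  // oneDNN C API; defines the dnnl_format_tag_t enum

    int main() {
      // dnnl_format_tag_last grows whenever oneDNN adds memory format tags, so
      // the constant asserted in dnnl_test.cc must track the submodule version.
      std::cout << "dnnl_format_tag_last = "
                << static_cast<int>(dnnl_format_tag_last) << std::endl;
      return 0;
    }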
diff --git a/tools/dependencies/README.md b/tools/dependencies/README.md
index 6d18d1933d..e3e7c85ca6 100644
--- a/tools/dependencies/README.md
+++ b/tools/dependencies/README.md
@@ -57,7 +57,7 @@ The dependencies could be categorized by several groups: BLAS libraries, CPU-bas
 | Dependencies  | MXNet Version |
 | :------------: |:-------------:| 
 |OpenBLAS| 0.3.9 |
-|oneDNN| 2.5.2 |
+|oneDNN| 2.6 |
 |CUDA| 10.1 |
 |cuDNN| 7.5.1 |
 |NCCL| 2.4.2 |
