This is an automated email from the ASF dual-hosted git repository.
bgawrych pushed a commit to branch v1.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git
The following commit(s) were added to refs/heads/v1.x by this push:
new 65b61ea [v1.x][submodule] Upgrade oneDNN to v2.4.4 (#20666)
65b61ea is described below
commit 65b61ea4be75e0b2d2319f09c843000d1d5e24b7
Author: bartekkuncer <[email protected]>
AuthorDate: Fri Jan 28 14:33:59 2022 +0100
[v1.x][submodule] Upgrade oneDNN to v2.4.4 (#20666)
* [v1.x][submodule] Upgrade oneDNN to v2.4.4
* Apply clang-format with settings from master
---
3rdparty/mkldnn | 2 +-
src/operator/nn/mkldnn/mkldnn_convolution.cc | 17 ++++++++++++-----
tests/cpp/operator/mkldnn_test.cc | 2 +-
tools/dependencies/README.md | 2 +-
4 files changed, 15 insertions(+), 8 deletions(-)
diff --git a/3rdparty/mkldnn b/3rdparty/mkldnn
index e2d4525..145c4b5 160000
--- a/3rdparty/mkldnn
+++ b/3rdparty/mkldnn
@@ -1 +1 @@
-Subproject commit e2d45252ae9c3e91671339579e3c0f0061f81d49
+Subproject commit 145c4b50196ac90ec1b946fb80cb5cef6e7d2d35
diff --git a/src/operator/nn/mkldnn/mkldnn_convolution.cc b/src/operator/nn/mkldnn/mkldnn_convolution.cc
index dcacf24..2a2c6d7 100644
--- a/src/operator/nn/mkldnn/mkldnn_convolution.cc
+++ b/src/operator/nn/mkldnn/mkldnn_convolution.cc
@@ -113,16 +113,23 @@ std::shared_ptr<mkldnn::convolution_forward::primitive_desc> GetConvFwdImpl(
[&param, &data, &weights, &output, &attr](const mkldnn::convolution_forward::desc& desc) {
auto engine = CpuEngine::Get()->get_engine();
try {
- // MKL-DNN introduced padded formats since 0.15 which require more memory
- // compared to the actual size of the tensor. Currently, MKL-DNN operators
- // still reuse memory from memory planning, so here we need to select a
- // suboptimal kernel for computation that has the expected memory size requirements
+ // MKLDNN introduced padded formats since 0.15 which require more memory compared to the
+ // actual size of the tensor. Currently, MKLDNN operators still reuse memory from memory
+ // planning, so here we need to select a suboptimal kernel for computation that has the
+ // expected memory size requirements
auto conv_pd =
std::make_shared<mkldnn::convolution_forward::primitive_desc>(desc, attr,
engine);
while (conv_pd->dst_desc().get_size() != GetArraySize(output) ||
conv_pd->src_desc().get_size() != GetArraySize(data) ||
(!param.mkldnn_param.quantized &&
- conv_pd->weights_desc().get_size() != GetArraySize(weights))) {
+ conv_pd->weights_desc().get_size() != GetArraySize(weights)) ||
+ // With the upgrade of MKLDNN to version 2.4+
+ // tests/python/mkl/test_subgraph.py::test_pos_conv_add started failing. Switching
+ // away from primitive with weight mkldnn::format_tag ABcd4b16a4b in order to
+ // temporairly fix the issue until full fix arrives. Tracking issue:
+ // https://github.com/apache/incubator-mxnet/issues/20826.
+ (param.mkldnn_param.quantized && conv_pd->weights_desc().dims()[1] < 4 &&
+ conv_pd->weights_desc().data.padded_dims[1] == 16)) {
// next_impl() will visit desc and engine, please make sure they are still alive here.
CHECK(conv_pd->next_impl()) << "No convolution implementation for this request.";
}
diff --git a/tests/cpp/operator/mkldnn_test.cc b/tests/cpp/operator/mkldnn_test.cc
index c7ae4a3..de6bd36 100644
--- a/tests/cpp/operator/mkldnn_test.cc
+++ b/tests/cpp/operator/mkldnn_test.cc
@@ -101,7 +101,7 @@ static void VerifyDefMem(const mkldnn::memory& mem) {
TEST(MKLDNN_UTIL_FUNC, MemFormat) {
// Check whether the number of format is correct.
- CHECK_EQ(mkldnn_format_tag_last, 385);
+ CHECK_EQ(mkldnn_format_tag_last, 495);
CHECK_EQ(mkldnn_nchw, 5);
CHECK_EQ(mkldnn_oihw, 5);
}
diff --git a/tools/dependencies/README.md b/tools/dependencies/README.md
index ec1e800..5fad36b 100644
--- a/tools/dependencies/README.md
+++ b/tools/dependencies/README.md
@@ -54,7 +54,7 @@ The dependencies could be categorized by several groups: BLAS libraries, CPU-bas
| Dependencies | MXNet Version |
| :------------: |:-------------:|
|OpenBLAS| 0.3.3 |
-|MKLDNN| 0.19 |
+|MKLDNN| 2.4.4 |
|CUDA| 10.1 |
|cuDNN| 7.5.1 |
|NCCL| 2.4.2 |