This is an automated email from the ASF dual-hosted git repository.
zhasheng pushed a commit to branch v1.3.x
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git
The following commit(s) were added to refs/heads/v1.3.x by this push:
new edfcfcf [MXNET-1179] Enforce deterministic algorithms in convolution layers (v1.3.x) (#13152)
edfcfcf is described below
commit edfcfcff0bdb471bf6c93d5a2eb05da6efeed1a4
Author: Anton Chernov <[email protected]>
AuthorDate: Wed Nov 7 19:23:00 2018 +0100
[MXNET-1179] Enforce deterministic algorithms in convolution layers (v1.3.x) (#13152)
* add env variable to choose deterministic cudnn alg
* set default value to false
* fix build failure in Windows GPU
* revert the previous change
* only check determinism in CUDNN 7.x release
* Add cudnn version check
* fix lint error
---
src/operator/nn/cudnn/cudnn_convolution-inl.h | 4 ++++
src/operator/nn/cudnn/cudnn_deconvolution-inl.h | 4 ++++
src/operator/nn/cudnn/cudnn_pooling-inl.h | 5 +++++
3 files changed, 13 insertions(+)
diff --git a/src/operator/nn/cudnn/cudnn_convolution-inl.h b/src/operator/nn/cudnn/cudnn_convolution-inl.h
index 4dc7ff8..102eb54 100644
--- a/src/operator/nn/cudnn/cudnn_convolution-inl.h
+++ b/src/operator/nn/cudnn/cudnn_convolution-inl.h
@@ -884,6 +884,7 @@ class CuDNNConvolutionOp {
size_t workspace_byte, CuDNNAlgo<AlgoType> *algo) {
// Determine the fastest acceptable algo that matches the algo_preference (-1 = any),
// regardless of mathType.
+ bool enforce_determinism = dmlc::GetEnv("MXNET_ENFORCE_DETERMINISM", false);
for (decltype(perf_results.size()) i = 0; i != perf_results.size(); ++i) {
const auto &result = perf_results[i];
bool algo_is_tensor_core = false;
@@ -891,6 +892,9 @@ class CuDNNConvolutionOp {
algo_is_tensor_core = result.mathType == CUDNN_TENSOR_OP_MATH;
#endif
if (result.status == CUDNN_STATUS_SUCCESS &&
+ #if CUDNN_MAJOR >= 7
+ (!enforce_determinism || result.determinism == cudnnDeterminism_t::CUDNN_DETERMINISTIC) &&
+ #endif
(param_.cudnn_tune.value() != conv::kLimited || result.memory <= workspace_byte)) {
algo->Set(result.algo, algo_is_tensor_core);
return;
diff --git a/src/operator/nn/cudnn/cudnn_deconvolution-inl.h b/src/operator/nn/cudnn/cudnn_deconvolution-inl.h
index c0c5650..72ba2c9 100644
--- a/src/operator/nn/cudnn/cudnn_deconvolution-inl.h
+++ b/src/operator/nn/cudnn/cudnn_deconvolution-inl.h
@@ -829,6 +829,7 @@ class CuDNNDeconvolutionOp {
void AlgoFinalSelect(const std::vector<PerfType> &perf_results, std::string kernel_name,
size_t workspace_byte, CuDNNAlgo<AlgoType> *algo) {
// Determine the fastest acceptable algo regardless of mathType.
+ bool enforce_determinism = dmlc::GetEnv("MXNET_ENFORCE_DETERMINISM", false);
for (decltype(perf_results.size()) i = 0; i != perf_results.size(); ++i) {
const auto &result = perf_results[i];
bool algo_is_tensor_core = false;
@@ -836,6 +837,9 @@ class CuDNNDeconvolutionOp {
algo_is_tensor_core = result.mathType == CUDNN_TENSOR_OP_MATH;
#endif
if (result.status == CUDNN_STATUS_SUCCESS &&
+ #if CUDNN_MAJOR >= 7
+ (!enforce_determinism || result.determinism == cudnnDeterminism_t::CUDNN_DETERMINISTIC) &&
+ #endif
(param_.cudnn_tune.value() != conv::kLimited || result.memory <= workspace_byte)) {
algo->Set(result.algo, algo_is_tensor_core);
return;
diff --git a/src/operator/nn/cudnn/cudnn_pooling-inl.h b/src/operator/nn/cudnn/cudnn_pooling-inl.h
index bc3ee36..89fa73e 100644
--- a/src/operator/nn/cudnn/cudnn_pooling-inl.h
+++ b/src/operator/nn/cudnn/cudnn_pooling-inl.h
@@ -48,7 +48,12 @@ class CuDNNPoolingOp {
param_ = p;
switch (param_.pool_type) {
case pool_enum::kMaxPooling:
+ #if CUDNN_MAJOR >= 7
+ mode_ = dmlc::GetEnv("MXNET_ENFORCE_DETERMINISM", false) ?
+ CUDNN_POOLING_MAX_DETERMINISTIC : CUDNN_POOLING_MAX;
+ #else
mode_ = CUDNN_POOLING_MAX;
+ #endif
break;
case pool_enum::kAvgPooling:
if (param_.count_include_pad.has_value() && !param_.count_include_pad.value()) {