Repository: incubator-singa
Updated Branches:
  refs/heads/master 5f67e578b -> 369d87960
SINGA-136 Support cuDNN v4

Fixed cuDNN batchnorm layer bug: test net

Project: http://git-wip-us.apache.org/repos/asf/incubator-singa/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-singa/commit/3b2e8953
Tree: http://git-wip-us.apache.org/repos/asf/incubator-singa/tree/3b2e8953
Diff: http://git-wip-us.apache.org/repos/asf/incubator-singa/diff/3b2e8953

Branch: refs/heads/master
Commit: 3b2e895362a37be11e22bf2dc5651c7d35ce63e1
Parents: 4b4ad05
Author: seaok <[email protected]>
Authored: Thu Apr 7 18:53:50 2016 +0800
Committer: seaok <[email protected]>
Committed: Thu Apr 7 18:53:50 2016 +0800

----------------------------------------------------------------------
 examples/cifar10/cudnn_bm.conf         | 42 +++++++++++++++++++++++++++++
 include/singa/neuralnet/neuron_layer.h |  8 ++++--
 src/neuralnet/neuron_layer/bm.cc       | 12 +++------
 src/neuralnet/neuron_layer/cudnn_bm.cc | 10 +++----
 4 files changed, 56 insertions(+), 16 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/3b2e8953/examples/cifar10/cudnn_bm.conf
----------------------------------------------------------------------
diff --git a/examples/cifar10/cudnn_bm.conf b/examples/cifar10/cudnn_bm.conf
index 071ed8a..2ca30cb 100644
--- a/examples/cifar10/cudnn_bm.conf
+++ b/examples/cifar10/cudnn_bm.conf
@@ -127,6 +127,20 @@ neuralnet {
         value:0
       }
     }
+    param {
+      name: "s13"
+      init {
+        type:kConstant
+        value:0
+      }
+    }
+    param {
+      name: "s14"
+      init {
+        type:kConstant
+        value:0
+      }
+    }
     srclayers:"pool1"
   }
   layer {
@@ -182,6 +196,20 @@ neuralnet {
         value:0
       }
     }
+    param {
+      name: "s23"
+      init {
+        type:kConstant
+        value:0
+      }
+    }
+    param {
+      name: "s24"
+      init {
+        type:kConstant
+        value:0
+      }
+    }
     srclayers:"conv2"
   }
   layer {
@@ -247,6 +275,20 @@ neuralnet {
         value:0
       }
     }
+    param {
+      name: "s33"
+      init {
+        type:kConstant
+        value:0
+      }
+    }
+    param {
+      name: "s34"
+      init {
+        type:kConstant
+        value:0
+      }
+    }
     srclayers:"conv3"
   }
   layer {


http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/3b2e8953/include/singa/neuralnet/neuron_layer.h
----------------------------------------------------------------------
diff --git a/include/singa/neuralnet/neuron_layer.h b/include/singa/neuralnet/neuron_layer.h
index 3cdc137..7c041f1 100644
--- a/include/singa/neuralnet/neuron_layer.h
+++ b/include/singa/neuralnet/neuron_layer.h
@@ -359,6 +359,7 @@ class BMLayer : public NeuronLayer {
   void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
  protected:
   Param *bnScale_, *bnBias_;
+  Param *resultRunningMean_, *resultRunningInvVariance_;
   int batchsize_, channels_, height_, width_;
 };
 
@@ -468,14 +469,17 @@ class CudnnBMLayer : public BMLayer, public CudnnBase {
   void InitCudnn() override;
   void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
   void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
+  const std::vector<Param*> GetParams() const override {
+    std::vector<Param*> params{bnScale_, bnBias_,
+        resultRunningMean_, resultRunningInvVariance_};
+    return params;
+  }
  protected:
   cudnnBatchNormMode_t mode_;
   cudnnTensorDescriptor_t bnScaleBiasMeanVar_desc_;
   cudnnTensorDescriptor_t bnScaleBiasDiff_desc_;
   Blob<float> resultSaveMean_;
   Blob<float> resultSaveInvVariance_;
-  Blob<float> resultRunningMean_;
-  Blob<float> resultRunningInvVariance_;
 };
 
 #endif  // USE_CUDNN
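The two Params added above (resultRunningMean_ and resultRunningInvVariance_) hold the running statistics that the cuDNN training call maintains as per-channel exponential moving averages, and returning them from GetParams() puts them alongside bnScale_ and bnBias_. As a rough standalone illustration of that moving-average update (plain C++ written for this note, not SINGA or cuDNN code; the function name and vector-of-floats layout are assumptions made for the sketch):

#include <cstddef>
#include <vector>

// Per-channel running-statistic update of the kind driven by
// exponentialAverageFactor in cudnnBatchNormalizationForwardTraining:
//   running = factor * batch_stat + (1 - factor) * running
void UpdateRunningStat(const std::vector<float>& batch_stat,
                       std::vector<float>* running_stat,
                       float exponential_average_factor) {
  for (std::size_t c = 0; c < batch_stat.size(); ++c) {
    (*running_stat)[c] = exponential_average_factor * batch_stat[c] +
        (1.0f - exponential_average_factor) * (*running_stat)[c];
  }
}
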
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/3b2e8953/src/neuralnet/neuron_layer/bm.cc
----------------------------------------------------------------------
diff --git a/src/neuralnet/neuron_layer/bm.cc b/src/neuralnet/neuron_layer/bm.cc
index 5784595..66e303c 100644
--- a/src/neuralnet/neuron_layer/bm.cc
+++ b/src/neuralnet/neuron_layer/bm.cc
@@ -45,16 +45,12 @@ void BMLayer::Setup(const LayerProto& conf,
   bnBias_ = Param::Create(conf.param(1));
   bnBias_->Setup(vector<int>{1, channels_, 1, 1});
 
-  bnScale_->InitValues(1);
-  /* float* p = bnScale_->data().mutable_cpu_data();
+  resultRunningMean_ = Param::Create(conf.param(2));
+  resultRunningMean_->Setup(vector<int>{1, channels_, 1, 1});
 
-  cout<<"inite param"<<endl;
-  for(int i=0;i<3;++i) {
-//    p[i]=1.0;
-    cout<<p[i]<<" ";
-  }
-  cout<<endl;*/
+  resultRunningInvVariance_ = Param::Create(conf.param(3));
+  resultRunningInvVariance_->Setup(vector<int>{1, channels_, 1, 1});
 }
 
 void BMLayer::ComputeFeature(int flag, const vector<Layer*>& srclayers) {


http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/3b2e8953/src/neuralnet/neuron_layer/cudnn_bm.cc
----------------------------------------------------------------------
diff --git a/src/neuralnet/neuron_layer/cudnn_bm.cc b/src/neuralnet/neuron_layer/cudnn_bm.cc
index fdc9ea9..d33c61d 100644
--- a/src/neuralnet/neuron_layer/cudnn_bm.cc
+++ b/src/neuralnet/neuron_layer/cudnn_bm.cc
@@ -69,8 +69,6 @@ void CudnnBMLayer::InitCudnn() {
 
   resultSaveMean_.Reshape(shape);
   resultSaveInvVariance_.Reshape(shape);
-  resultRunningMean_.Reshape(shape);
-  resultRunningInvVariance_.Reshape(shape);
 
   mode_ = CUDNN_BATCHNORM_SPATIAL;
 }
@@ -97,8 +95,8 @@ void CudnnBMLayer::ComputeFeature(int flag,
       bnScaleBiasMeanVar_desc_,
       bnScale_->data().gpu_data(),
       bnBias_->data().gpu_data(),
-      resultRunningMean_.gpu_data(),
-      resultRunningInvVariance_.gpu_data(),
+      resultRunningMean_->data().gpu_data(),
+      resultRunningInvVariance_->data().gpu_data(),
       epsilon));
   } else {
     CHECK_CUDNN(cudnnBatchNormalizationForwardTraining(handle_,
@@ -113,8 +111,8 @@ void CudnnBMLayer::ComputeFeature(int flag,
       bnScale_->data().gpu_data(),
      bnBias_->data().gpu_data(),
       exponentialAverageFactor,
-      resultRunningMean_.mutable_gpu_data(),
-      resultRunningInvVariance_.mutable_gpu_data(),
+      resultRunningMean_->data().mutable_gpu_data(),
+      resultRunningInvVariance_->data().mutable_gpu_data(),
       epsilon,
       resultSaveMean_.mutable_gpu_data(),
       resultSaveInvVariance_.mutable_gpu_data()));
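At test time the inference branch above (presumably cudnnBatchNormalizationForwardInference, whose trailing arguments the second hunk edits) normalizes with exactly these persisted statistics, which appears to be the "test net" bug this commit fixes: as plain Blobs the running mean and inverse variance were not exposed via GetParams() and so were not available to the test net. A rough standalone sketch of the per-channel computation (plain C++ written for this note, not SINGA code; it assumes the stored quantity is the inverse standard deviation 1/sqrt(var + epsilon), matching the InvVariance naming of the cuDNN v4 interface used here):

#include <cstddef>

// Spatial batch-norm inference for a single channel:
//   y = scale * (x - running_mean) * running_inv_std + bias
// where running_inv_std approximates 1 / sqrt(running_var + epsilon).
void BatchNormInferChannel(const float* x, float* y, std::size_t n,
                           float scale, float bias,
                           float running_mean, float running_inv_std) {
  for (std::size_t i = 0; i < n; ++i) {
    y[i] = scale * (x[i] - running_mean) * running_inv_std + bias;
  }
}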
