SINGA-136 Support cuDNN v4

Fix warnings caused by calling data()/grad() on values that need to be mutable.
Those calls are replaced with the mutable_data()/mutable_grad() functions.

Add batchnorm-related files into Makefile.am.
The CPU version of batchnorm is currently an empty stub.

Note: if cudnn.h is located in the cuda/include folder, then the --with-cudnn
option has no effect, because cuda/include is searched first. TODO: change the
order of these two include paths during compilation.


Project: http://git-wip-us.apache.org/repos/asf/incubator-singa/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-singa/commit/9c2c2974
Tree: http://git-wip-us.apache.org/repos/asf/incubator-singa/tree/9c2c2974
Diff: http://git-wip-us.apache.org/repos/asf/incubator-singa/diff/9c2c2974

Branch: refs/heads/master
Commit: 9c2c29742fd27022b377b524028aa78fb64c888f
Parents: 4b4ad05
Author: Wei Wang <[email protected]>
Authored: Wed Apr 6 23:48:35 2016 +0800
Committer: Wei Wang <[email protected]>
Committed: Wed Apr 6 23:48:35 2016 +0800

----------------------------------------------------------------------
 Makefile.am                            | 6 ++++--
 src/neuralnet/neuron_layer/cudnn_bm.cc | 4 ++--
 2 files changed, 6 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/9c2c2974/Makefile.am
----------------------------------------------------------------------
diff --git a/Makefile.am b/Makefile.am
index b2587a4..56af6d4 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -2,7 +2,7 @@ ACLOCAL_AMFLAGS = -I config
 AUTOMAKE_OPTIONS = foreign subdir-objects
 
 #AM_CPPFLAGS = -I$(top_srcdir)/src
-#AM_LDFLAGS = $(LD_FLAGS) 
+#AM_LDFLAGS = $(LD_FLAGS)
 
 MSHADOW_FLAGS = -DMSHADOW_USE_CUDA=0 -DMSHADOW_USE_CBLAS=1 -DMSHADOW_USE_MKL=0
 DEFAULT_FLAGS = -Wall -pthread -fPIC -std=c++11 -Wno-unknown-pragmas \
@@ -36,7 +36,8 @@ CUDNN_SRCS := src/neuralnet/loss_layer/cudnn_softmaxloss.cc \
                          src/neuralnet/neuron_layer/cudnn_pooling.cc \
                          src/neuralnet/neuron_layer/cudnn_activation.cc \
                          src/neuralnet/neuron_layer/cudnn_lrn.cc \
-                         src/neuralnet/neuron_layer/cudnn_convolution.cc
+                         src/neuralnet/neuron_layer/cudnn_convolution.cc \
+                         src/neuralnet/neuron_layer/cudnn_bm.cc
 
 PY_SRCS := tool/python/singa/driver_wrap.cxx \
                   src/driver.cc
@@ -73,6 +74,7 @@ SINGA_SRCS := src/driver.cc \
               src/neuralnet/loss_layer/euclidean.cc \
               src/neuralnet/loss_layer/softmax.cc \
               src/neuralnet/neuron_layer/activation.cc \
+              src/neuralnet/neuron_layer/bm.cc \
               src/neuralnet/neuron_layer/convolution.cc \
               src/neuralnet/neuron_layer/dropout.cc \
               src/neuralnet/neuron_layer/dummy.cc \

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/9c2c2974/src/neuralnet/neuron_layer/cudnn_bm.cc
----------------------------------------------------------------------
diff --git a/src/neuralnet/neuron_layer/cudnn_bm.cc 
b/src/neuralnet/neuron_layer/cudnn_bm.cc
index fdc9ea9..32a4f57 100644
--- a/src/neuralnet/neuron_layer/cudnn_bm.cc
+++ b/src/neuralnet/neuron_layer/cudnn_bm.cc
@@ -141,8 +141,8 @@ void CudnnBMLayer::ComputeGradient(int flag,
       srclayers.at(0)->mutable_grad(this)->mutable_gpu_data(),
       bnScaleBiasDiff_desc_,
       bnScale_->data().gpu_data(),
-      bnScale_->grad().mutable_gpu_data(),
-      bnBias_->grad().mutable_gpu_data(),
+      bnScale_->mutable_grad()->mutable_gpu_data(),
+      bnBias_->mutable_grad()->mutable_gpu_data(),
       epsilon,
       resultSaveMean_.gpu_data(),
       resultSaveInvVariance_.gpu_data()));

Reply via email to