SINGA-157 Change the priority of cudnn library and install libsingagpu.so

Add detection of the existence of libsingagpu.so.


Project: http://git-wip-us.apache.org/repos/asf/incubator-singa/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-singa/commit/061f8e30
Tree: http://git-wip-us.apache.org/repos/asf/incubator-singa/tree/061f8e30
Diff: http://git-wip-us.apache.org/repos/asf/incubator-singa/diff/061f8e30

Branch: refs/heads/master
Commit: 061f8e3034ecbcd1c2f4e3c9bfde27d7a94a62b2
Parents: e5f111c
Author: xiezl <[email protected]>
Authored: Thu Apr 7 13:16:27 2016 +0800
Committer: xiezl <[email protected]>
Committed: Thu Apr 7 13:30:12 2016 +0800

----------------------------------------------------------------------
 Makefile.am                            | 4 +++-
 include/singa/neuralnet/neuron_layer.h | 7 ++++---
 2 files changed, 7 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/061f8e30/Makefile.am
----------------------------------------------------------------------
diff --git a/Makefile.am b/Makefile.am
index e76d952..4eb11e1 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -333,7 +333,9 @@ all-local:
                cp -f .libs/_driver.so tool/python/singa/; \
                touch tool/python/singa/__init__.py; \
        fi
-       cp libsingagpu.so .libs/
+       @if [ -f "libsingagpu.so" ]; then \
+               cp libsingagpu.so .libs/; \
+       fi
 
 # For rat check
 rat:

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/061f8e30/include/singa/neuralnet/neuron_layer.h
----------------------------------------------------------------------
diff --git a/include/singa/neuralnet/neuron_layer.h b/include/singa/neuralnet/neuron_layer.h
index c9a30da..151cdf9 100644
--- a/include/singa/neuralnet/neuron_layer.h
+++ b/include/singa/neuralnet/neuron_layer.h
@@ -468,8 +468,10 @@ class CudnnSoftmaxLayer : public SoftmaxLayer, public CudnnBase {
   void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
 };
 
+
+#if CUDNN_MAJOR == 4
 /**
- * Cudnn Batch Normalization layer
+ * Cudnn Batch Normalization layer -- supported by cudnn_v4
  */
 class CudnnBMLayer : public BMLayer, public CudnnBase {
  public:
@@ -478,9 +480,7 @@ class CudnnBMLayer : public BMLayer, public CudnnBase {
   void ComputeFeature(int flag, const vector<Layer*>& srclayers) override;
   void ComputeGradient(int flag, const vector<Layer*>& srclayers) override;
  protected:
-#if CUDNN_MAJOR == 4
   cudnnBatchNormMode_t mode_;
-#endif
   cudnnTensorDescriptor_t bnScaleBiasMeanVar_desc_;
   cudnnTensorDescriptor_t bnScaleBiasDiff_desc_;
   Blob<float> resultSaveMean_;
@@ -488,6 +488,7 @@ class CudnnBMLayer : public BMLayer, public CudnnBase {
   Blob<float> resultRunningMean_;
   Blob<float> resultRunningInvVariance_;
 };
+#endif
 #endif  // USE_CUDNN
 
 /******************** RBM layers *****************/

Reply via email to