Repository: incubator-singa
Updated Branches:
  refs/heads/master 7956019cf -> 4dfee5208


SINGA-261 Add version ID into the checkpoint files

Replace CUDNN_MAJOR_VERSION with CUDNN_MAJOR (from cudnn.h) in SINGA C++ code.

Add a cuDNN version test for PySINGA.

Add CUDA tests for PySINGA (device and layer).
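
For context, a minimal sketch of what such a PySINGA version test could
look like (illustrative only, not the exact test added by this commit; it
assumes layer.cudnn_version is 0 on builds without cuDNN, as the layer.py
changes below imply):

import unittest

from singa import layer

class TestCudnnVersion(unittest.TestCase):
    # cudnn_version is an int: 0 on CPU-only builds, otherwise the
    # numeric cuDNN version (e.g., 5005 for v5.0.5)
    def test_cudnn_version(self):
        v = layer.cudnn_version
        self.assertTrue(isinstance(v, int))
        self.assertGreaterEqual(v, 0)

if __name__ == '__main__':
    unittest.main()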


Project: http://git-wip-us.apache.org/repos/asf/incubator-singa/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-singa/commit/4dfee520
Tree: http://git-wip-us.apache.org/repos/asf/incubator-singa/tree/4dfee520
Diff: http://git-wip-us.apache.org/repos/asf/incubator-singa/diff/4dfee520

Branch: refs/heads/master
Commit: 4dfee520809e467c7794c20e29b6d90d771c1653
Parents: 7956019
Author: wangwei <[email protected]>
Authored: Wed Dec 7 11:31:12 2016 +0800
Committer: Wei Wang <[email protected]>
Committed: Fri Dec 16 22:35:47 2016 +0800

----------------------------------------------------------------------
 cmake/Cuda.cmake                     |  2 --
 cmake/Templates/singa_config.h.in    |  6 +++---
 python/singa/device.py               | 11 ++++++++---
 python/singa/layer.py                | 14 ++++++++------
 src/model/layer/cudnn_convolution.cc |  2 +-
 src/model/layer/cudnn_pooling.cc     |  2 +-
 test/singa/test_cudnn_lrn.cc         |  4 ++--
 7 files changed, 23 insertions(+), 18 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/4dfee520/cmake/Cuda.cmake
----------------------------------------------------------------------
diff --git a/cmake/Cuda.cmake b/cmake/Cuda.cmake
index e590bb1..35109aa 100644
--- a/cmake/Cuda.cmake
+++ b/cmake/Cuda.cmake
@@ -33,8 +33,6 @@ IF(USE_CUDNN)
     FIND_PACKAGE(CUDNN REQUIRED)
     INCLUDE_DIRECTORIES(SYSTEM ${CUDNN_INCLUDE_DIR})
     LIST(APPEND SINGA_LINKER_LIBS ${CUDNN_LIBRARIES})
-    #ADD_DEFINITIONS(-DUSE_CUDNN)
-    #ADD_DEFINITIONS(-DCUDNN_VERSION_MAJOR=${CUDNN_VERSION_MAJOR})
 ENDIF()
 
 INCLUDE_DIRECTORIES(SYSTEM ${CUDA_INCLUDE_DIRS})

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/4dfee520/cmake/Templates/singa_config.h.in
----------------------------------------------------------------------
diff --git a/cmake/Templates/singa_config.h.in b/cmake/Templates/singa_config.h.in
index 181c9fd..24ee12d 100644
--- a/cmake/Templates/singa_config.h.in
+++ b/cmake/Templates/singa_config.h.in
@@ -45,7 +45,7 @@
 
 // cudnn version
 #cmakedefine USE_CUDNN
-#cmakedefine CUDNN_MAJOR_VERSION @CUDNN_MAJOR_VERSION@
-#cmakedefine CUDNN_MINOR_VERSION @CUDNN_MINOR_VERSION@
-#cmakedefine CUDNN_PATCH_VERSION @CUDNN_PATCH_VERSION@
+// #cmakedefine CUDNN_MAJOR_VERSION @CUDNN_MAJOR_VERSION@
+// #cmakedefine CUDNN_MINOR_VERSION @CUDNN_MINOR_VERSION@
+// #cmakedefine CUDNN_PATCH_VERSION @CUDNN_PATCH_VERSION@
 // #cmakedefine CUDNN_VERSION @CUDNN_VERSION@

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/4dfee520/python/singa/device.py
----------------------------------------------------------------------
diff --git a/python/singa/device.py b/python/singa/device.py
index 2d93823..f250f9e 100644
--- a/python/singa/device.py
+++ b/python/singa/device.py
@@ -52,18 +52,22 @@ class Device(object):
 
 
 def get_num_gpus():
+    assert singa.USE_CUDA, 'SINGA is not compiled with CUDA/GPU'
     return singa.Platform.GetNumGPUs()
 
 
 def get_gpu_ids():
+    assert singa.USE_CUDA, 'SINGA is not compiled with CUDA/GPU'
     return singa.Platform.GetGPUIDs()
 
 
 def get_gpu_mem_size(id):
+    assert singa.USE_CUDA, 'SINGA is not compiled with CUDA/GPU'
     return singa.Platform.GetGPUMemSize(id)
 
 
 def device_query(id, verbose=False):
+    assert singa.USE_CUDA, 'SINGA is not compiled with CUDA/GPU'
     return singa.Platform.DeviceQuery(id, verbose)
 
 
@@ -75,7 +79,7 @@ def create_cuda_gpus(num):
     Returns:
         a list of swig converted CudaGPU devices.
     '''
-
+    assert singa.USE_CUDA, 'SINGA is not compiled with CUDA/GPU'
     return singa.Platform.CreateCudaGPUs(num)
 
 
@@ -85,7 +89,7 @@ def create_cuda_gpu():
     Returns:
         a swig converted CudaGPU device.
     '''
-
+    assert singa.USE_CUDA, 'SINGA is not compiled with CUDA/GPU'
     return singa.Platform.CreateCudaGPUs(1)[0]
 
 
@@ -98,6 +102,7 @@ def create_cuda_gpus_on(device_ids):
     Returns:
         a list of swig converted CudaGPU devices.
     '''
+    assert singa.USE_CUDA, 'SINGA is not compiled with CUDA/GPU'
     return singa.Platform.CreateCudaGPUsOn(device_ids)
 
 
@@ -110,6 +115,7 @@ def create_cuda_gpu_on(device_id):
     Returns:
         a swig converted CudaGPU device.
     '''
+    assert singa.USE_CUDA, 'SINGA is not compiled with CUDA/GPU'
     devices = create_cuda_gpus_on([device_id])
     return devices[0]
 
@@ -120,4 +126,3 @@ default_device = singa.Platform.GetDefaultDevice()
 def get_default_device():
     '''Get the default host device which is a CppCPU device'''
     return default_device
-
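
A small usage note on the new guards in device.py: on a CPU-only build the
CUDA helpers now fail fast with a clear AssertionError instead of failing
inside the swig Platform call. A hedged sketch (the try/except fallback is
illustrative):

from singa import device

try:
    dev = device.create_cuda_gpu()  # raises AssertionError without CUDA
except AssertionError as e:
    print(e)  # 'SINGA is not compiled with CUDA/GPU'
    dev = device.get_default_device()  # fall back to the host CppCPU device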

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/4dfee520/python/singa/layer.py
----------------------------------------------------------------------
diff --git a/python/singa/layer.py b/python/singa/layer.py
index a9f6a3b..583126a 100644
--- a/python/singa/layer.py
+++ b/python/singa/layer.py
@@ -144,7 +144,6 @@ class Layer(object):
         else:
             self.layer = _create_layer(engine, str(self.conf.type))
 
-
     def get_output_sample_shape(self):
         '''Called after setup to get the shape of the output sample(s).
 
@@ -642,8 +641,8 @@ class Dropout(Layer):
         super(Dropout, self).__init__(name)
         conf = self.conf.dropout_conf
         conf.dropout_ratio = p
-        # 'cudnn' works for v>=5.0
-        if engine.lower() == 'cudnn':
+        # dropout is supported in cudnn since v5
+        if engine.lower() == 'cudnn' and cudnn_version < 5000:
             myengine = 'singacuda'
         else:
             myengine = engine
@@ -836,7 +835,7 @@ class Concat(Layer):
         self.in_shapes = input_sample_shapes
         self.axis = axis
         self.conf.concat_conf.axis = axis
-       if engine == "cudnn":
+        if engine == "cudnn":
             self.layer = _create_layer('singacuda', 'Concat')
         else:
             self.layer = _create_layer(engine, 'Concat')
@@ -857,7 +856,6 @@ class Concat(Layer):
         ys = super(Concat, self).forward(flag, inputs)
         return ys[0]
 
-
     def backward(self, flag, dy):
         '''Backward propagate gradients through this layer.
 
@@ -891,7 +889,7 @@ class Slice(Layer):
         self.axis = axis
         self.conf.slice_conf.axis = axis
         self.conf.slice_conf.slice_point.extend(slice_point)
-       if engine == "cudnn":
+        if engine == "cudnn":
             self.layer = _create_layer('singacuda', 'Slice')
         else:
             self.layer = _create_layer(engine, 'Slice')
@@ -956,6 +954,8 @@ class RNN(Layer):
     def __init__(self, name, hidden_size, rnn_mode='lstm', dropout=0.0,
                  num_stacks=1, input_mode='linear', bidirectional=False,
                  param_specs=None, input_sample_shape=None):
+        assert cudnn_version >= 5005, 'RNN is supported since CUDNN V5.0.5; '\
+            'the current version is %d' % cudnn_version
         super(RNN, self).__init__(name)
         conf = self.conf.rnn_conf
         assert hidden_size > 0, 'Hidden feature size must > 0'
@@ -1090,6 +1090,8 @@ def _create_layer(eng, layer):
         layer, layer type, e.g., 'convolution', 'pooling'; for activation
         layers, use the specific activation mode, e.g. 'relu', 'tanh'.
     '''
+    assert eng != 'cudnn' or cudnn_version > 0, 'CUDNN is not enabled, please '\
+        'change the engine, e.g., layer.engine=singacpp'
     layer_type = eng + '_' + layer
     return singa_wrap.CreateLayer(layer_type.lower())
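
The new assertion in _create_layer points users at the module-level engine
setting; a minimal sketch of the suggested workaround on builds without
cuDNN (following the assertion message itself):

from singa import layer

# _create_layer rejects the 'cudnn' engine when cudnn_version is 0;
# switching the default engine keeps layer construction working on
# CPU-only builds
if layer.cudnn_version == 0:
    layer.engine = 'singacpp'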
 

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/4dfee520/src/model/layer/cudnn_convolution.cc
----------------------------------------------------------------------
diff --git a/src/model/layer/cudnn_convolution.cc b/src/model/layer/cudnn_convolution.cc
index 60ac526..54bba06 100644
--- a/src/model/layer/cudnn_convolution.cc
+++ b/src/model/layer/cudnn_convolution.cc
@@ -86,7 +86,7 @@ void CudnnConvolution::InitCudnn(const Tensor &input) {
       filter_desc_, GetCudnnDataType(dtype), CUDNN_TENSOR_NCHW, num_filters_,
       channels_, kernel_h_, kernel_w_));
 #else
-  LOG(FATAL) << "Not supported CUDNN version = " << CUDNN_VERSION_MAJOR;
+  LOG(FATAL) << "Not supported CUDNN version = " << CUDNN_MAJOR;
 #endif
 
   if (prefer_ == "fastest" || prefer_ == "limited_workspace" ||

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/4dfee520/src/model/layer/cudnn_pooling.cc
----------------------------------------------------------------------
diff --git a/src/model/layer/cudnn_pooling.cc b/src/model/layer/cudnn_pooling.cc
index 7c1a465..d5b1aa3 100644
--- a/src/model/layer/cudnn_pooling.cc
+++ b/src/model/layer/cudnn_pooling.cc
@@ -73,7 +73,7 @@ void CudnnPooling::InitCudnn(const Tensor &input) {
                                              kernel_h_, kernel_w_, pad_h_,
                                              pad_w_, stride_h_, stride_w_));
 #else
-  LOG(FATAL) << "Not supported CUDNN version = " << CUDNN_VERSION_MAJOR;
+  LOG(FATAL) << "Not supported CUDNN version = " << CUDNN_MAJOR;
 #endif
   has_init_cudnn_ = true;
 }

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/4dfee520/test/singa/test_cudnn_lrn.cc
----------------------------------------------------------------------
diff --git a/test/singa/test_cudnn_lrn.cc b/test/singa/test_cudnn_lrn.cc
index 04ca5f2..df251f5 100644
--- a/test/singa/test_cudnn_lrn.cc
+++ b/test/singa/test_cudnn_lrn.cc
@@ -23,7 +23,7 @@
 
 #ifdef USE_CUDNN
 // cudnn lrn is added in cudnn 4
-#if CUDNN_VERSION_MAJOR >=4
+#if CUDNN_MAJOR >=4
 #include "gtest/gtest.h"
 
 using singa::CudnnLRN;
@@ -199,5 +199,5 @@ TEST(CudnnLRN, Backward) {
   EXPECT_NEAR(0.00327978, dxptr[31], 1e-6f);
 }
 
-#endif  //  CUDNN_VERSION_MAJOR >= 4
+#endif  //  CUDNN_MAJOR >= 4
 #endif  //  USE_CUDNN
