SINGA-235 - Unify the engines for cudnn and singa layers

Fixed a bug in alexnet.cc caused by forgetting to update the layer construction code.
Updated some code to avoid warnings during compilation.
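Note on the unified API: example code now registers layers by configuration only, and the concrete engine (cudnn or singa) is resolved when the layer is created. Below is a minimal sketch of the updated construction style, reusing the FeedForwardNet class and the Gen*Conf helpers from examples/imagenet/alexnet.cc in the diff further down; the smaller shape and layer names here are illustrative only and are not part of this commit.

    // Sketch only: GenConvConf/GenReLUConf/... and FeedForwardNet are the
    // helpers defined in examples/imagenet/alexnet.cc. How net.Add() picks
    // the cudnn or singa implementation internally is assumed, not shown.
    FeedForwardNet CreateTinyNet() {
      FeedForwardNet net;
      Shape s{3, 32, 32};
      // No explicit layer objects (e.g. new CudnnConvolution()) any more;
      // each call passes only the layer configuration.
      net.Add(GenConvConf("conv1", 32, 5, 1, 2, 0.01), &s);
      net.Add(GenReLUConf("relu1"));
      net.Add(GenPoolingConf("pool1", true, 3, 2, 0));
      net.Add(GenFlattenConf("flat"));
      net.Add(GenDenseConf("ip1", 10, 0.01, 1));
      return net;
    }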
Project: http://git-wip-us.apache.org/repos/asf/incubator-singa/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-singa/commit/a91bf2a7
Tree: http://git-wip-us.apache.org/repos/asf/incubator-singa/tree/a91bf2a7
Diff: http://git-wip-us.apache.org/repos/asf/incubator-singa/diff/a91bf2a7

Branch: refs/heads/master
Commit: a91bf2a7ef9b8ba86973e579f4e170a0aa816444
Parents: 94ffe55
Author: Wei Wang <[email protected]>
Authored: Fri Aug 12 17:17:03 2016 +0800
Committer: Wei Wang <[email protected]>
Committed: Fri Aug 12 17:19:21 2016 +0800

----------------------------------------------------------------------
 cmake/Thirdparty/FindCUDNN.cmake     |  2 +-
 examples/imagenet/alexnet.cc         | 49 +++++++++++++++----------------
 test/singa/test_image_transformer.cc | 20 ++++++-------
 3 files changed, 34 insertions(+), 37 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/a91bf2a7/cmake/Thirdparty/FindCUDNN.cmake
----------------------------------------------------------------------
diff --git a/cmake/Thirdparty/FindCUDNN.cmake b/cmake/Thirdparty/FindCUDNN.cmake
index fbc103c..32b927b 100644
--- a/cmake/Thirdparty/FindCUDNN.cmake
+++ b/cmake/Thirdparty/FindCUDNN.cmake
@@ -27,7 +27,7 @@ IF(CUDNN_FOUND)
   ELSE()
     MATH(EXPR CUDNN_VERSION_SWIG "${CUDNN_VERSION_MAJOR} * 1000 + ${CUDNN_VERSION_MINOR} * 100 + ${CUDNN_VERSION_PATCH}")
   ENDIF()
-  MESSAGE(STATUS "Found Cudnn_v${CUDNN_VERSION} at ${CUDNN_INCLUDE_DIR} ${CUDNN_LIBRARIES}")
+  MESSAGE(STATUS "Found Cudnn_v${CUDNN_VERSION_SWIG} at ${CUDNN_INCLUDE_DIR} ${CUDNN_LIBRARIES}")
   MARK_AS_ADVANCED(CUDNN_INCLUDE_DIR CUDNN_LIBRARIES)
 ENDIF()


http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/a91bf2a7/examples/imagenet/alexnet.cc
----------------------------------------------------------------------
diff --git a/examples/imagenet/alexnet.cc b/examples/imagenet/alexnet.cc
index 3fb5d04..26b2d96 100644
--- a/examples/imagenet/alexnet.cc
+++ b/examples/imagenet/alexnet.cc
@@ -137,32 +137,29 @@ FeedForwardNet CreateNet() {
   FeedForwardNet net;
   Shape s{3, 227, 227};
 
-  net.Add(new CudnnConvolution(), GenConvConf("conv1", 96, 11, 4, 0, 0.01), &s);
-  net.Add(new CudnnActivation(), GenReLUConf("relu1"));
-  net.Add(new CudnnPooling(), GenPoolingConf("pool1", true, 3, 2, 0));
-  net.Add(new CudnnLRN(), GenLRNConf("lrn1"));
-  net.Add(new CudnnConvolution(),
-          GenConvConf("conv2", 256, 5, 1, 2, 0.01, 1.0));
-  net.Add(new CudnnActivation(), GenReLUConf("relu2"));
-  net.Add(new CudnnPooling(), GenPoolingConf("pool2", true, 3, 2, 0));
-  net.Add(new CudnnLRN(), GenLRNConf("lrn2"));
-  net.Add(new CudnnConvolution(), GenConvConf("conv3", 384, 3, 1, 1, 0.01));
-  net.Add(new CudnnActivation(), GenReLUConf("relu3"));
-  net.Add(new CudnnConvolution(),
-          GenConvConf("conv4", 384, 3, 1, 1, 0.01, 1.0));
-  net.Add(new CudnnActivation(), GenReLUConf("relu4"));
-  net.Add(new CudnnConvolution(),
-          GenConvConf("conv5", 256, 3, 1, 1, 0.01, 1.0));
-  net.Add(new CudnnActivation(), GenReLUConf("relu5"));
-  net.Add(new CudnnPooling(), GenPoolingConf("pool5", true, 3, 2, 0));
-  net.Add(new Flatten(), GenFlattenConf("flat"));
-  net.Add(new Dense(), GenDenseConf("ip6", 4096, 0.005, 1, 1.0));
-  net.Add(new CudnnActivation(), GenReLUConf("relu6"));
-  net.Add(new Dropout(), GenDropoutConf("drop6", 0.5));
-  net.Add(new Dense(), GenDenseConf("ip7", 4096, 0.005, 1, 1.0));
-  net.Add(new CudnnActivation(), GenReLUConf("relu7"));
-  net.Add(new Dropout(), GenDropoutConf("drop7", 0.5));
-  net.Add(new Dense(), GenDenseConf("ip8", 1000, 0.01, 1));
+  net.Add(GenConvConf("conv1", 96, 11, 4, 0, 0.01), &s);
+  net.Add(GenReLUConf("relu1"));
+  net.Add(GenPoolingConf("pool1", true, 3, 2, 0));
+  net.Add(GenLRNConf("lrn1"));
+  net.Add(GenConvConf("conv2", 256, 5, 1, 2, 0.01, 1.0));
+  net.Add(GenReLUConf("relu2"));
+  net.Add(GenPoolingConf("pool2", true, 3, 2, 0));
+  net.Add(GenLRNConf("lrn2"));
+  net.Add(GenConvConf("conv3", 384, 3, 1, 1, 0.01));
+  net.Add(GenReLUConf("relu3"));
+  net.Add(GenConvConf("conv4", 384, 3, 1, 1, 0.01, 1.0));
+  net.Add(GenReLUConf("relu4"));
+  net.Add(GenConvConf("conv5", 256, 3, 1, 1, 0.01, 1.0));
+  net.Add(GenReLUConf("relu5"));
+  net.Add(GenPoolingConf("pool5", true, 3, 2, 0));
+  net.Add(GenFlattenConf("flat"));
+  net.Add(GenDenseConf("ip6", 4096, 0.005, 1, 1.0));
+  net.Add(GenReLUConf("relu6"));
+  net.Add(GenDropoutConf("drop6", 0.5));
+  net.Add(GenDenseConf("ip7", 4096, 0.005, 1, 1.0));
+  net.Add(GenReLUConf("relu7"));
+  net.Add(GenDropoutConf("drop7", 0.5));
+  net.Add(GenDenseConf("ip8", 1000, 0.01, 1));
 
   return net;
 }


http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/a91bf2a7/test/singa/test_image_transformer.cc
----------------------------------------------------------------------
diff --git a/test/singa/test_image_transformer.cc b/test/singa/test_image_transformer.cc
index 92eb1a6..4540aa8 100644
--- a/test/singa/test_image_transformer.cc
+++ b/test/singa/test_image_transformer.cc
@@ -61,7 +61,7 @@ TEST(ImageTransformer, Apply3D) {
   srand(time(NULL));
   for (size_t i = 0; i < n; i++) x[i] = (float)(rand() % 256);
   in.CopyDataFromHostPtr<float>(x, n);
-  size_t resize_height = 4, resize_width = 6;
+  int resize_height = 4, resize_width = 6;
 
   singa::ImageTransformer img_transformer;
   singa::TransformerConf conf;
@@ -90,8 +90,8 @@
   EXPECT_EQ(resize_width, resized.size().width);
   size_t new_size = resize_height * resize_width * channel;
   float* xt = new float[new_size];
-  for (size_t i = 0; i < resize_height; i++)
-    for (size_t j = 0; j < resize_width; j++)
+  for (int i = 0; i < resize_height; i++)
+    for (int j = 0; j < resize_width; j++)
       for (size_t k = 0; k < channel; k++)
         xt[i * resize_width * channel + j * channel + k] =
             resized.at<cv::Vec3f>(i, j)[k];
 
   for (size_t c = 0; c < 3; c++)
@@ -128,7 +128,7 @@ TEST(ImageTransformer, Apply2D) {
   srand(time(NULL));
   for (size_t i = 0; i < n; i++) x[i] = (float)(rand() % 256);
   in.CopyDataFromHostPtr<float>(x, n);
-  size_t resize_height = 4, resize_width = 6;
+  int resize_height = 4, resize_width = 6;
 
   singa::ImageTransformer img_transformer;
   singa::TransformerConf conf;
@@ -156,8 +156,8 @@
   EXPECT_EQ(resize_width, resized.size().width);
   size_t new_size = resize_height * resize_width;
   float* xt = new float[new_size];
-  for (size_t i = 0; i < resize_height; i++)
-    for (size_t j = 0; j < resize_width; j++)
+  for (int i = 0; i < resize_height; i++)
+    for (int j = 0; j < resize_width; j++)
       xt[i * resize_width + j] = resized.at<cv::Vec<float, 1>>(i, j)[0];
 
   for (size_t h = 0; h < 2; h++)
@@ -187,7 +187,7 @@ TEST(ImageTransformer, Resize) {
   srand(time(NULL));
   for (size_t i = 0; i < n; i++) x[i] = (float)(rand() % 256);
   in.CopyDataFromHostPtr<float>(x, n);
-  size_t resize_height = 4, resize_width = 5;
+  int resize_height = 4, resize_width = 5;
   singa::Tensor out = singa::resize(in, resize_height, resize_width, "HWC");
   const float* y = out.data<float>();
@@ -203,8 +203,8 @@
   EXPECT_EQ(resize_width, resized.size().width);
   size_t new_size = resize_height * resize_width * channel;
   float* xt = new float[new_size];
-  for (size_t i = 0; i < resize_height; i++)
-    for (size_t j = 0; j < resize_width; j++)
+  for (int i = 0; i < resize_height; i++)
+    for (int j = 0; j < resize_width; j++)
       for (size_t k = 0; k < channel; k++)
         xt[i * resize_width * channel + j * channel + k] =
             resized.at<cv::Vec3f>(i, j)[k];
@@ -232,7 +232,7 @@ TEST(ImageTransformer, Crop) {
       for (size_t w = 0; w < crop_width; w++)
         for (size_t c = 0; c < channel; c++) {
           size_t out_idx = c * crop_height * crop_width + h * crop_width + w;
-          size_t in_idx = c * height * width + (h + crop_h_offset)
+          size_t in_idx = c * height * width + (h + crop_h_offset)
                               * width + w + crop_w_offset;
           EXPECT_EQ(x[in_idx], y[out_idx]);
         }
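Note on the test changes: cv::Size (returned by resized.size()) stores width and height as int, so comparing them against size_t values or looping with size_t counters can trigger signed/unsigned comparison warnings; declaring the resize dimensions as int is the cleanup referred to in the commit message. A small, self-contained illustration of the pattern the patch adopts (not taken from the repository; values are arbitrary):

    #include <cstddef>
    #include <cstdio>

    int main() {
      // Declaring the resize dimensions as int, as the patch does, keeps
      // loop bounds and comparisons warning-free against int-typed sizes.
      int resize_height = 4, resize_width = 6;
      const size_t channel = 3;
      float* xt = new float[resize_height * resize_width * channel];
      for (int i = 0; i < resize_height; i++)
        for (int j = 0; j < resize_width; j++)
          for (size_t k = 0; k < channel; k++)
            xt[i * resize_width * channel + j * channel + k] = 0.0f;
      std::printf("filled a %d x %d x %zu buffer\n", resize_height,
                  resize_width, channel);
      delete[] xt;
      return 0;
    }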
