SINGA-182 Clean math function APIs and implementations Add comments (guides) in corresponding math function files.
Project: http://git-wip-us.apache.org/repos/asf/incubator-singa/repo Commit: http://git-wip-us.apache.org/repos/asf/incubator-singa/commit/fbd52197 Tree: http://git-wip-us.apache.org/repos/asf/incubator-singa/tree/fbd52197 Diff: http://git-wip-us.apache.org/repos/asf/incubator-singa/diff/fbd52197 Branch: refs/heads/dev Commit: fbd52197e369e1066ad367bcbde502f451462190 Parents: 3171459 Author: Wei Wang <[email protected]> Authored: Thu May 26 14:46:50 2016 +0800 Committer: Wei Wang <[email protected]> Committed: Thu May 26 14:46:50 2016 +0800 ---------------------------------------------------------------------- src/core/tensor/math_kernel.h | 2 ++ src/core/tensor/tensor_math.h | 41 ++++++++++++++++++++++----------- src/core/tensor/tensor_math_cpp.h | 3 +++ src/core/tensor/tensor_math_cuda.h | 1 + 4 files changed, 33 insertions(+), 14 deletions(-) ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/fbd52197/src/core/tensor/math_kernel.h ---------------------------------------------------------------------- diff --git a/src/core/tensor/math_kernel.h b/src/core/tensor/math_kernel.h index 5367f4a..b016007 100644 --- a/src/core/tensor/math_kernel.h +++ b/src/core/tensor/math_kernel.h @@ -25,6 +25,8 @@ #include "singa_config.h" #ifdef USE_CUDA +/// TODO(wangwei) Clean the function APIs as commented in tensor_math.h +/// Add 'Context *ctx' as an argument of all cuda functions. namespace singa { /* void softmaxloss_forward(int n, int dim, const float *prob, http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/fbd52197/src/core/tensor/tensor_math.h ---------------------------------------------------------------------- diff --git a/src/core/tensor/tensor_math.h b/src/core/tensor/tensor_math.h index d55e15a..b53d4cb 100644 --- a/src/core/tensor/tensor_math.h +++ b/src/core/tensor/tensor_math.h @@ -27,21 +27,19 @@ namespace singa { /// operations. 
/// All functions have a template argument, DType for DataType, Lang for the /// device programming language, e.g., lang::kCpp, lang::kCuda +/// +/// TODO(wangwei) Clean the functions to make the function APIs consistent: +/// 1. All function names should be like XxxYyy or XY, i.e., capitalize the first +/// letter. +/// 2. Order functions based on function name in alphabetical order. +/// 3. Function arguments order is [const basic type] [const Blob] [mutable Blob]. +/// 4. Function argument names, use 'num' for total number of elements in +/// elementwise operations; use 'in1' 'in2' for input blobs; use 'out' for +/// output blob or value. With exceptions for some functions, e.g., +/// Scale(const float alpha, const Blob* in, Blob* out); +/// For such cases, use x, v, alpha, etc for scalar types. +/// For blas functions, follow the blas style for argument names. -/// Some operations would have many config/hyper-parameters, e.g., Conv, and -/// these config vary among diff implementations, e.g., cuda/cudnn/opencl. -/// To separate the modules, we pass a OpConf pointer to the Tensor Op function. -/// The specific fields are implemented by inheriting OpConf, and casting the -/// pointer between the base and the sub-class. -class OpConf { - public: - template <typename T> - T* CastTo() { - static_assert(std::is_base_of<OpConf, T>::value, - "The cast type must be a sub-class of OpConf"); - return static_cast<T*>(this); - } -}; // ================Linear algebra functions==================================== /// ret[i] = |input[i]| @@ -292,6 +290,21 @@ void Gaussian(int count, float mean, float std, Blob* ret, Context* ctx) { LOG(FATAL) << "Not Implemented"; } +/*Some operations would have many config/hyper-parameters, e.g., Conv, and +these config vary among diff implementations, e.g., cuda/cudnn/opencl. +To separate the modules, we pass a OpConf pointer to the Tensor Op function. 
+The specific fields are implemented by inheriting OpConf, and casting the +pointer between the base and the sub-class. +class OpConf { + public: + template <typename T> + T* CastTo() { + static_assert(std::is_base_of<OpConf, T>::value, + "The cast type must be a sub-class of OpConf"); + return static_cast<T*>(this); + } +}; +*/ } // namespace singa #endif // SINGA_CORE_MATH_H_ http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/fbd52197/src/core/tensor/tensor_math_cpp.h ---------------------------------------------------------------------- diff --git a/src/core/tensor/tensor_math_cpp.h b/src/core/tensor/tensor_math_cpp.h index 7dc35c9..5ce33ad 100644 --- a/src/core/tensor/tensor_math_cpp.h +++ b/src/core/tensor/tensor_math_cpp.h @@ -24,6 +24,9 @@ #include <cblas.h> #endif +/// TODO(wangwei) Clean the implementations following the comments in +/// tensor_math.h. +/// For Blob argument xxx, name its pointer as xxxPtr. namespace singa { template <> void Square<float, lang::Cpp>(int count, const Blob* input, http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/fbd52197/src/core/tensor/tensor_math_cuda.h ---------------------------------------------------------------------- diff --git a/src/core/tensor/tensor_math_cuda.h b/src/core/tensor/tensor_math_cuda.h index 12fc58e..f26b5a3 100644 --- a/src/core/tensor/tensor_math_cuda.h +++ b/src/core/tensor/tensor_math_cuda.h @@ -26,6 +26,7 @@ namespace singa { +// TODO(wangwei) Clean implementations following comments in tensor_math_cpp.h. // TODO(wangwei) optimize using stream template<> void Add<float, lang::Cuda>(int count, const Blob* lhs, const Blob* rhs,
