SINGA-186 Create Python Tensor class
- Add numpy.i in src/python/swig
. core_tensor.i is revised to include it
. GetValue method is added in singa::Tensor class (tensor.h)
. in tensor.py, member methods are added to get/set a numpy array from/to the
tensor data
. copy_from_numpy, which calls singa::Tensor::CopyDataFromHostPtr
. copy_to_numpy, which calls singa::Tensor::GetValue
. also global functions are added
. from_numpy
. to_numpy
NOTE: numpy.i is downloaded from
https://github.com/numpy/numpy/tree/master/tools/swig
TODO: unittest_python.py
Project: http://git-wip-us.apache.org/repos/asf/incubator-singa/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-singa/commit/50f67226
Tree: http://git-wip-us.apache.org/repos/asf/incubator-singa/tree/50f67226
Diff: http://git-wip-us.apache.org/repos/asf/incubator-singa/diff/50f67226
Branch: refs/heads/dev
Commit: 50f672267c8f2d88951c804cad6df833f89b0428
Parents: 7cfdb99
Author: chonho <[email protected]>
Authored: Thu Jun 30 21:54:52 2016 +0800
Committer: chonho <[email protected]>
Committed: Fri Jul 1 11:44:01 2016 +0800
----------------------------------------------------------------------
include/singa/core/tensor.h | 7 +
src/python/swig/core_tensor.i | 54 +-
src/python/swig/numpy.i | 3119 +++++++++++++++++++++++++++++++++++
src/python/tensor.py | 77 +-
test/python/unittest_python.py | 77 +-
5 files changed, 3243 insertions(+), 91 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/50f67226/include/singa/core/tensor.h
----------------------------------------------------------------------
diff --git a/include/singa/core/tensor.h b/include/singa/core/tensor.h
index 4ef3286..3b4304a 100644
--- a/include/singa/core/tensor.h
+++ b/include/singa/core/tensor.h
@@ -81,6 +81,13 @@ class Tensor {
return static_cast<const SType *>(block()->data());
}
+ template <typename SType>
+ void GetValue(SType *value, const size_t num) {
+ CHECK(device_ == defaultDevice);
+ const SType* ptr = data<SType>();
+ for(size_t i = 0; i < num; i++) value[i] = ptr[i];
+ }
+
/// data type, including kFloat16, kFloat32, kInt
const DataType data_type() const { return data_type_; }
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/50f67226/src/python/swig/core_tensor.i
----------------------------------------------------------------------
diff --git a/src/python/swig/core_tensor.i b/src/python/swig/core_tensor.i
index e30f2ef..f723d1b 100644
--- a/src/python/swig/core_tensor.i
+++ b/src/python/swig/core_tensor.i
@@ -40,6 +40,17 @@
using singa::DataType;
%}
+%include "numpy.i"
+%init %{
+ import_array();
+%}
+%apply (float *IN_ARRAY1, int DIM1) {
+ (const float *src, const size_t num)
+}
+%apply (float *ARGOUT_ARRAY1, int DIM1) {
+ (float *value, const size_t num)
+}
+
%template(Shape) std::vector<size_t>;
namespace singa{
@@ -72,6 +83,10 @@ namespace singa{
%template(charData) data<const char*>;
%template(doubleData) data<const double*>;
+ template <typename SType> void GetValue(SType* value, const size_t num);
+ %template(floatGetValue) GetValue<float>;
+ //void ToArray(float *value, const size_t num);
+
const DataType data_type() const;
const std::vector<size_t> &shape() const;
const size_t shape(size_t idx) const;
@@ -88,12 +103,14 @@ namespace singa{
template <typename SType> void SetValue(const SType x);
%template(floatSetValue) SetValue<float>;
- // ...
+ /* TODO(chonho-01) other types */
+ // --- other types
template <typename DType> void CopyDataFromHostPtr(const DType *src,
const size_t num,
const size_t offset);
- %template(floatCopyData) CopyDataFromHostPtr<float>;
+ %template(floatCopyDataFromHostPtr) CopyDataFromHostPtr<float>;
+ // --- other types
void CopyData(const Tensor &other);
Tensor Clone() const;
@@ -109,26 +126,22 @@ namespace singa{
template <typename DType> Tensor &operator+=(const DType x);
%template(iAdd_f) operator+=<float>;
- /* TODO(chonho-01) for other types */
- // ...
+ // --- other types
template <typename DType> Tensor &operator-=(DType x);
%template(iSub_f) operator-=<float>;
- /* TODO(chonho-01) for other types */
- // ...
+ // --- other types
template <typename DType> Tensor &operator*=(DType x);
%template(iMul_f) operator*=<float>;
- /* TODO(chonho-01) for other types */
- // ...
+ // --- other types
template <typename DType> Tensor &operator/=(DType x);
%template(iDiv_f) operator/=<float>;
- /* TODO(chonho-01) for other types */
- // ...
+ // --- other types
- /*TODO(chonho-08-b)
+ /*TODO(chonho-04)
amax
amin
asum
@@ -137,9 +150,6 @@ namespace singa{
};
- /* TODO(chonho-02)
- inline void CheckDataTypeAndLang(const Tensor &in1, const Tensor &in2);
- */
void CopyDataToFrom(Tensor *dst, const Tensor &src, size_t num,
size_t src_offset = 0, size_t dst_offset = 0);
@@ -160,7 +170,7 @@ namespace singa{
%template(floatSum) Sum<float>;
// --- other types
- /* TODO(chonho-04)
+ /* TODO(chonho-02)
need to implement the average of all elements ??? */
Tensor Average(const Tensor &t, int axis);
@@ -205,7 +215,7 @@ namespace singa{
%template(op) operator>= <float>;
// --- other types
- /* TODO(chonho-06)
+ /* NOTE(chonho)
no need to include these
in python, these can be replaced with comparison operators
@@ -284,27 +294,23 @@ namespace singa{
template <typename SType>
void Bernoulli(const SType p, Tensor *out);
%template(floatBernoulli) Bernoulli<float>;
- /* TODO for other types */
- // ...
+ // --- other types
template <typename SType>
void Gaussian(const SType mean, const SType std, Tensor *out);
%template(floatGaussian) Gaussian<float>;
- /* TODO for other types */
- // ...
+ // --- other types
template <typename SType>
void Uniform(const SType low, const SType high, Tensor *out);
%template(floatUniform) Uniform<float>;
- /* TODO for other types */
- // ...
+ // --- other types
/* ========== Blas operations ========== */
template <typename SType>
void Axpy(SType alpha, const Tensor &in, Tensor *out);
%template(floatAxpy) Axpy<float>;
- /* TODO for other types */
- // ...
+ // --- other types
Tensor Mult(const Tensor &A, const Tensor &B);
void Mult(const Tensor &A, const Tensor &B, Tensor *C);