SINGA-186 Create Python Tensor class - In tensor.py, Rename copy_to_numpy to to_numpy
- Revised core_device.i to avoid memory leak of std::shared_ptr<Device> - Revised device.h to avoid non-virtual-destructor warning TODO: still need to add more test cases Project: http://git-wip-us.apache.org/repos/asf/incubator-singa/repo Commit: http://git-wip-us.apache.org/repos/asf/incubator-singa/commit/d83f5d52 Tree: http://git-wip-us.apache.org/repos/asf/incubator-singa/tree/d83f5d52 Diff: http://git-wip-us.apache.org/repos/asf/incubator-singa/diff/d83f5d52 Branch: refs/heads/dev Commit: d83f5d52d71447d9f8be5353fbeaa67766f6f767 Parents: 50f6722 Author: chonho <[email protected]> Authored: Fri Jul 1 13:40:10 2016 +0800 Committer: chonho <[email protected]> Committed: Fri Jul 1 13:40:10 2016 +0800 ---------------------------------------------------------------------- include/singa/core/device.h | 7 ++- src/core/device/device.cc | 2 + src/python/swig/core_device.i | 7 ++- src/python/tensor.py | 4 +- test/python/unittest_python.py | 87 ++++++++++++++++++------------------- 5 files changed, 57 insertions(+), 50 deletions(-) ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/d83f5d52/include/singa/core/device.h ---------------------------------------------------------------------- diff --git a/include/singa/core/device.h b/include/singa/core/device.h index f2b77b6..8c4546f 100644 --- a/include/singa/core/device.h +++ b/include/singa/core/device.h @@ -84,6 +84,8 @@ class Device { int id() const { return id_; } + virtual ~Device() = 0; + protected: /// Execute one operation on one executor. virtual void DoExec(function<void(Context*)>&& fn, int executor) = 0; @@ -117,6 +119,7 @@ class Device { /// It runs cpp code. 
class CppCPU : public Device { public: + ~CppCPU() {}; CppCPU(int id = -1, int num_executors = 1, string scheduler = "sync", string vm = "gc-only"); @@ -148,7 +151,7 @@ class CudaGPU : public Device { ~CudaGPU(); CudaGPU(int id = 0, int num_executors = 1, string scheduler = "sync", string vm = "gc-only"); - CudaGPU(const MemPoolConf& mem_conf, + CudaGPU(const MemPoolConf& mem_conf, int id = 0, int num_executors = 1, string scheduler = "sync"); void SetRandSeed(unsigned seed) override; @@ -183,7 +186,7 @@ class CudaGPU : public Device { /// Free cpu memory. void Free(void* ptr) override; - + private: DeviceMemPool* pool; }; http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/d83f5d52/src/core/device/device.cc ---------------------------------------------------------------------- diff --git a/src/core/device/device.cc b/src/core/device/device.cc index 6775e40..071b891 100644 --- a/src/core/device/device.cc +++ b/src/core/device/device.cc @@ -25,6 +25,8 @@ Device::Device(int id, int num_executors, string scheduler, string vm) host_ = defaultDevice; } +Device::~Device() {} + void Device::Exec(function<void(Context*)>&& fn, const vector<Block*> read_blocks, const vector<Block*> write_blocks, bool use_rand_generator) { // TODO(wangwei) execute operations scheduled by the scheduler. 
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/d83f5d52/src/python/swig/core_device.i ---------------------------------------------------------------------- diff --git a/src/python/swig/core_device.i b/src/python/swig/core_device.i index 50cee3e..7430620 100644 --- a/src/python/swig/core_device.i +++ b/src/python/swig/core_device.i @@ -24,14 +24,19 @@ %module core_device %include "std_vector.i" %include "std_string.i" +%include "std_shared_ptr.i" %{ #include "singa/core/device.h" %} +/* smart pointer to avoid memory leak */ +%shared_ptr(singa::Device); +%shared_ptr(singa::CppCPU); +%shared_ptr(singa::CudaGPU); + namespace singa{ - %nodefault Device; class Device { public: virtual void SetRandSeed(unsigned seed) = 0; http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/d83f5d52/src/python/tensor.py ---------------------------------------------------------------------- diff --git a/src/python/tensor.py b/src/python/tensor.py index b8abb5c..35d6b1f 100644 --- a/src/python/tensor.py +++ b/src/python/tensor.py @@ -89,7 +89,7 @@ class Tensor(object): data = np.array(data, dtype=dt).reshape(self.tuple_shape) return data - def copy_to_numpy(self): + def to_numpy(self): ''' this method gets the values of tensor data and returns it as numpy array ''' @@ -322,7 +322,7 @@ def from_numpy(np_array): def to_numpy(t): - return t.copy_to_numpy() + return t.to_numpy() def abs(t): http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/d83f5d52/test/python/unittest_python.py ---------------------------------------------------------------------- diff --git a/test/python/unittest_python.py b/test/python/unittest_python.py index 569a7ad..320703d 100644 --- a/test/python/unittest_python.py +++ b/test/python/unittest_python.py @@ -39,7 +39,7 @@ from core_pb2 import * class TestTensorMethods(unittest.TestCase): def setUp(self): - self.shape = (2, 3) + self.shape = (3, 2) self.t = Tensor(self.shape) self.s = Tensor(self.shape) @@ -49,65 +49,63 @@ class 
TestTensorMethods(unittest.TestCase): self.assertTupleEqual(t.shape(), shape) self.assertEqual(t.shape(0), shape[0]) self.assertEqual(t.shape(1), shape[1]) - self.assertEqual(product(shape), 2*3) + self.assertEqual(product(shape), 3*2) self.assertEqual(t.ndim(), 2) - self.assertEqual(t.size(), 2*3) - self.assertEqual(t.memsize(), 2*3*sizeof(kFloat32)) + self.assertEqual(t.size(), 3*2) + self.assertEqual(t.memsize(), 3*2*sizeof(kFloat32)) self.assertFalse(t.is_transpose()) - print 'Done tensor fields' def test_unary_operators(self): t = self.t arr = np.array([[1.0,2.0],[2.0,3.0],[3.0,4.0]], dtype=np.float32) - t.copy_data_from(arr) - self.assertAlmostEqual(t.to_array()[0,0], 1.0) - self.assertAlmostEqual(t.to_array()[0,1], 2.0) + t.copy_from_numpy(arr) + self.assertAlmostEqual(t.to_numpy()[0,0], arr[0,0]) + self.assertAlmostEqual(t.to_numpy()[0,1], arr[0,1]) + self.assertAlmostEqual(t.to_numpy()[2,1], arr[2,1]) t += 1.23 - self.assertAlmostEqual(t.to_array()[0,0], 1.0+1.23) + self.assertAlmostEqual(to_numpy(t)[0,0], 1.0+1.23) t -= 0.23 - self.assertAlmostEqual(t.to_array()[0,0], 2.23-0.23) + self.assertAlmostEqual(to_numpy(t)[0,0], 2.23-0.23) t *= 2.5 - self.assertAlmostEqual(t.to_array()[0,0], (2.23-0.23)*2.5) + self.assertAlmostEqual(to_numpy(t)[0,0], (2.23-0.23)*2.5) t /= 2 - self.assertAlmostEqual(t.to_array()[0,0], (2.23-0.23)*2.5/2) - print 'Done unary_operators' + self.assertAlmostEqual(to_numpy(t)[0,0], (2.23-0.23)*2.5/2) def test_binary_operators(self): t = self.t arr = np.array([[1.0,2.0],[2.0,3.0],[3.0,4.0]], dtype=np.float32) - t.copy_data_from(arr) + t.copy_from_numpy(arr) s = self.s arr = np.array([[4.0,3.0],[3.0,2.0],[2.0,1.0]], dtype=np.float32) - s.from_array(arr) + s.copy_from_numpy(arr) a = t + s - self.assertAlmostEqual(a.to_array()[0,0], 1.0+4.0) + self.assertAlmostEqual(to_numpy(a)[0,0], 1.0+4.0) a = t - s - self.assertAlmostEqual(a.to_array()[0,0], 1.0-4.0) + self.assertAlmostEqual(to_numpy(a)[0,0], 1.0-4.0) a = t * s - 
self.assertAlmostEqual(a.to_array()[0,0], 1.0*4.0) + self.assertAlmostEqual(to_numpy(a)[0,0], 1.0*4.0) a = t / s - self.assertAlmostEqual(a.to_array()[0,0], 1.0/4.0) - print 'Done binary_operators' + self.assertAlmostEqual(to_numpy(a)[0,0], 1.0/4.0) def test_comparison_operators(self): t = self.t - t += 3.45 + t.set_value(3.45) a = t < 3.45 - self.assertEqual(a.to_array()[0,0], 0) + self.assertEqual(to_numpy(a)[0,0], 0) a = t <= 3.45 - self.assertEqual(a.to_array()[0,0], 1) + self.assertEqual(to_numpy(a)[0,0], 1) a = t > 3.45 - self.assertEqual(a.to_array()[0,0], 0) + self.assertEqual(to_numpy(a)[0,0], 0) a = t >= 3.45 - self.assertEqual(a.to_array()[0,0], 1) + self.assertEqual(to_numpy(a)[0,0], 1) a = lt(t, 3.45) - self.assertEqual(a.to_array()[0,0], 0) + self.assertEqual(to_numpy(a)[0,0], 0) a = le(t, 3.45) - self.assertEqual(a.to_array()[0,0], 1) + self.assertEqual(to_numpy(a)[0,0], 1) a = gt(t, 3.45) - self.assertEqual(a.to_array()[0,0], 0) + self.assertEqual(to_numpy(a)[0,0], 0) a = ge(t, 3.45) - self.assertEqual(a.to_array()[0,0], 1) + self.assertEqual(to_numpy(a)[0,0], 1) def test_tensor_manipulation(self): #TODO(chonho) @@ -119,34 +117,33 @@ class TestTensorMethods(unittest.TestCase): def test_tensor_copy(self): t = Tensor((2,3)) - t += 1.23 - self.assertAlmostEqual(t.to_array()[0,0], 1.23) + t.set_value(1.23) + self.assertAlmostEqual(to_numpy(t)[0,0], 1.23) tc = t.copy() tdc = t.deepcopy() - self.assertAlmostEqual(tc.to_array()[0,0], 1.23) - self.assertAlmostEqual(tdc.to_array()[0,0], 1.23) + self.assertAlmostEqual(to_numpy(tc)[0,0], 1.23) + self.assertAlmostEqual(to_numpy(tdc)[0,0], 1.23) t += 1.23 - self.assertAlmostEqual(t.to_array()[0,0], 2.46) - self.assertAlmostEqual(tc.to_array()[0,0], 2.46) - self.assertAlmostEqual(tdc.to_array()[0,0], 1.23) + self.assertAlmostEqual(to_numpy(t)[0,0], 2.46) + self.assertAlmostEqual(to_numpy(tc)[0,0], 2.46) + self.assertAlmostEqual(to_numpy(tdc)[0,0], 1.23) def test_copy_data(self): t = self.t - t += 1.23 + 
t.set_value(1.23) s = self.s - s += 5.43 - self.assertAlmostEqual(t.to_array()[0,0], 1.23) + s.set_value(5.43) + self.assertAlmostEqual(t.to_numpy()[0,0], 1.23) copy_data_to_from(t, s, 2) - self.assertAlmostEqual(t.to_array()[0,0], 5.43, 5) - self.assertAlmostEqual(t.to_array()[0,1], 5.43, 5) - self.assertAlmostEqual(t.to_array()[0,2], 1.23) - + self.assertAlmostEqual(t.to_numpy()[0,0], 5.43, 5) + self.assertAlmostEqual(t.to_numpy()[0,1], 5.43, 5) + self.assertAlmostEqual(t.to_numpy()[1,0], 1.23, 5) def test_global_method(self): t = self.t - t += 12.34 + t.set_value(12.34) a = log(t) - self.assertAlmostEqual(a.to_array()[0,0], math.log(12.34)) + self.assertAlmostEqual(to_numpy(a)[0,0], math.log(12.34), 5) if __name__ == '__main__': unittest.main()
