Repository: incubator-singa
Updated Branches:
  refs/heads/master e9b2c964c -> c967169bc


SINGA-259 Add maven pom file for building java classes

This ticket refactors some APIs in the *.i files and creates the pom.xml
so that Maven can be used to compile the Java classes. Java unit testing
is also enabled with a simple example.
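
For example, the SWIG-level names move from type-prefixed identifiers to
CamelCase; a few representative pairs from the diffs below:

    floatSum                -> SumAsFloat
    floatGetValue           -> GetFloatValue
    floatSetValue           -> SetFloatValue
    floatBernoulli          -> Bernoulli
    Add_Tf / Add_Tf_out     -> AddFloat / AddFloatWithRet
    Add_TT, Sub_TT, ...     -> __add__, __sub__, ...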

To test this ticket,
1. Compile SINGA with Java enabled (CUDA and cuDNN are optional) in `build/`

        cmake -DUSE_JAVA=ON -DUSE_PYTHON=OFF ..
        make

2. Go to `build/java` and run the unit tests

        export LD_LIBRARY_PATH=<path to build>/java/lib:$LD_LIBRARY_PATH
        mvn test

The folder structure for the Java project is

    java/
      pom.xml
      lib/libsinga_wrap.so
      src/
        main/java/org/apache/singa/
          swig/
          proto/
        test/java/org/apache/singa/
Also fixed a bug in tensor.py::from_numpy(), which handled int arrays incorrectly.
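
A minimal sketch of the fixed behavior (mirroring the new test in
test/python/test_tensor.py): from_numpy() now down-converts int64/float64
arrays to int32/float32 and allocates the tensor with the matching data
type, so an int array round-trips correctly:

    import numpy as np
    from singa import tensor

    a = np.asarray([[1, 0, 0], [0, 1, 0]], dtype=np.int)
    t = tensor.from_numpy(a)   # now stored as a kInt tensor
    b = tensor.to_numpy(t)
    assert np.sum(a - b) == 0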


Project: http://git-wip-us.apache.org/repos/asf/incubator-singa/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-singa/commit/c967169b
Tree: http://git-wip-us.apache.org/repos/asf/incubator-singa/tree/c967169b
Diff: http://git-wip-us.apache.org/repos/asf/incubator-singa/diff/c967169b

Branch: refs/heads/master
Commit: c967169bc1832239fc79a89171e8b372a04e143b
Parents: e9b2c96
Author: Wei Wang <wang...@comp.nus.edu.sg>
Authored: Mon Oct 10 15:48:00 2016 +0800
Committer: Wei Wang <wang...@comp.nus.edu.sg>
Committed: Mon Oct 10 23:49:48 2016 +0800

----------------------------------------------------------------------
 cmake/Dependencies.cmake                        |   4 +-
 java/CMakeLists.txt                             |  61 ++---
 java/pom.xml                                    |  38 +++
 .../test/java/org/apache/singa/TestTensor.java  |  59 +++++
 python/CMakeLists.txt                           |  24 +-
 python/singa/tensor.py                          | 150 ++++++------
 src/CMakeLists.txt                              |   8 +-
 src/api/core_device.i                           |   8 +-
 src/api/core_tensor.i                           | 230 +++++++------------
 src/api/io_snapshot.i                           |   4 +-
 src/api/model_layer.i                           |  22 +-
 src/core/tensor/tensor.cc                       |   3 +-
 src/core/tensor/tensor_math_cpp.h               |   7 +
 src/proto/core.proto                            |   2 +-
 src/proto/io.proto                              |   2 +-
 src/proto/model.proto                           |   2 +-
 test/python/test_tensor.py                      |  19 +-
 17 files changed, 358 insertions(+), 285 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/c967169b/cmake/Dependencies.cmake
----------------------------------------------------------------------
diff --git a/cmake/Dependencies.cmake b/cmake/Dependencies.cmake
index d9dbae7..6ad15c0 100644
--- a/cmake/Dependencies.cmake
+++ b/cmake/Dependencies.cmake
@@ -95,7 +95,7 @@ IF(USE_PYTHON)
 ENDIF()
 
 IF(USE_JAVA)
-    FIND_PACKAGE(Java)
-    FIND_PACKAGE(JNI )
+    FIND_PACKAGE(Java REQUIRED)
+    FIND_PACKAGE(JNI REQUIRED)
     FIND_PACKAGE(SWIG 3.0 REQUIRED)
 ENDIF()

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/c967169b/java/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/java/CMakeLists.txt b/java/CMakeLists.txt
index ef38461..7f44198 100644
--- a/java/CMakeLists.txt
+++ b/java/CMakeLists.txt
@@ -19,9 +19,19 @@
 # This following function is taken from
 # https://github.com/Kitware/CMake/blob/master/Modules/FindProtobuf.cmake
 # and modified to our compilation.
+function(CAPTALIZE_STRING INPUT OUTPUT)
+  string(TOUPPER ${ARGV0} UPP)
+  string(LENGTH ${ARGV0} LEN)
+  string(SUBSTRING ${UPP} 0 1 PREFIX)
+  math(EXPR LEN "${LEN} - 1")
+  string(SUBSTRING ${ARGV0} 1 ${LEN} SUFFIX)
+  string(CONCAT RET ${PREFIX} ${SUFFIX})
+  set(${OUTPUT} ${RET} PARENT_SCOPE)
+endfunction()
+
 function(PROTOBUF_GENERATE_JAVA OUTPUT)
     if(NOT ARGN)
-        message(SEND_ERROR "Error: PROTOBUF_GENERATE_JAVA() called 
+        message(SEND_ERROR "Error: PROTOBUF_GENERATE_JAVA() called
         without any proto files")
         return()
     endif(NOT ARGN)
@@ -31,18 +41,18 @@ function(PROTOBUF_GENERATE_JAVA OUTPUT)
         get_filename_component(ABS_FIL ${FIL} ABSOLUTE)
         get_filename_component(FIL_WE ${FIL} NAME_WE)
         get_filename_component(PATH ${FIL} PATH)
-
-        list(APPEND ${OUTPUT} "${CMAKE_BINARY_DIR}/java/singa/proto/${FIL_WE}.java")
+        CAPTALIZE_STRING(${FIL_WE} FIL_WE1)
+        list(APPEND ${OUTPUT} "${CMAKE_BINARY_DIR}/java/src/main/java/org/apache/singa/proto/${FIL_WE1}.java")
 
         add_custom_command(
-            OUTPUT "${CMAKE_BINARY_DIR}/java/singa/proto/${FIL_WE}.java"
+            OUTPUT "${CMAKE_BINARY_DIR}/java/src/main/java/org/apache/singa/proto/${FIL_WE1}.java"
             COMMAND ${PROTOBUF_PROTOC_EXECUTABLE}
-            ARGS --java_out ${CMAKE_BINARY_DIR}/java
+            ARGS --java_out ${CMAKE_BINARY_DIR}/java/src/main/java
                  --proto_path ${PATH} ${ABS_FIL}
             DEPENDS ${ABS_FIL}
             COMMENT "Running java protocol buffer compiler on ${FIL}" VERBATIM)
     endforeach()
-    
+
     set_source_files_properties(${${SRCS}} ${${HDRS}} PROPERTIES GENERATED TRUE)
     set(${OUTPUT} ${${OUTPUT}} PARENT_SCOPE)
 endfunction()
@@ -62,6 +72,7 @@ function (create_symlinks)
         # Get OS dependent path to use in `execute_process`
         file(TO_NATIVE_PATH "${CMAKE_CURRENT_BINARY_DIR}/${path_file}" link)
         file(TO_NATIVE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/${path_file}" target)
+        # MESSAGE(STATUS  "${link}->${target}")
 
         if (UNIX)
             set(command ln -s ${target} ${link})
@@ -69,7 +80,7 @@ function (create_symlinks)
             set(command cmd.exe /c mklink ${link} ${target})
         endif()
 
-        execute_process(COMMAND ${command} 
+        execute_process(COMMAND ${command}
                         RESULT_VARIABLE result
                         ERROR_VARIABLE output)
 
@@ -80,38 +91,37 @@ function (create_symlinks)
     endforeach(path_file)
 endfunction(create_symlinks)
 
+# generate cxx and wrap.py
+file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/java/src/main/java/org/apache/singa/swig)
+file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/java/src/main/java/org/apache/singa/proto)
+file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/java/src/test/java/org/apache/singa)
+file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/java/lib)
+file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/src/api)
 
 # generate protobuf sources
 FILE(GLOB proto_files ${CMAKE_SOURCE_DIR}/src/proto/*.proto)
 PROTOBUF_GENERATE_JAVA(proto_javas ${proto_files})
-MESSAGE(STATUS "proto javas: ${proto_javas}")
+# MESSAGE(STATUS "proto javas: ${proto_javas}")
 
-# generate cxx and wrap.py
-if(NOT EXISTS "${CMKAE_BINARY_DIR}/java")
-    execute_process(
-        COMMAND mkdir ${CMAKE_BINARY_DIR}/java
-        COMMAND mkdir ${CMAKE_BINARY_DIR}/java/singa
-        COMMAND mkdir ${CMAKE_BINARY_DIR}/java/singa/proto
-        ERROR_QUIET)
-endif()
 execute_process(
-    COMMAND mkdir ${CMAKE_BINARY_DIR}/src/api
-    COMMAND swig -c++ -java -I${CMAKE_SOURCE_DIR}/include 
+    COMMAND swig -c++ -java -I${CMAKE_SOURCE_DIR}/include
     -I${JAVA_INCLUDE_PATH} -I${JAVA_INCLUDE_PATH2}
-    -outdir ${CMAKE_BINARY_DIR}/java/singa
-    -package singa 
+    -package org.apache.singa.swig
+    -outdir ${CMAKE_BINARY_DIR}/java/src/main/java/org/apache/singa/swig
     -o ${CMAKE_BINARY_DIR}/src/api/singa_java_wrap.cxx
-    ${CMAKE_SOURCE_DIR}/src/api/singa.i )
+    ${CMAKE_SOURCE_DIR}/src/api/singa.i)
 
 #MESSAGE(STATUS "java include: ${JAVA_INCLUDE_DIRS} ${JNI_INCLUDE_DIRS} ${JAVA_INCLUDE_PATH} ${JAVA_INCLUDE_PATH2}")
 set(java_srcs "${CMAKE_BINARY_DIR}/src/api/singa_java_wrap.cxx")
 
 #Create symlinks for all java source files  Do not omit !!!RELATIVE!!!
 file(GLOB_RECURSE java_source_files RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} *.java)
+file(GLOB_RECURSE java_xml_files RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} *.xml)
+list(APPEND java_source_files ${java_xml_files})
 create_symlinks(${java_source_files})
 
 IF(USE_CUDA)
-# remain this custom command to avoid cuda objs can't find 
+# remain this custom command to avoid cuda objs can't find
 ADD_CUSTOM_COMMAND(
     OUTPUT ${global_cuda_objs}
     COMMAND ${CMAKE_COMMAND} -E make_directory "${CMAKE_BINARY_DIR}/"
@@ -124,15 +134,14 @@ TARGET_LINK_LIBRARIES(singa_wrap ${SINGA_LINKER_LIBS} ${JNI_LIBRARIES})
 TARGET_INCLUDE_DIRECTORIES(singa_wrap PRIVATE ${JAVA_INCLUDE_PATH} ${JAVA_INCLUDE_PATH2})
 
 SET_TARGET_PROPERTIES(singa_wrap
-    PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/java/singa
+    PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/java/lib
 )
 
 IF(APPLE)
 ADD_CUSTOM_TARGET(
-    change_suffix ALL 
-    COMMAND ${CMAKE_COMMAND} -E rename "${CMAKE_BINARY_DIR}/java/singa/libsinga_wrap.dylib" "${CMAKE_BINARY_DIR}/java/singa/libsinga_wrap.so"
+    change_suffix ALL
+    COMMAND ${CMAKE_COMMAND} -E rename "${CMAKE_BINARY_DIR}/java/lib/libsinga_wrap.dylib" "${CMAKE_BINARY_DIR}/java/lib/libsinga_wrap.so"
     COMMENT "change .dylib to .so in mac system"
 )
 ADD_DEPENDENCIES(change_suffix _singa_wrap)
 ENDIF(APPLE)
-

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/c967169b/java/pom.xml
----------------------------------------------------------------------
diff --git a/java/pom.xml b/java/pom.xml
new file mode 100644
index 0000000..e29faec
--- /dev/null
+++ b/java/pom.xml
@@ -0,0 +1,38 @@
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+                             http://maven.apache.org/maven-4_0_0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <groupId>org.apache.singa</groupId>
+  <artifactId>junit-tests</artifactId>
+  <packaging>jar</packaging>
+  <version>1.0-SNAPSHOT</version>
+  <name>junit-tests</name>
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-compiler-plugin</artifactId>
+        <configuration>
+          <source>1.5</source>
+          <target>1.5</target>
+        </configuration>
+      </plugin>
+    </plugins>
+  </build>
+  <dependencies>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <version>4.5</version>
+      <scope>compile</scope>
+    </dependency>
+    <!-- https://mvnrepository.com/artifact/com.google.protobuf/protobuf-java -->
+       <dependency>
+           <groupId>com.google.protobuf</groupId>
+           <artifactId>protobuf-java</artifactId>
+           <version>2.6.1</version>
+           <scope>compile</scope>
+       </dependency>
+  </dependencies>
+</project>

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/c967169b/java/src/test/java/org/apache/singa/TestTensor.java
----------------------------------------------------------------------
diff --git a/java/src/test/java/org/apache/singa/TestTensor.java b/java/src/test/java/org/apache/singa/TestTensor.java
new file mode 100644
index 0000000..c66f41a
--- /dev/null
+++ b/java/src/test/java/org/apache/singa/TestTensor.java
@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+package test;
+
+import org.apache.singa.swig.*;
+
+
+import junit.framework.*;
+import static org.junit.Assert.*;
+
+public class TestTensor extends TestCase {
+
+        protected void setUp(){
+            System.loadLibrary("singa_wrap");
+          }
+       public void testTensorFunc() {
+               Shape s = new Shape(2);
+               s.set(0, 2);
+               s.set(1, 3);
+
+               Tensor t1 = new Tensor(s);
+               t1.SetFloatValue(0.1f);
+               Tensor t2 = singa_wrap.Square(t1);
+               float[] data = new float[6];
+
+               t2.GetFloatValue(data, 6);
+               for(int i = 0; i < 6; i++)
+                       assertEquals(data[i], 0.01, 1e-4);
+
+               for (int i =0; i< 6; i++)
+                       data[i] = i * 1.0f;
+               Tensor t3 = new Tensor(s);
+               t3.CopyFloatDataFromHostPtr(data, 6);
+
+
+               t3.GetFloatValue(data, 6);
+               for(int i = 0; i < 6; i++)
+                       assertEquals(data[i], i * 1.0f, 1e-4);
+
+       }
+
+}

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/c967169b/python/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt
index 8bf8319..7774af0 100644
--- a/python/CMakeLists.txt
+++ b/python/CMakeLists.txt
@@ -21,7 +21,7 @@
 # and modified to our compilation.
 function(PROTOBUF_GENERATE_PYTHON OUTPUT)
     if(NOT ARGN)
-        message(SEND_ERROR "Error: PROTOBUF_GENERATE_PYTHON() called 
+        message(SEND_ERROR "Error: PROTOBUF_GENERATE_PYTHON() called
         without any proto files")
         return()
     endif(NOT ARGN)
@@ -42,7 +42,7 @@ function(PROTOBUF_GENERATE_PYTHON OUTPUT)
             DEPENDS ${ABS_FIL}
             COMMENT "Running Python protocol buffer compiler on ${FIL}" VERBATIM)
     endforeach()
-    
+
     set_source_files_properties(${${SRCS}} ${${HDRS}} PROPERTIES GENERATED TRUE)
     set(${OUTPUT} ${${OUTPUT}} PARENT_SCOPE)
 endfunction()
@@ -72,7 +72,7 @@ function (create_symlinks)
             set(command cmd.exe /c mklink ${link} ${target})
         endif()
 
-        execute_process(COMMAND ${command} 
+        execute_process(COMMAND ${command}
                         RESULT_VARIABLE result
                         ERROR_VARIABLE output)
 
@@ -90,19 +90,13 @@ PROTOBUF_GENERATE_PYTHON(proto_pys ${proto_files})
 #MESSAGE(STATUS "proto pys: ${proto_pys}")
 
 # generate cxx and wrap.py
-if(NOT EXISTS "${CMKAE_BINARY_DIR}/python")
-    execute_process(
-        COMMAND mkdir ${CMAKE_BINARY_DIR}/python
-        COMMAND mkdir ${CMAKE_BINARY_DIR}/python/singa
-        COMMAND mkdir ${CMAKE_BINARY_DIR}/python/singa/proto
-        ERROR_QUIET)
-endif()
+file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/python/singa/proto)
+file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/src/api)
 execute_process(
-    COMMAND mkdir ${CMAKE_BINARY_DIR}/src/api
-    COMMAND swig -c++ -python -I${CMAKE_SOURCE_DIR}/include 
+    COMMAND swig -c++ -python -I${CMAKE_SOURCE_DIR}/include
     -outdir ${CMAKE_BINARY_DIR}/python/singa
     -o ${CMAKE_BINARY_DIR}/src/api/singa_wrap.cxx
-    ${CMAKE_SOURCE_DIR}/src/api/singa.i )
+    ${CMAKE_SOURCE_DIR}/src/api/singa.i)
 
 set(python_srcs "${CMAKE_BINARY_DIR}/src/api/singa_wrap.cxx")
 
@@ -112,7 +106,7 @@ create_symlinks(${python_source_files})
 
 
 IF(USE_CUDA)
-# remain this custom command to avoid cuda objs can't find 
+# remain this custom command to avoid cuda objs can't find
 ADD_CUSTOM_COMMAND(
     OUTPUT ${global_cuda_objs}
     COMMAND ${CMAKE_COMMAND} -E make_directory "${CMAKE_BINARY_DIR}/"
@@ -137,7 +131,7 @@ FILE(WRITE ${CMAKE_BINARY_DIR}/python/singa/proto/__init__.py "")
 #MESSAGE(STATUS "apple: ${APPLE}")
 IF(APPLE)
 ADD_CUSTOM_TARGET(
-    change_suffix ALL 
+    change_suffix ALL
     COMMAND ${CMAKE_COMMAND} -E rename "${CMAKE_BINARY_DIR}/python/singa/_singa_wrap.dylib" "${CMAKE_BINARY_DIR}/python/singa/_singa_wrap.so"
     COMMENT "change .dylib to .so in mac system"
 )

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/c967169b/python/singa/tensor.py
----------------------------------------------------------------------
diff --git a/python/singa/tensor.py b/python/singa/tensor.py
index d08b6cb..c28f5b5 100644
--- a/python/singa/tensor.py
+++ b/python/singa/tensor.py
@@ -42,7 +42,7 @@ Example usage::
     s = r.to_numpy()  # tensor -> numpy array, r must be on cpu
 
 
-There are two set of tensor functions,
+There are two sets of tensor functions,
 
 Tensor member functions
     which would change the internal state of the Tensor instance.
@@ -192,7 +192,7 @@ class Tensor(object):
         '''
         # assert type(x) == float, 'set value only accepts float input'
         # if isinstance(x, float):
-        self.singa_tensor.floatSetValue(x)
+        self.singa_tensor.SetFloatValue(float(x))
 
     def copy_from_numpy(self, np_array, offset=0):
         ''' Copy the data from the numpy array.
@@ -206,9 +206,9 @@ class Tensor(object):
             np_array = np_array.flatten()
         dt = np_array.dtype
         if dt == np.float32:
-            self.singa_tensor.floatCopyDataFromHostPtr(np_array)
+            self.singa_tensor.CopyFloatDataFromHostPtr(np_array)
         elif dt == np.int or dt == np.int32:
-            self.singa_tensor.intCopyDataFromHostPtr(np_array)
+            self.singa_tensor.CopyIntDataFromHostPtr(np_array)
         else:
             print 'Not implemented yet for ', dt
 
@@ -256,7 +256,7 @@ class Tensor(object):
         Args:
             p (float): with probability p, each element is sample to 1.
         '''
-        singa.floatBernoulli(float(p), self.singa_tensor)
+        singa.Bernoulli(float(p), self.singa_tensor)
 
     def gaussian(self, mean, std):
         '''Generate a value for each element following a Gaussian distribution.
@@ -265,7 +265,7 @@ class Tensor(object):
             mean (float): mean of the distribution
             std (float): standard variance of the distribution
         '''
-        singa.floatGaussian(float(mean), float(std), self.singa_tensor)
+        singa.Gaussian(float(mean), float(std), self.singa_tensor)
 
     def uniform(self, low, high):
         '''Generate a value for each element following a uniform distribution.
@@ -274,7 +274,7 @@ class Tensor(object):
             low (float): the lower bound
             high (float): the hight bound
         '''
-        singa.floatUniform(float(low), float(high), self.singa_tensor)
+        singa.Uniform(float(low), float(high), self.singa_tensor)
 
     def add_column(self, v):
         '''Add a tensor to each column of this tensor.
@@ -384,91 +384,91 @@ class Tensor(object):
 
     def __add__(self, rhs):
         if isinstance(rhs, Tensor):
-            return _call_singa_func(singa.Add_TT,
-                                    self.singa_tensor, rhs.singa_tensor)
+            return from_raw_tensor(
+                singa.__add__(self.singa_tensor, rhs.singa_tensor))
         else:
-            return _call_singa_func(singa.Add_Tf,
+            return _call_singa_func(singa.AddFloat,
                                     self.singa_tensor, rhs)
 
     def __sub__(self, rhs):
         if isinstance(rhs, Tensor):
-            return _call_singa_func(singa.Sub_TT,
-                                    self.singa_tensor, rhs.singa_tensor)
+            return from_raw_tensor(
+                singa.__sub__(self.singa_tensor, rhs.singa_tensor))
         else:
-            return _call_singa_func(singa.Sub_Tf,
+            return _call_singa_func(singa.SubFloat,
                                     self.singa_tensor, rhs)
 
     def __mul__(self, rhs):
         if isinstance(rhs, Tensor):
-            return _call_singa_func(singa.EltwiseMul_TT,
-                                    self.singa_tensor, rhs.singa_tensor)
+            return from_raw_tensor(
+                singa.__mul__(self.singa_tensor, rhs.singa_tensor))
         else:
-            return _call_singa_func(singa.EltwiseMul_Tf,
+            return _call_singa_func(singa.EltwiseMulFloat,
                                     self.singa_tensor, rhs)
 
     def __div__(self, rhs):
         if isinstance(rhs, Tensor):
-            return _call_singa_func(singa.Div_TT,
-                                    self.singa_tensor, rhs.singa_tensor)
+            return from_raw_tensor(
+                singa.__div__(self.singa_tensor, rhs.singa_tensor))
         else:
-            return _call_singa_func(singa.Div_Tf,
+            return _call_singa_func(singa.DivFloat,
                                     self.singa_tensor, rhs)
 
     def __lt__(self, rhs):
         if isinstance(rhs, Tensor):
-            return _call_singa_func(singa.LT_TT, self.singa_tensor,
-                                    rhs.singa_tensor)
+            return from_raw_tensor(
+                singa.__lt__(self.singa_tensor, rhs.singa_tensor))
         else:
-            return _call_singa_func(singa.LT_Tf, self.singa_tensor, rhs)
+            return _call_singa_func(singa.LTFloat, self.singa_tensor, rhs)
 
     def __le__(self, rhs):
         if isinstance(rhs, Tensor):
-            return _call_singa_func(
-                singa.LE_TT,
-                self.singa_tensor,
-                rhs.singa_tensor)
+            return from_raw_tensor(
+                singa.__le__(self.singa_tensor, rhs.singa_tensor))
         else:
-            return _call_singa_func(singa.LE_Tf, self.singa_tensor, rhs)
+            return _call_singa_func(singa.LEFloat, self.singa_tensor, rhs)
 
     def __gt__(self, rhs):
         if isinstance(rhs, Tensor):
-            return _call_singa_func(
-                singa.GT_TT,
-                self.singa_tensor,
-                rhs.singa_tensor)
+            return from_raw_tensor(
+                singa.__gt__(self.singa_tensor, rhs.singa_tensor))
         else:
-            return _call_singa_func(singa.GT_Tf, self.singa_tensor, rhs)
+            return _call_singa_func(singa.GTFloat, self.singa_tensor, rhs)
 
     def __ge__(self, rhs):
         if isinstance(rhs, Tensor):
-            return _call_singa_func(
-                singa.GE_TT,
-                self.singa_tensor,
-                rhs.singa_tensor)
+            return from_raw_tensor(
+                singa.__ge__(self.singa_tensor, rhs.singa_tensor))
         else:
-            return _call_singa_func(singa.GE_Tf, self.singa_tensor, rhs)
+            return _call_singa_func(singa.GEFloat, self.singa_tensor, rhs)
 
     def __radd__(self, lhs):
         lhs = float(lhs)
-        return _call_singa_func(singa.Add_Tf, self.singa_tensor, lhs)
+        one = Tensor(self.shape, self.device, self.dtype)
+        one.set_value(lhs)
+        one += self
+        return one
 
     def __rsub__(self, lhs):
         lhs = float(lhs)
-        ret = _call_singa_func(singa.Sub_Tf, self.singa_tensor, lhs)
-        ret *= -1
-        return ret
+        one = Tensor(self.shape, self.device, self.dtype)
+        one.set_value(lhs)
+        one -= self
+        return one
 
     def __rmul__(self, lhs):
         lhs = float(lhs)
-        return _call_singa_func(singa.EltwiseMul_Tf, self.singa_tensor, lhs)
+        one = Tensor(self.shape, self.device, self.dtype)
+        one.set_value(lhs)
+        one *= self
+        return one
 
     def __rdiv__(self, lhs):
         lhs = float(lhs)
         one = Tensor(self.shape, self.device, self.dtype)
-        one.set_value(1)
-        one *= lhs
-        return _call_singa_func(singa.Div_TT, one.singa_tensor,
-                                self.singa_tensor)
+        one.set_value(lhs)
+        one /= self
+        return one
 
 ''' python functions for global functions in Tensor.h
 '''
@@ -538,7 +538,21 @@ def from_numpy(np_array):
     Returns:
         A Tensor instance allocated on the default CppCPU device.
     '''
-    ret = Tensor(np_array.shape)
+    assert type(np_array) is np.ndarray, 'Must input numpy array'
+    # convert to float32 array
+    if np_array.dtype == np.float64 or np_array.dtype == np.float:
+        np_array = np_array.astype(np.float32)
+
+    if np_array.dtype == np.int64 or np_array.dtype == np.int:
+        np_array = np_array.astype(np.int32)
+
+    if np_array.dtype == np.float32:
+        dtype = core_pb2.kFloat32
+    else:
+        assert np_array.dtype == np.int32, \
+            'Only float and int tensors are supported'
+        dtype = core_pb2.kInt
+    ret = Tensor(np_array.shape, dtype=dtype)
     ret.copy_from_numpy(np_array)
     return ret
 
@@ -559,9 +573,9 @@ def to_numpy(t):
         'Please move the tensor onto the default host device'
 
     if t.dtype == core_pb2.kFloat32:
-        np_array = t.singa_tensor.floatGetValue(int(t.size()))
+        np_array = t.singa_tensor.GetFloatValue(int(t.size()))
     elif t.dtype == core_pb2.kInt:
-        np_array = t.singa_tensor.intGetValue(int(t.size()))
+        np_array = t.singa_tensor.GetIntValue(int(t.size()))
     else:
         print 'Not implemented yet for ', t.dtype
     return np_array.reshape(t.shape)
@@ -670,7 +684,7 @@ def sum(t, axis=None):
     '''
 
     if axis is None:
-        return singa.floatSum(t.singa_tensor)
+        return singa.SumAsFloat(t.singa_tensor)
     else:
         return _call_singa_func(singa.Sum, t.singa_tensor, axis)
 
@@ -691,12 +705,12 @@ def pow(t, x, out=None):
         if isinstance(x, Tensor):
             return _call_singa_func(singa.Pow, t.singa_tensor, x.singa_tensor)
         else:
-            return _call_singa_func(singa.Pow_f, t.singa_tensor, x)
+            return _call_singa_func(singa.PowFloat, t.singa_tensor, x)
     else:
         if isinstance(x, Tensor):
-            singa.Pow(t.singa_tensor, x.singa_tensor, out.singa_tensor)
+            singa.PowWithRet(t.singa_tensor, x.singa_tensor, out.singa_tensor)
         else:
-            singa.Pow_f_out(t.singa_tensor, x, out.singa_tensor)
+            singa.PowFloatWitRet(t.singa_tensor, x, out.singa_tensor)
         return out
 
 
@@ -714,7 +728,7 @@ def average(t, axis=None):
     if t.ndim() > 1:
         return _call_singa_func(singa.Average, t.singa_tensor, axis)
     else:
-        return singa.floatSum(t.singa_tensor) / t.size()
+        return singa.SumAsFloat(t.singa_tensor) / t.size()
 
 
 def softmax(t, out=None):
@@ -809,7 +823,7 @@ def add(lhs, rhs, ret=None):
         if isinstance(rhs, Tensor):
             singa.Add(lhs.singa_tensor, rhs.singa_tensor, ret.singa_tensor)
         else:
-            singa.Add_Tf_out(lhs.singa_tensor, rhs, ret.singa_tensor)
+            singa.AddFloatWithRet(lhs.singa_tensor, rhs, ret.singa_tensor)
         return ret
 
 
@@ -832,7 +846,7 @@ def sub(lhs, rhs, ret=None):
         if isinstance(rhs, Tensor):
             singa.Sub(lhs.singa_tensor, rhs.singa_tensor, ret.singa_tensor)
         else:
-            singa.Sub_Tf_out(lhs.singa_tensor, rhs, ret.singa_tensor)
+            singa.SubFloatWithRet(lhs.singa_tensor, rhs, ret.singa_tensor)
         return ret
 
 
@@ -857,8 +871,8 @@ def eltwise_mult(lhs, rhs, ret=None):
             singa.EltwiseMult(lhs.singa_tensor, rhs.singa_tensor,
                               ret.singa_tensor)
         else:
-            singa.EltwiseMult_Tf_out(lhs.singa_tensor, rhs,
-                                     ret.singa_tensor)
+            singa.EltwiseMultFloatWithRet(lhs.singa_tensor, rhs,
+                                          ret.singa_tensor)
         return ret
 
 
@@ -882,8 +896,8 @@ def mult(A, B, C=None, alpha=1.0, beta=0.0):
     if C is None:
         return _call_singa_func(singa.Mult, A.singa_tensor, B.singa_tensor)
     else:
-        singa.floatMult(alpha, A.singa_tensor, B.singa_tensor,
-                        beta, C.singa_tensor)
+        singa.MultWithScale(alpha, A.singa_tensor, B.singa_tensor,
+                            beta, C.singa_tensor)
         return C
 
 
@@ -906,7 +920,7 @@ def div(lhs, rhs, ret=None):
         if isinstance(rhs, Tensor):
             singa.Div(lhs.singa_tensor, rhs.singa_tensor, ret.singa_tensor)
         else:
-            singa.Div_Tf_out(lhs.singa_tensor, rhs, ret.singa_tensor)
+            singa.DivFloatWithRet(lhs.singa_tensor, rhs, ret.singa_tensor)
         return ret
 
 
@@ -921,7 +935,7 @@ def axpy(alpha, x, y):
     Returns:
         y
     '''
-    singa.floatAxpy(float(alpha), x.singa_tensor, y.singa_tensor)
+    singa.Axpy(float(alpha), x.singa_tensor, y.singa_tensor)
     return y
 
 
@@ -935,7 +949,7 @@ def bernoulli(p, t):
     Returns:
         t
     '''
-    singa.floatBernoulli(float(p), t.singa_tensor)
+    singa.Bernoulli(float(p), t.singa_tensor)
     return t
 
 
@@ -950,7 +964,7 @@ def gaussian(mean, std, t):
     Returns:
         t
     '''
-    singa.floatGaussian(float(mean), float(std), t.singa_tensor)
+    singa.Gaussian(float(mean), float(std), t.singa_tensor)
     return t
 
 
@@ -965,7 +979,7 @@ def uniform(low, high, t):
     Returns:
         t
     '''
-    singa.floatUniform(float(low), float(high), t.singa_tensor)
+    singa.Uniform(float(low), float(high), t.singa_tensor)
     return t
 
 
@@ -982,8 +996,8 @@ def add_column(alpha, v, beta, M):
     Returns:
         M
     '''
-    singa.floatAddColumn(float(alpha), float(beta), v.singa_tensor,
-                         M.singa_tensor)
+    singa.AddColumnWithScale(float(alpha), float(beta), v.singa_tensor,
+                             M.singa_tensor)
     return M
 
 
@@ -1000,7 +1014,7 @@ def add_row(alpha, v, beta, M):
     Returns:
         M
     '''
-    singa.floatAddRow(alpha, beta, v.singa_tensor, M.singa_tensor)
+    singa.AddRowWithScale(alpha, beta, v.singa_tensor, M.singa_tensor)
     return M
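
Note on the reflected operators above: __radd__, __rsub__, __rmul__ and
__rdiv__ now build a constant tensor from the scalar and reuse the
in-place operators, instead of calling the removed *_Tf wrappers. A short
usage sketch matching the updated Python tests:

    from singa import tensor

    x = tensor.Tensor((3,))
    x.set_value(1)
    y = 2 - x   # __rsub__: fill a tensor with 2, then subtract x in place
    z = 2 / x   # __rdiv__: fill a tensor with 2, then divide by x in place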
 
 

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/c967169b/src/CMakeLists.txt
----------------------------------------------------------------------
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index b9526a3..8ef4732 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -73,19 +73,19 @@ FOREACH(fil ${proto_hdrs})
     TARGET copy_protobuf POST_BUILD
     COMMAND ${CMAKE_COMMAND} -E copy_if_different ${fil}
     "${CMAKE_BINARY_DIR}/include/singa/proto/${filename}"
-    DEPENDS ${proto_files} 
+    DEPENDS ${proto_files}
  )
 ENDFOREACH()
 
 ADD_LIBRARY(singa_objects OBJECT ${singa_sources})
-IF(WIN32) 
+IF(WIN32)
   ADD_LIBRARY(singa STATIC $<TARGET_OBJECTS:singa_objects> ${cuda_objs})
   TARGET_LINK_LIBRARIES(singa ${SINGA_LINKER_LIBS})
   IF (MSVC)
        ADD_DEFINITIONS(-D_CRT_SECURE_NO_DEPRECATE)
        IF(DISABLE_WARNINGS)
          ADD_DEFINITIONS(-DDISABLE_WARNINGS)
-         SET_TARGET_PROPERTIES(singa_objects 
+         SET_TARGET_PROPERTIES(singa_objects
           PROPERTIES COMPILE_FLAGS "/wd4244 /wd4267 /wd4018 /wd4005 /wd4804 /wd4800")
        ENDIF()
   ENDIF()
@@ -97,7 +97,7 @@ IF(UNIX OR APPLE)
   TARGET_LINK_LIBRARIES(singa ${SINGA_LINKER_LIBS})
 ENDIF()
 
-#pass configure infor to swig 
+#pass configure infor to swig
 FILE(REMOVE "${CMAKE_CURRENT_SOURCE_DIR}/api/config.i")
 CONFIGURE_FILE("${CMAKE_CURRENT_SOURCE_DIR}/api/config.i.in" "${CMAKE_CURRENT_SOURCE_DIR}/api/config.i")
 

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/c967169b/src/api/core_device.i
----------------------------------------------------------------------
diff --git a/src/api/core_device.i b/src/api/core_device.i
index f3381ae..a9bb840 100644
--- a/src/api/core_device.i
+++ b/src/api/core_device.i
@@ -35,10 +35,10 @@
 %shared_ptr(singa::Device);
 
 namespace std{
-%template(sizePair) std::pair<size_t, size_t>;
-%template(vectorPair) std::vector<std::pair<size_t, size_t>>;
-%template(vectorSharedPtr) std::vector<std::shared_ptr<singa::Device>>;
-%template(deviceVec) std::vector<int>;
+%template(PairSizeT) std::pair<size_t, size_t>;
+%template(VecPairSizeT) std::vector<std::pair<size_t, size_t>>;
+%template(VecSharedPtrDevice) std::vector<std::shared_ptr<singa::Device>>;
+%template(VecInt) std::vector<int>;
 }
 
 namespace singa{

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/c967169b/src/api/core_tensor.i
----------------------------------------------------------------------
diff --git a/src/api/core_tensor.i b/src/api/core_tensor.i
index d85e7f0..31562c9 100644
--- a/src/api/core_tensor.i
+++ b/src/api/core_tensor.i
@@ -22,18 +22,11 @@
 /*interface file for swig */
 
 %module core_tensor
+%include "config.i"
 %include "std_vector.i"
 %include "std_string.i"
 %include "std_shared_ptr.i"
 
-/*
-%include "carrays.i"
-%array_class(float, floatArray);
-%array_class(int, intArray);
-%array_class(char, charArray);
-%array_class(double, doubleArray);
-*/
-
 %{
 #define SWIG_FILE_WITH_INIT
 #include "singa/core/tensor.h"
@@ -49,8 +42,6 @@ using singa::DataType;
 %init %{
   import_array();
 %}
-#endif //USE_PYTHON
-
 %apply (float *IN_ARRAY1, int DIM1) {
        (const float *src, const size_t num)
 }
@@ -63,6 +54,15 @@ using singa::DataType;
 %apply (int *ARGOUT_ARRAY1, int DIM1) {
        (int *value, const size_t num)
 }
+#endif // USE_PYTHON
+
+#if USE_JAVA
+%include "arrays_java.i"
+%apply int[] {int *};
+%apply float[] {float *};
+#endif // USE_JAVA
+
+
 
 %template(Shape) std::vector<size_t>;
 
@@ -84,21 +84,18 @@ namespace singa{
     explicit Tensor(const std::vector<size_t> &shape,
                     DataType dtype = kFloat32);
     Tensor(const std::vector<size_t> &shape,
-           std::shared_ptr<singa::Device> dev, DataType dtype = kFloat32);
+           std::shared_ptr<singa::Device> dev,
+           DataType dtype = kFloat32);
     Tensor(const Tensor &from);
 
     std::shared_ptr<singa::Device> device() const;
-/*
-    template <typename DType> const DType* data() const;
-    %template(floatData) data<float>;
-    %template(intData) data<int>;
-    %template(charData) data<char>;
-    %template(doubleData) data<double>;
-    */
 
     template <typename SType> void GetValue(SType* value, const size_t num);
-    %template(floatGetValue) GetValue<float>;
-    %template(intGetValue) GetValue<int>;
+    %template(GetFloatValue) GetValue<float>;
+    %template(GetIntValue) GetValue<int>;
+
+    template <typename SType> void SetValue(const SType x);
+    %template(SetFloatValue) SetValue<float>;
 
     const DataType data_type() const;
     const std::vector<size_t> &shape() const;
@@ -115,45 +112,39 @@ namespace singa{
     float L2() const;
     float L1() const;
 
-    template <typename SType> void SetValue(const SType x);
-    %template(floatSetValue) SetValue<float>;
-    /* TODO(chonho-01) other types */
-    // --- other types
-
     template <typename DType> void CopyDataFromHostPtr(const DType *src,
                                                        const size_t num,
                                                        const size_t offset = 0);
-    %template(floatCopyDataFromHostPtr) CopyDataFromHostPtr<float>;
-    %template(intCopyDataFromHostPtr) CopyDataFromHostPtr<int>;
-    // --- other types
+    %template(CopyFloatDataFromHostPtr) CopyDataFromHostPtr<float>;
+    %template(CopyIntDataFromHostPtr) CopyDataFromHostPtr<int>;
 
     void CopyData(const Tensor &other);
     Tensor Clone() const;
     Tensor T() const;
 
-    /* python has no assignment operator
-    Tensor &operator=(const Tensor &t); */
+#if USE_JAVA
+    %rename(iAdd) operator+=(const Tensor &t);
+    %rename(iSub) operator-=(const Tensor &t);
+    %rename(iMul) operator*=(const Tensor &t);
+    %rename(iDiv) operator/=(const Tensor &t);
+#endif  // USE_JAVA
+
     Tensor &operator+=(const Tensor &t);
     Tensor &operator-=(const Tensor &t);
     Tensor &operator*=(const Tensor &t);
     Tensor &operator/=(const Tensor &t);
 
-
     template <typename DType> Tensor &operator+=(const DType x);
-    %template(iAdd_f) operator+=<float>;
-    // --- other types
+    %template(iAddFloat) operator+=<float>;
 
     template <typename DType> Tensor &operator-=(DType x);
-    %template(iSub_f) operator-=<float>;
-    // --- other types
+    %template(iSubFloat) operator-=<float>;
 
     template <typename DType> Tensor &operator*=(DType x);
-    %template(iMul_f) operator*=<float>;
-    // --- other types
+    %template(iMulFloat) operator*=<float>;
 
     template <typename DType> Tensor &operator/=(DType x);
-    %template(iDiv_f) operator/=<float>;
-    // --- other types
+    %template(iDivFloat) operator/=<float>;
 
 
     /*TODO(chonho-04)
@@ -161,8 +152,6 @@ namespace singa{
     amin
     asum
     */
-
-
   };
 
   void CopyDataToFrom(Tensor *dst, const Tensor &src, size_t num,
@@ -182,180 +171,139 @@ namespace singa{
 
   Tensor Sum(const Tensor &t, int axis);
   template <typename SType> SType Sum(const Tensor &t);
-  %template(floatSum) Sum<float>;
-  // --- other types
+  %template(SumAsFloat) Sum<float>;
 
-  /* TODO(chonho-02)
-     need to implement the average of all elements ??? */
   Tensor Average(const Tensor &t, int axis);
   Tensor SoftMax(const Tensor &t);
 
-
   Tensor Pow(const Tensor &base, const Tensor &exp);
+
+  %rename(PowWithRet) Pow(const Tensor &base, const Tensor &exp, Tensor *out);
   void Pow(const Tensor &base, const Tensor &exp, Tensor *out);
 
-  %rename(Pow_f) Pow(const Tensor &in, const float x);
-  template <typename SType>
-  Tensor Pow(const Tensor &in, const SType x);
-  %template(pow_temp) Pow<float>;
+  template <typename SType> Tensor Pow(const Tensor &in, const SType x);
+  %template(PowFloat) Pow<float>;
 
-  %rename(Pow_f_out) Pow(const Tensor &in, const float x, Tensor *out);
   template <typename SType>
   void Pow(const Tensor &in, const SType x, Tensor *out);
-  %template(pow_temp) Pow<float>;
-
+  %template(PowFloatWithRet) Pow<float>;
 
-  /* rename comparison operators */
-  %rename(LT_Tf) operator<(const Tensor &t, const float x);
-  %rename(LE_Tf) operator<=(const Tensor &t, const float x);
-  %rename(GT_Tf) operator>(const Tensor &t, const float x);
-  %rename(GE_Tf) operator>=(const Tensor &t, const float x);
-  %rename(LT_TT) operator<(const Tensor &lhs, const Tensor &rhs);
-  %rename(LE_TT) operator<=(const Tensor &lhs, const Tensor &rhs);
-  %rename(GT_TT) operator>(const Tensor &lhs, const Tensor &rhs);
-  %rename(GE_TT) operator>=(const Tensor &lhs, const Tensor &rhs);
 
+  %rename(__lt__) operator<(const Tensor &lhs, const Tensor &rhs);
+  %rename(__le__) operator<=(const Tensor &lhs, const Tensor &rhs);
+  %rename(__gt__) operator>(const Tensor &lhs, const Tensor &rhs);
+  %rename(__ge__) operator>=(const Tensor &lhs, const Tensor &rhs);
   Tensor operator<(const Tensor &lhs, const Tensor &rhs);
   Tensor operator<=(const Tensor &lhs, const Tensor &rhs);
   Tensor operator>(const Tensor &lhs, const Tensor &rhs);
   Tensor operator>=(const Tensor &lhs, const Tensor &rhs);
 
 
+  %rename(LTFloat) operator<(const Tensor &t, const float x);
   template <typename DType>
   Tensor operator<(const Tensor &t, const DType x);
-  %template(op) operator< <float>;
-  // --- other types
+  %template(oplt) operator< <float>;
 
-  template <typename DType>
-  Tensor operator<=(const Tensor &t, const DType x);
-  %template(op) operator<= <float>;
-  // --- other types
-
-  template <typename DType>
-  Tensor operator>(const Tensor &t, const DType x);
-  %template(op) operator> <float>;
-  // --- other types
-
-  template <typename DType>
-  Tensor operator>=(const Tensor &t, const DType x);
-  %template(op) operator>= <float>;
-  // --- other types
+  %rename(LEFloat) operator<=(const Tensor &t, const float x);
+  template <typename DType> Tensor operator<=(const Tensor &t, const DType x);
+  %template(ople) operator<= <float>;
 
-  /* NOTE(chonho)
-  no need to include theses
-  in python, these can be replaced with comparison operators
+  %rename(GTFloat) operator>(const Tensor &t, const float x);
+  template <typename DType> Tensor operator>(const Tensor &t, const DType x);
+  %template(opgt) operator> <float>;
 
-  template <typename DType>
-  void LT(const Tensor &t, DType x, Tensor *ret);
-  template <typename DType>
-  void LE(const Tensor &t, DType x, Tensor *ret);
-  template <typename DType>
-  void GT(const Tensor &t, DType x, Tensor *ret);
-  template <typename DType>
-  void GE(const Tensor &t, DType x, Tensor *ret);
-  */
+  %rename(GEFloat) operator>=(const Tensor &t, const float x);
+  template <typename DType> Tensor operator>=(const Tensor &t, const DType x);
+  %template(opge) operator>= <float>;
 
 
   /* ========== Arithmetic operations ========== */
-  %rename(Add_TT) operator+(const Tensor &lhs, const Tensor &rhs);
-  %rename(Sub_TT) operator-(const Tensor &lhs, const Tensor &rhs);
-  %rename(EltwiseMul_TT) operator*(const Tensor &lhs, const Tensor &rhs);
-  %rename(Div_TT) operator/(const Tensor &lhs, const Tensor &rhs);
+  %rename(__add__) operator+(const Tensor &lhs, const Tensor &rhs);
+  %rename(__sub__) operator-(const Tensor &lhs, const Tensor &rhs);
+  %rename(__mul__) operator*(const Tensor &lhs, const Tensor &rhs);
+  %rename(__div__) operator/(const Tensor &lhs, const Tensor &rhs);
   Tensor operator+(const Tensor &lhs, const Tensor &rhs);
   Tensor operator-(const Tensor &lhs, const Tensor &rhs);
   Tensor operator*(const Tensor &lhs, const Tensor &rhs);
   Tensor operator/(const Tensor &lhs, const Tensor &rhs);
-
-  %rename(Add_Tf) operator+(const Tensor &t, float x);
-  template <typename DType>
-  Tensor operator+(const Tensor &t, DType x);
-  %template(op) operator+<float>;
-  // --- other types
-
-  %rename(Sub_Tf) operator-(const Tensor &t, float x);
-  template <typename DType>
-  Tensor operator-(const Tensor &t, DType x);
-  %template(op) operator-<float>;
-  // --- other types
-
-  %rename(EltwiseMul_Tf) operator*(const Tensor &t, float x);
-  template <typename DType>
-  Tensor operator*(const Tensor &t, DType x);
-  %template(op) operator*<float>;
-  // --- other types
-
-  %rename(Div_Tf) operator/(const Tensor &t, float x);
-  template <typename DType>
-  Tensor operator/(const Tensor &t, DType x);
-  %template(op) operator/<float>;
-  // --- other types
-
   void Add(const Tensor &lhs, const Tensor &rhs, Tensor *ret);
   void Sub(const Tensor &lhs, const Tensor &rhs, Tensor *ret);
   void EltwiseMult(const Tensor &lhs, const Tensor &rhs, Tensor *ret);
   void Div(const Tensor &lhs, const Tensor &rhs, Tensor *ret);
 
-  template <typename DType>
-  void Add(const Tensor &t, DType x, Tensor *ret);
-  %template(Add_Tf_out) Add<float>;
-  // --- other types
+  %rename(AddFloat) operator+(const Tensor &t, float x);
+  template <typename DType> Tensor operator+(const Tensor &t, DType x);
+  %template(opadd) operator+ <float>;
+
+  %rename(SubFloat) operator-(const Tensor &t, float x);
+  template <typename DType> Tensor operator-(const Tensor &t, DType x);
+  %template(opsub) operator- <float>;
+
+  %rename(MultFloat) operator*(const Tensor &t, float x);
+  template <typename DType> Tensor operator*(const Tensor &t, DType x);
+  %template(opmul) operator* <float>;
+
+  %rename(DivFloat) operator/(const Tensor &t, float x);
+  template <typename DType> Tensor operator/(const Tensor &t, DType x);
+  %template(opdiv) operator/ <float>;
+
+  template <typename DType> void Add(const Tensor &t, DType x, Tensor *ret);
+  %template(AddFloatWithRet) Add<float>;
 
   template <typename DType>
   void Sub(const Tensor &t, DType x, Tensor *ret);
-  %template(Sub_Tf_out) Sub<float>;
-  // --- other types
+  %template(SubFloatWithRet) Sub<float>;
 
   template <typename DType>
   void EltwiseMult(const Tensor &t, DType x, Tensor *ret);
-  %template(EltwiseMult_Tf_out) EltwiseMult<float>;
-  // --- other types
+  %template(EltwiseMultFloatWithRet) EltwiseMult<float>;
 
   template <typename DType>
   void Div(const Tensor &t, DType x, Tensor *ret);
-  %template(Div_Tf_out) Div<float>;
-  // --- other types
+  %template(DivFloatWithRet) Div<float>;
 
 
   /* ========== Random operations ========== */
   template <typename SType>
   void Bernoulli(const SType p, Tensor *out);
-  %template(floatBernoulli) Bernoulli<float>;
-  // --- other types
+  %template(Bernoulli) Bernoulli<float>;
 
   template <typename SType>
   void Gaussian(const SType mean, const SType std, Tensor *out);
-  %template(floatGaussian) Gaussian<float>;
-  // --- other types
+  %template(Gaussian) Gaussian<float>;
 
   template <typename SType>
   void Uniform(const SType low, const SType high, Tensor *out);
-  %template(floatUniform) Uniform<float>;
-  // --- other types
+  %template(Uniform) Uniform<float>;
+
 
   /* ========== Blas operations ========== */
   template <typename SType>
   void Axpy(SType alpha, const Tensor &in, Tensor *out);
-  %template(floatAxpy) Axpy<float>;
-  // --- other types
+  %template(Axpy) Axpy<float>;
 
   Tensor Mult(const Tensor &A, const Tensor &B);
+  %rename(MultWithRet) Mult(const Tensor &A, const Tensor &B, Tensor *C);
   void Mult(const Tensor &A, const Tensor &B, Tensor *C);
   template <typename SType>
   void Mult(const SType alpha, const Tensor &A, const Tensor &B,
             const SType beta, Tensor *C);
-  %template(floatMult) Mult<float>;
+  %template(MultWithScale) Mult<float>;
+
+
+  /* =========== Matrix operations ==========*/
 
   void AddColumn(const Tensor &v, Tensor *M);
   template <typename SType>
   void AddColumn(const SType alpha, const SType beta, const Tensor &v,
                  Tensor *M);
-  %template(floatAddColumn) AddColumn<float>;
+  %template(AddColumnWithScale) AddColumn<float>;
 
   void AddRow(const Tensor &v, Tensor *M);
   template <typename SType>
   void AddRow(const SType alpha, const SType beta, const Tensor &v,
               Tensor *M);
-  %template(floatAddRow) AddRow<float>;
+  %template(AddRowWithScale) AddRow<float>;
 
   void DivColumn(const Tensor &v, Tensor *M);
   void DivRow(const Tensor &v, Tensor *M);
@@ -369,6 +317,4 @@ namespace singa{
 
   Tensor SoftMax(const Tensor &in);
   void SoftMax(const Tensor &in, Tensor *out);
-
 }
-
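
From the Python side, the renamed wrappers above are reached through the
tensor.py operators; a minimal sketch (assuming a CPU build with the
Python API enabled), where a Tensor-Tensor comparison dispatches to
singa.__lt__ and a Tensor-scalar comparison to singa.LTFloat:

    from singa import tensor

    x = tensor.Tensor((3,))
    x.set_value(1.0)
    y = tensor.Tensor((3,))
    y.set_value(2.0)

    m1 = x < y     # Tensor-Tensor: singa.__lt__
    m2 = x < 1.5   # Tensor-scalar: singa.LTFloat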

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/c967169b/src/api/io_snapshot.i
----------------------------------------------------------------------
diff --git a/src/api/io_snapshot.i b/src/api/io_snapshot.i
index 2203295..8413a1f 100644
--- a/src/api/io_snapshot.i
+++ b/src/api/io_snapshot.i
@@ -28,8 +28,8 @@
 %}
 
 namespace std{
-%template(nametensorPair) std::pair<string, singa::Tensor>;
-%template(nametensorVec) std::vector<std::pair<string, singa::Tensor>>;
+%template(PairStrTensor) std::pair<string, singa::Tensor>;
+%template(VecPairStrTensor) std::vector<std::pair<string, singa::Tensor>>;
 }
 
 namespace singa {

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/c967169b/src/api/model_layer.i
----------------------------------------------------------------------
diff --git a/src/api/model_layer.i b/src/api/model_layer.i
index ae651d5..31b2cb6 100644
--- a/src/api/model_layer.i
+++ b/src/api/model_layer.i
@@ -49,11 +49,11 @@ using singa::LayerConf;
 #endif
 
 namespace std {
-  %template(strVector) vector<string>;
-  %template(paramVector) vector<singa::ParamSpec>;
-  %template(tensorVector) vector<singa::Tensor>;
-  %template(ttvecPair) pair<singa::Tensor, vector<singa::Tensor>>;
-  %template(tvecPair) pair<vector<singa::Tensor>, vector<singa::Tensor>>;
+  %template(VecStr) vector<string>;
+  %template(VecParamSpec) vector<singa::ParamSpec>;
+  %template(VecTensor) vector<singa::Tensor>;
+  %template(PairTensorVecTensor) pair<singa::Tensor, vector<singa::Tensor>>;
+  %template(PairVecTensor) pair<vector<singa::Tensor>, vector<singa::Tensor>>;
 }
 
 
@@ -80,6 +80,7 @@ class Layer {
 
 std::shared_ptr<Layer> CreateLayer(const std::string& type);
 const std::vector<std::string> GetRegisteredLayers();
+
 class RNN : public Layer {
 };
 
@@ -88,12 +89,13 @@ class RNN : public Layer {
 class CudnnRNN : public RNN {
  public:
  // note: Must use std::vector instead of vector.
-  const std::vector<Tensor> Forward(int flag, const std::vector<Tensor>& inputs) override;
-  const std::pair<std::vector<Tensor>, std::vector<Tensor>> Backward(
-      int flag, const std::vector<Tensor>& grads) override;
+  const std::vector<Tensor> Forward(int flag,
+                                    const std::vector<Tensor>& inputs) override;
+  const std::pair<std::vector<Tensor>, std::vector<Tensor>>
+  Backward(int flag, const std::vector<Tensor>& grads) override;
   void ToDevice(std::shared_ptr<Device> device) override;
-    const std::vector<Tensor> param_values() override;
-    const std::vector<size_t> GetOutputSampleShape() const override;
+  const std::vector<Tensor> param_values() override;
+  const std::vector<size_t> GetOutputSampleShape() const override;
 };
 
 #endif  // CUDNN_VERSION_SWIG >= 5005

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/c967169b/src/core/tensor/tensor.cc
----------------------------------------------------------------------
diff --git a/src/core/tensor/tensor.cc b/src/core/tensor/tensor.cc
index d96b2ec..424edb2 100644
--- a/src/core/tensor/tensor.cc
+++ b/src/core/tensor/tensor.cc
@@ -479,13 +479,14 @@ void Tensor::SetValue(const SType x) {
   auto size = Size();
   auto ptr = block_;
   TYPE_LANG_SWITCH(data_type_, DType, device_->lang(), Lang, {
-    // cast x to DType
+    // TODO(wangwei) cast x to DType
     device_->Exec([size, x, ptr](Context *ctx) {
       Set<DType, Lang>(size, x, ptr, ctx);
     }, {}, {ptr});
   });
 }
 template void Tensor::SetValue<float>(const float x);
+template void Tensor::SetValue<int>(const int x);
 
 #define EltwiseUnaryTensorFn(fn, t, ret)                               \
   do {                                                                 \

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/c967169b/src/core/tensor/tensor_math_cpp.h
----------------------------------------------------------------------
diff --git a/src/core/tensor/tensor_math_cpp.h b/src/core/tensor/tensor_math_cpp.h
index edae209..f7e2b37 100644
--- a/src/core/tensor/tensor_math_cpp.h
+++ b/src/core/tensor/tensor_math_cpp.h
@@ -256,6 +256,13 @@ void Set<float, lang::Cpp>(const size_t num, const float x, Block *out,
   for (size_t i = 0; i < num; i++) outPtr[i] = x;
 }
 template <>
+void Set<int, lang::Cpp>(const size_t num, const int x, Block *out,
+                           Context *ctx) {
+  int *outPtr = static_cast<int *>(out->mutable_data());
+  for (size_t i = 0; i < num; i++) outPtr[i] = x;
+}
+
+template <>
 void Sigmoid<float, lang::Cpp>(const size_t num, const Block *in, Block *out,
                                Context *ctx) {
   float *outPtr = static_cast<float *>(out->mutable_data());

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/c967169b/src/proto/core.proto
----------------------------------------------------------------------
diff --git a/src/proto/core.proto b/src/proto/core.proto
index d455a9e..9264e55 100644
--- a/src/proto/core.proto
+++ b/src/proto/core.proto
@@ -18,7 +18,7 @@
 
 package singa;
 
-option java_package = "singa.proto";
+option java_package = "org.apache.singa.proto";
 
 // TODO(wangwei) check protobuf version to include the syntax
 //syntax = "proto2";

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/c967169b/src/proto/io.proto
----------------------------------------------------------------------
diff --git a/src/proto/io.proto b/src/proto/io.proto
index ab56983..e7ef21f 100644
--- a/src/proto/io.proto
+++ b/src/proto/io.proto
@@ -18,7 +18,7 @@
 
 package singa;
 
-option java_package = "singa.proto";
+option java_package = "org.apache.singa.proto";
 
 message EncoderConf {
   optional string type = 1 [default = "jpg2proto"];

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/c967169b/src/proto/model.proto
----------------------------------------------------------------------
diff --git a/src/proto/model.proto b/src/proto/model.proto
index 6be5e0a..545b556 100644
--- a/src/proto/model.proto
+++ b/src/proto/model.proto
@@ -18,7 +18,7 @@
 
 package singa;
 
-option java_package = "singa.proto";
+option java_package = "org.apache.singa.proto";
 /// \file layer.proto is adapted from [Caffe](https://github.com/BVLC/caffe/)'s
 /// proto file with commit id c419f8517b1e1b3d7a07fe212fc6c90a70b519ea. We
 /// use caffe's protocol for configuring layer hyper-parameters for easy

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/c967169b/test/python/test_tensor.py
----------------------------------------------------------------------
diff --git a/test/python/test_tensor.py b/test/python/test_tensor.py
index a1f220b..9cd2411 100644
--- a/test/python/test_tensor.py
+++ b/test/python/test_tensor.py
@@ -16,12 +16,9 @@
 # under the License.
 # =============================================================================
 
-import sys
-import os
 import math
 import unittest
-
-sys.path.append(os.path.join(os.path.dirname(__file__), '../../build/python'))
+import numpy as np
 
 
 from singa import tensor
@@ -132,34 +129,40 @@ class TestTensorMethods(unittest.TestCase):
         x.gaussian(1, 0.01)
         self.assertAlmostEqual(tensor.average(x), 1, 3)
 
-
     def test_radd(self):
         x = tensor.Tensor((3,))
         x.set_value(1)
         y = 1 + x
         self.assertEqual(tensor.average(y), 2.)
 
-
     def test_rsub(self):
         x = tensor.Tensor((3,))
         x.set_value(1)
         y = 1 - x
         self.assertEqual(tensor.average(y), 0.)
 
-
     def test_rmul(self):
         x = tensor.Tensor((3,))
         x.set_value(1)
         y = 2 * x
         self.assertEqual(tensor.average(y), 2.)
 
-
     def test_rdiv(self):
         x = tensor.Tensor((3,))
         x.set_value(1)
         y = 2 / x
         self.assertEqual(tensor.average(y), 2.)
 
+    def test_numpy_convert(self):
+        a = np.asarray([[1, 0, 0], [0, 1, 0]], dtype=np.int)
+        t = tensor.from_numpy(a)
+        b = tensor.to_numpy(t)
+        self.assertEqual(np.sum(a-b), 0)
+
+        a = np.asarray([[1, 0, 0], [0, 1, 0]], dtype=np.float32)
+        t = tensor.from_numpy(a)
+        b = tensor.to_numpy(t)
+        self.assertEqual(np.sum(a-b), 0.)
 
 
 if __name__ == '__main__':
