rondogency commented on a change in pull request #17885:
URL: https://github.com/apache/incubator-mxnet/pull/17885#discussion_r411651625
##########
File path: CMakeLists.txt
##########
@@ -726,18 +726,39 @@ endif()
# extension libraries (custom operators, custom subgraphs) are built by default
add_library(customop_lib SHARED
${CMAKE_CURRENT_SOURCE_DIR}/example/extensions/lib_custom_op/gemm_lib.cc)
+add_library(transposecsr_lib SHARED
${CMAKE_CURRENT_SOURCE_DIR}/example/extensions/lib_custom_op/transposecsr_lib.cc)
+add_library(transposerowsp_lib SHARED
${CMAKE_CURRENT_SOURCE_DIR}/example/extensions/lib_custom_op/transposerowsp_lib.cc)
add_library(subgraph_lib SHARED
${CMAKE_CURRENT_SOURCE_DIR}/example/extensions/lib_subgraph/subgraph_lib.cc)
+add_library(pass_lib SHARED
${CMAKE_CURRENT_SOURCE_DIR}/example/extensions/lib_pass/pass_lib.cc)
target_include_directories(customop_lib PUBLIC
${CMAKE_CURRENT_SOURCE_DIR}/include/mxnet)
+target_include_directories(transposecsr_lib PUBLIC
${CMAKE_CURRENT_SOURCE_DIR}/include/mxnet)
+target_include_directories(transposerowsp_lib PUBLIC
${CMAKE_CURRENT_SOURCE_DIR}/include/mxnet)
target_include_directories(subgraph_lib PUBLIC
${CMAKE_CURRENT_SOURCE_DIR}/include/mxnet)
+target_include_directories(pass_lib PUBLIC
${CMAKE_CURRENT_SOURCE_DIR}/include/mxnet)
if(USE_CUDA)
add_library(customop_gpu_lib SHARED
${CMAKE_CURRENT_SOURCE_DIR}/example/extensions/lib_custom_op/relu_lib.cu)
target_include_directories(customop_gpu_lib PUBLIC
${CMAKE_CURRENT_SOURCE_DIR}/include/mxnet)
endif()
-if(MSVC)
+if(UNIX)
Review comment:
those lines can be deleted
##########
File path: Makefile
##########
@@ -686,21 +686,36 @@ pylint:
python3 -m pylint --rcfile=$(ROOTDIR)/ci/other/pylintrc
--ignore-patterns=".*\.so$$,.*\.dll$$,.*\.dylib$$" python/mxnet
# MXNet extension dynamically loading libraries
-EXT_LIBS = build/libcustomop_lib.so build/libsubgraph_lib.so
+EXT_LIBS = build/libcustomop_lib.so build/libtransposecsr_lib.so
build/libtransposerowsp_lib.so build/libsubgraph_lib.so build/libpass_lib.so
ifeq ($(USE_CUDA), 1)
EXT_LIBS += build/libcustomop_gpu_lib.so
endif
extension_libs: $(EXT_LIBS)
build/libcustomop_lib.so:
@mkdir -p $(@D)
+ $(CXX) -shared -fPIC -std=c++11
example/extensions/lib_custom_op/gemm_lib.cc -o /dev/null -I include/mxnet
$(CXX) -shared -fPIC -std=c++17
example/extensions/lib_custom_op/gemm_lib.cc -o $@ -I include/mxnet
+build/libtransposecsr_lib.so:
+ @mkdir -p $(@D)
+ $(CXX) -shared -fPIC -std=c++11
example/extensions/lib_custom_op/transposecsr_lib.cc -o /dev/null -I
include/mxnet
+ $(CXX) -shared -fPIC -std=c++17
example/extensions/lib_custom_op/transposecsr_lib.cc -o $@ -I include/mxnet
Review comment:
we should state explicitly that this requires GCC 7 (or whatever other
prerequisite applies) in order to use C++17
##########
File path: example/extensions/lib_custom_op/gemm_lib.cc
##########
@@ -87,20 +87,20 @@ MXReturnValue forward(std::map<std::string, std::string>
attrs,
***** gradient outputs
* outputs[0] = dA; outputs[1] = dB
*/
-MXReturnValue backward(std::map<std::string, std::string> attrs,
- std::vector<MXTensor> inputs,
- std::vector<MXTensor> outputs,
- OpResource res) {
+MXReturnValue backward(const std::unordered_map<std::string, std::string>&
attrs,
+ std::vector<MXTensor>* inputs,
Review comment:
don't forget to reflect this change in the lib_custom_op README
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]