[GitHub] [incubator-mxnet] ElaineBao commented on issue #17265: Add bfloat16 floating-point format support based on AMP

2020-01-13 Thread GitBox
ElaineBao commented on issue #17265: Add bfloat16 floating-point format support 
based on AMP 
URL: https://github.com/apache/incubator-mxnet/pull/17265#issuecomment-574050844
 
 
   > For bfloat16 training, which loss scale is recommended? Do we also need to perform NaN checks?
   
   Bfloat16 has the same dynamic range as float32 because the two formats have the same number of exponent bits. It can therefore represent gradients directly and doesn't require loss scaling the way fp16 does.
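   For readers skimming the digest: bfloat16 is simply the upper 16 bits of a float32 (1 sign, 8 exponent, 7 mantissa bits), which is why the dynamic range matches. A minimal standalone C++ sketch (not code from the PR) showing that a gradient far below fp16's smallest subnormal (~6e-8) still survives a bf16 round trip:

```
#include <cstdint>
#include <cstring>
#include <iostream>

// Truncate a float32 to bfloat16 by keeping its upper 16 bits
// (1 sign + 8 exponent + 7 mantissa). Rounding is omitted for brevity.
uint16_t float_to_bfloat16(float f) {
  uint32_t bits;
  std::memcpy(&bits, &f, sizeof(bits));
  return static_cast<uint16_t>(bits >> 16);
}

float bfloat16_to_float(uint16_t b) {
  uint32_t bits = static_cast<uint32_t>(b) << 16;
  float f;
  std::memcpy(&f, &bits, sizeof(bits));
  return f;
}

int main() {
  float tiny_grad = 1e-30f;  // underflows to 0 in fp16 without loss scaling
  float back = bfloat16_to_float(float_to_bfloat16(tiny_grad));
  std::cout << back << "\n";  // ~1e-30: bf16 keeps fp32's exponent range
}
```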


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] sjtuWangDing commented on a change in pull request #17188: [Numpy] Add linalg.eig/eigh/eigvals/eigvalsh op

2020-01-13 Thread GitBox
sjtuWangDing commented on a change in pull request #17188: [Numpy] Add 
linalg.eig/eigh/eigvals/eigvalsh op
URL: https://github.com/apache/incubator-mxnet/pull/17188#discussion_r366190317
 
 

 ##
 File path: src/operator/numpy/linalg/np_eigvals.cc
 ##
 @@ -0,0 +1,124 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * Copyright (c) 2019 by Contributors
+ * \file np_eigvals.cc
+ * \brief CPU implementation placeholder of Eigvals Operator
+ */
+#include "./np_eigvals-inl.h"
+
+namespace mxnet {
+namespace op {
+
+// Inputs: A.
+// Outputs: Eig.
+bool EigvalsOpShape(const nnvm::NodeAttrs& attrs,
+                    mxnet::ShapeVector *in_attrs,
+                    mxnet::ShapeVector *out_attrs) {
+  CHECK_EQ(in_attrs->size(), 1U);
+  CHECK_EQ(out_attrs->size(), 1U);
+  const mxnet::TShape& a_shape = (*in_attrs)[0];
+  const mxnet::TShape& eig_shape = (*out_attrs)[0];
+
+  if (shape_is_known(a_shape)) {
+    // Forward shape inference.
+    const int a_ndim = a_shape.ndim();
+    CHECK_GE(a_ndim, 2)
+      << "Array must be at least two-dimensional";
+    CHECK_EQ(a_shape[a_ndim - 2], a_shape[a_ndim - 1])
+      << "Input A's last two dimensions must be equal";
+
+    // Calculate eig shape.
+    std::vector<int> eig_shape_vec(a_ndim - 1, -1);
+    for (int i = 0; i < a_ndim - 1; ++i) {
+      eig_shape_vec[i] = a_shape[i];
+    }
+    mxnet::TShape eig_shape(eig_shape_vec.begin(), eig_shape_vec.end());
+    SHAPE_ASSIGN_CHECK(*out_attrs, 0, eig_shape);
+  } else {
+    // Backward shape inference.
+    if (shape_is_known(eig_shape)) {
+      const int eig_ndim = eig_shape.ndim();
+      CHECK_GE(eig_ndim, 1)
+        << "Outputs W must be at least one-dimensional";
+      std::vector<int> a_shape_vec(eig_ndim + 1);
+      for (int i = 0; i < eig_ndim; ++i) {
+        a_shape_vec[i] = eig_shape[i];
+      }
+      a_shape_vec[eig_ndim] = eig_shape[eig_ndim - 1];
+      mxnet::TShape a_shape(a_shape_vec.begin(), a_shape_vec.end());
+      SHAPE_ASSIGN_CHECK(*in_attrs, 0, a_shape);
+    }
+  }
+  return shape_is_known(*in_attrs) && shape_is_known(*out_attrs);
+}
+
+inline bool EigvalsOpType(const nnvm::NodeAttrs& attrs,
+                          std::vector<int>* in_attrs,
+                          std::vector<int>* out_attrs) {
+  CHECK_EQ(in_attrs->size(), 1U);
+  CHECK_EQ(out_attrs->size(), 1U);
+  int a_type = in_attrs->at(0);
+  // float16 is not supported
 
 Review comment:
   These 2 ops support integer inputs in numpy.
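   For context, numpy's linalg.eigvals promotes integer inputs to a float64 output. An illustrative sketch of type inference with that promotion (not the PR's final code; `common::is_float` and `TYPE_ASSIGN_CHECK` are assumptions taken from the MXNet codebase):

```
// Illustrative only. Integer inputs promote to float64, mirroring numpy;
// float16 stays rejected because there is no fp16 LAPACK path.
inline bool EigvalsOpTypeSketch(const nnvm::NodeAttrs& attrs,
                                std::vector<int>* in_attrs,
                                std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1U);
  CHECK_EQ(out_attrs->size(), 1U);
  const int a_type = in_attrs->at(0);
  CHECK_NE(a_type, mshadow::kFloat16) << "float16 is not supported";
  if (common::is_float(a_type)) {
    TYPE_ASSIGN_CHECK(*out_attrs, 0, a_type);             // float in -> same float out
  } else {
    TYPE_ASSIGN_CHECK(*out_attrs, 0, mshadow::kFloat64);  // int in -> float64 out
  }
  return out_attrs->at(0) != -1;
}
```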


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] eric-haibin-lin commented on a change in pull request #17265: Add bfloat16 floating-point format support based on AMP

2020-01-13 Thread GitBox
eric-haibin-lin commented on a change in pull request #17265: Add bfloat16 
floating-point format support based on AMP 
URL: https://github.com/apache/incubator-mxnet/pull/17265#discussion_r366188211
 
 

 ##
 File path: src/engine/naive_engine.cc
 ##
 @@ -55,7 +55,7 @@ class NaiveEngine final : public Engine {
     std::vector<VarHandle> const_vars;
     std::vector<VarHandle> mutable_vars;
     FnProperty prop;
-    const char* opr_name;
+    std::string opr_name;
 
 Review comment:
   I don't think it's merged yet. I'm ok with the change here.
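   For context, the bug class being fixed: a `const char*` member only borrows the caller's buffer, which dangles if the owner frees it before the (possibly asynchronous) operation runs; a `std::string` member owns a copy. A standalone sketch of the hazard (not MXNet code):

```
#include <iostream>
#include <string>

struct OprBorrowed { const char* name; };  // borrows the caller's buffer
struct OprOwned    { std::string name; };  // owns a copy of the name

OprBorrowed make_borrowed() {
  std::string local = "my_operator";
  return {local.c_str()};  // dangling pointer once `local` is destroyed
}

OprOwned make_owned() {
  std::string local = "my_operator";
  return {local};          // copies the characters; safe afterwards
}

int main() {
  OprBorrowed a = make_borrowed();
  OprOwned b = make_owned();
  // std::cout << a.name;      // undefined behavior: reads freed memory
  std::cout << b.name << "\n";  // fine: prints "my_operator"
}
```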


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] ChaiBapchya commented on a change in pull request #17297: Fix NCCL Cmake autodetect issue

2020-01-13 Thread GitBox
ChaiBapchya commented on a change in pull request #17297: Fix NCCL Cmake 
autodetect issue
URL: https://github.com/apache/incubator-mxnet/pull/17297#discussion_r366185864
 
 

 ##
 File path: cmake/Modules/FindNCCL.cmake
 ##
 @@ -33,6 +33,26 @@
 
 set(NCCL_ROOT_DIR "" CACHE PATH "Folder contains NVIDIA NCCL")
 
+# if CUDAToolkit_FOUND is not found, try default location
+#if (NOT CUDAToolkit_FOUND)
+  if (UNIX)
+set (search_paths "/usr/local/cuda")
+
+find_path(NCCL_INCLUDE_DIRS
+NAMES nccl.h
+PATHS ${search_paths}
 
 Review comment:
   https://cmake.org/cmake/help/v3.4/command/find_path.html
   Looks like either HINTS or PATHS is permitted. Any specific reason to use HINTS?


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[incubator-mxnet] branch master updated (a8452aa -> 58cbd65)

2020-01-13 Thread sxjscience
This is an automated email from the ASF dual-hosted git repository.

sxjscience pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from a8452aa  Fix language selection in get_started options.js (#17284)
 add 58cbd65  [MXNET-978] Higher Order Gradient Support `rsqrt`, `rcbrt`. 
(#15476)

No new revisions were added by this update.

Summary of changes:
 src/operator/tensor/elemwise_unary_op_pow.cc| 56 -
 tests/python/unittest/test_higher_order_grad.py | 40 ++
 2 files changed, 94 insertions(+), 2 deletions(-)



[GitHub] [incubator-mxnet] sxjscience merged pull request #15476: [MXNET-978] Higher Order Gradient Support `rsqrt`, `rcbrt`.

2020-01-13 Thread GitBox
sxjscience merged pull request #15476: [MXNET-978] Higher Order Gradient 
Support `rsqrt`, `rcbrt`.
URL: https://github.com/apache/incubator-mxnet/pull/15476
 
 
   


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] sxjscience commented on issue #15476: [MXNET-978] Higher Order Gradient Support `rsqrt`, `rcbrt`.

2020-01-13 Thread GitBox
sxjscience commented on issue #15476: [MXNET-978] Higher Order Gradient Support 
`rsqrt`, `rcbrt`.
URL: https://github.com/apache/incubator-mxnet/pull/15476#issuecomment-574036070
 
 
   @kshitij12345 Thanks!


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[incubator-mxnet-site] branch asf-site updated: Bump the publish timestamp.

2020-01-13 Thread aaronmarkham
This is an automated email from the ASF dual-hosted git repository.

aaronmarkham pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new f8759d6  Bump the publish timestamp.
f8759d6 is described below

commit f8759d626832d700ac530ed04d4dbcb38efc7ecc
Author: mxnet-ci 
AuthorDate: Tue Jan 14 06:53:59 2020 +

Bump the publish timestamp.
---
 date.txt | 1 +
 1 file changed, 1 insertion(+)

diff --git a/date.txt b/date.txt
new file mode 100644
index 000..6d06ae9
--- /dev/null
+++ b/date.txt
@@ -0,0 +1 @@
+Tue Jan 14 06:53:59 UTC 2020



[GitHub] [incubator-mxnet] apeforest commented on issue #17159: Performance regression from 1.4.1 to 1.5.1

2020-01-13 Thread GitBox
apeforest commented on issue #17159: Performance regression from 1.4.1 to 1.5.1
URL: 
https://github.com/apache/incubator-mxnet/issues/17159#issuecomment-574029399
 
 
   The original issue was a regression between 1.4 and 1.5. Could you also post results built from source using 1.4?


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] kshitij12345 commented on issue #15476: [MXNET-978] Higher Order Gradient Support `rsqrt`, `rcbrt`.

2020-01-13 Thread GitBox
kshitij12345 commented on issue #15476: [MXNET-978] Higher Order Gradient 
Support `rsqrt`, `rcbrt`.
URL: https://github.com/apache/incubator-mxnet/pull/15476#issuecomment-574026788
 
 
   @apeforest @larroy @sxjscience
   Gentle ping for review


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] kshitij12345 commented on issue #15679: [MXNET-978] Higher Order Gradient Support `softsign`, `hard_sigmoid`.

2020-01-13 Thread GitBox
kshitij12345 commented on issue #15679: [MXNET-978] Higher Order Gradient 
Support `softsign`, `hard_sigmoid`.
URL: https://github.com/apache/incubator-mxnet/pull/15679#issuecomment-574026504
 
 
   @apeforest @larroy @sxjscience Gentle ping for review.


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[incubator-mxnet] branch master updated (2938684 -> a8452aa)

2020-01-13 Thread lausen
This is an automated email from the ASF dual-hosted git repository.

lausen pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from 2938684  handle array_like fill_value for np.full; add unit test 
coverage (#17245)
 add a8452aa  Fix language selection in get_started options.js (#17284)

No new revisions were added by this update.

Summary of changes:
 docs/static_site/src/assets/js/options.js | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)



[GitHub] [incubator-mxnet] leezu commented on issue #17284: Fix language selection in get_started options.js

2020-01-13 Thread GitBox
leezu commented on issue #17284: Fix language selection in get_started 
options.js
URL: https://github.com/apache/incubator-mxnet/pull/17284#issuecomment-57409
 
 
   I also tested the change by modifying the file using Chrome's local overrides feature. I don't think PRs can currently be previewed as part of the CI build.


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] leezu merged pull request #17284: Fix language selection in get_started options.js

2020-01-13 Thread GitBox
leezu merged pull request #17284: Fix language selection in get_started 
options.js
URL: https://github.com/apache/incubator-mxnet/pull/17284
 
 
   


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[incubator-mxnet] branch master updated (1a61a86 -> 2938684)

2020-01-13 Thread reminisce
This is an automated email from the ASF dual-hosted git repository.

reminisce pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from 1a61a86  HMM Model (#17120)
 add 2938684  handle array_like fill_value for np.full; add unit test 
coverage (#17245)

No new revisions were added by this update.

Summary of changes:
 python/mxnet/ndarray/numpy/_op.py  | 16 ++-
 python/mxnet/numpy/multiarray.py   |  4 ++-
 python/mxnet/symbol/numpy/_symbol.py   | 12 ++---
 tests/python/unittest/test_numpy_op.py | 49 ++
 4 files changed, 76 insertions(+), 5 deletions(-)



[incubator-mxnet] branch master updated (75b93bb -> 1a61a86)

2020-01-13 Thread haoj
This is an automated email from the ASF dual-hosted git repository.

haoj pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from 75b93bb  [CD] fix CD pipeline (#17259)
 add 1a61a86  HMM Model (#17120)

No new revisions were added by this update.

Summary of changes:
 python/mxnet/numpy/utils.py   | 3 ++-
 src/operator/tensor/broadcast_reduce_op.h | 2 +-
 2 files changed, 3 insertions(+), 2 deletions(-)



[GitHub] [incubator-mxnet] leezu commented on a change in pull request #17297: Fix NCCL Cmake autodetect issue

2020-01-13 Thread GitBox
leezu commented on a change in pull request #17297: Fix NCCL Cmake autodetect 
issue
URL: https://github.com/apache/incubator-mxnet/pull/17297#discussion_r366166613
 
 

 ##
 File path: cmake/Modules/FindNCCL.cmake
 ##
 @@ -33,6 +33,26 @@
 
 set(NCCL_ROOT_DIR "" CACHE PATH "Folder contains NVIDIA NCCL")
 
+# if CUDAToolkit_FOUND is not found, try default location
+#if (NOT CUDAToolkit_FOUND)
+  if (UNIX)
+set (search_paths "/usr/local/cuda")
+
+find_path(NCCL_INCLUDE_DIRS
+NAMES nccl.h
+PATHS ${search_paths}
 
 Review comment:
   Should this be HINTS instead of PATHS?


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] reminisce merged pull request #17245: Handle extra cases for np.full

2020-01-13 Thread GitBox
reminisce merged pull request #17245: Handle extra cases for np.full
URL: https://github.com/apache/incubator-mxnet/pull/17245
 
 
   


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] haojin2 merged pull request #17120: Fix Several Issues In the HMM Model

2020-01-13 Thread GitBox
haojin2 merged pull request #17120: Fix Several Issues In the HMM Model
URL: https://github.com/apache/incubator-mxnet/pull/17120
 
 
   


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] haojin2 commented on issue #17298: [MXNET-1438] Adding SDML loss function

2020-01-13 Thread GitBox
haojin2 commented on issue #17298: [MXNET-1438] Adding SDML loss function
URL: https://github.com/apache/incubator-mxnet/pull/17298#issuecomment-574016263
 
 
   @anjishnu Please address the sanity errors: 
http://jenkins.mxnet-ci.amazon-ml.com/job/mxnet-validation/job/sanity/job/PR-17298/1/display/redirect
 . Also, can you randomize your unit test so that we're covering more numerically distinct cases?


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] apeforest commented on a change in pull request #17294: fix build from source instruction

2020-01-13 Thread GitBox
apeforest commented on a change in pull request #17294: fix build from source 
instruction
URL: https://github.com/apache/incubator-mxnet/pull/17294#discussion_r366157969
 
 

 ##
 File path: docs/static_site/src/pages/get_started/ubuntu_setup.md
 ##
 @@ -63,11 +63,11 @@ Then download [cuDNN 
7.1.4](https://developer.nvidia.com/cudnn).
 Unzip the file and change to the cuDNN root directory. Move the header and 
libraries to your local CUDA Toolkit folder:
 
 ```bash
-tar xvzf cudnn-9.2-linux-x64-v7.1
 
 Review comment:
   This PR only changes formatting and spacing. For content changes, please create a separate PR.


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] apeforest commented on a change in pull request #17294: fix build from source instruction

2020-01-13 Thread GitBox
apeforest commented on a change in pull request #17294: fix build from source 
instruction
URL: https://github.com/apache/incubator-mxnet/pull/17294#discussion_r366157949
 
 

 ##
 File path: docs/static_site/src/pages/get_started/ubuntu_setup.md
 ##
 @@ -63,11 +63,11 @@ Then download [cuDNN 
7.1.4](https://developer.nvidia.com/cudnn).
 Unzip the file and change to the cuDNN root directory. Move the header and 
libraries to your local CUDA Toolkit folder:
 
 ```bash
-tar xvzf cudnn-9.2-linux-x64-v7.1
-sudo cp -P cuda/include/cudnn.h /usr/local/cuda/include
 
 Review comment:
   This PR only changes formatting and spacing. For content changes, please create a separate PR.


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] apeforest commented on issue #17294: fix build from source instruction

2020-01-13 Thread GitBox
apeforest commented on issue #17294: fix build from source instruction
URL: https://github.com/apache/incubator-mxnet/pull/17294#issuecomment-574008165
 
 
   > Why do we have 2 build from source pages, this is all extremely confusing.
   > 
   > https://mxnet.apache.org/get_started/ubuntu_setup.html
   > https://mxnet.apache.org/get_started/build_from_source
   
   This is out of the scope of this PR. Please raise it in a separate PR.


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] ZhennanQin commented on a change in pull request #17265: Add bfloat16 floating-point format support based on AMP

2020-01-13 Thread GitBox
ZhennanQin commented on a change in pull request #17265: Add bfloat16 
floating-point format support based on AMP 
URL: https://github.com/apache/incubator-mxnet/pull/17265#discussion_r366153708
 
 

 ##
 File path: src/engine/naive_engine.cc
 ##
 @@ -55,7 +55,7 @@ class NaiveEngine final : public Engine {
     std::vector<VarHandle> const_vars;
     std::vector<VarHandle> mutable_vars;
     FnProperty prop;
-    const char* opr_name;
+    std::string opr_name;
 
 Review comment:
   This is a bugfix for the naive engine. If I remember correctly, MXNet already fixed this with a different approach, so we can drop this change from this PR.


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] pengzhao-intel commented on a change in pull request #17265: Add bfloat16 floating-point format support based on AMP

2020-01-13 Thread GitBox
pengzhao-intel commented on a change in pull request #17265: Add bfloat16 
floating-point format support based on AMP 
URL: https://github.com/apache/incubator-mxnet/pull/17265#discussion_r366151188
 
 

 ##
 File path: .gitmodules
 ##
 @@ -6,7 +6,7 @@
url = https://github.com/dmlc/ps-lite
 [submodule "3rdparty/dlpack"]
path = 3rdparty/dlpack
-   url = https://github.com/dmlc/dlpack
+   url = https://github.com/ElaineBao/dlpack.git
 
 Review comment:
   Definitely :) We're working on a PR for the related code in dlpack: https://github.com/dmlc/dlpack/issues/45


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[incubator-mxnet] branch master updated (461f167 -> 75b93bb)

2020-01-13 Thread zhasheng
This is an automated email from the ASF dual-hosted git repository.

zhasheng pushed a change to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git.


from 461f167  porting numpy-compatible hstack to master and add dstack for 
interoperability (#17030)
 add 75b93bb  [CD] fix CD pipeline (#17259)

No new revisions were added by this update.

Summary of changes:
 cd/Jenkinsfile_release_job | 10 +-
 cd/python/pypi/Jenkins_pipeline.groovy |  4 ++--
 ci/docker/Dockerfile.build.ubuntu_build_cuda   |  6 +++---
 ci/docker/Dockerfile.build.ubuntu_gpu_cu100|  1 +
 ci/docker/Dockerfile.build.ubuntu_gpu_cu101|  1 +
 ci/docker/Dockerfile.build.ubuntu_gpu_cu102|  1 +
 ci/docker/Dockerfile.build.ubuntu_gpu_cu90 |  1 +
 ci/docker/Dockerfile.build.ubuntu_gpu_cu92 |  1 +
 ci/docker/Dockerfile.publish.ubuntu1404_cpu|  4 ++--
 ci/docker/Dockerfile.publish.ubuntu1404_gpu|  4 ++--
 ci/docker/install/{ubuntu_ar.sh => ubuntu_binutils.sh} |  2 +-
 ci/docker/runtime_functions.sh |  7 +--
 tests/python/mkl/test_mkldnn.py|  4 ++--
 tests/python/unittest/test_operator.py |  4 +++-
 14 files changed, 30 insertions(+), 20 deletions(-)
 rename ci/docker/install/{ubuntu_ar.sh => ubuntu_binutils.sh} (93%)



[GitHub] [incubator-mxnet] szha merged pull request #17259: [CD] fix CD pipeline

2020-01-13 Thread GitBox
szha merged pull request #17259: [CD] fix CD pipeline
URL: https://github.com/apache/incubator-mxnet/pull/17259
 
 
   


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] anjishnu opened a new pull request #17298: [MXNET-1438] Adding SDML loss function

2020-01-13 Thread GitBox
anjishnu opened a new pull request #17298: [MXNET-1438] Adding SDML loss 
function
URL: https://github.com/apache/incubator-mxnet/pull/17298
 
 
   ## Description ##
   (Brief description on what this PR is about)
   
   ## Checklist ##
   ### Essentials ###
   Please feel free to remove inapplicable items for your PR.
   - [x] The PR title starts with [MXNET-$JIRA_ID], where $JIRA_ID refers to 
the relevant [JIRA issue](https://issues.apache.org/jira/projects/MXNET/issues) 
created (except PRs with tiny changes)
   - [x] Changes are complete (i.e. I finished coding on this PR)
   - [x] All changes have test coverage:
   - [x] Unit tests are added for small changes to verify correctness (e.g. 
adding a new operator)
   - [x] Code is well-documented: 
   - [x] For user-facing API changes, API doc string has been updated. 
   - [x] To the best of my knowledge, examples are either not affected by this 
change, or have been fixed to be compatible with this change
   - Check the API doc at 
https://mxnet-ci-doc.s3-accelerate.dualstack.amazonaws.com/PR-$PR_ID/$BUILD_ID/index.html
   
   ### Changes ###
   - [x] Added new loss for information retrieval - Smoothed Deep Metric 
Learning Loss with relevant API doc.
   
   ## Comments ##
   - Added loss function outlined in https://arxiv.org/abs/1905.12786 - example 
to follow.
   


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] apeforest commented on a change in pull request #17297: Fix NCCL Cmake autodetect issue

2020-01-13 Thread GitBox
apeforest commented on a change in pull request #17297: Fix NCCL Cmake 
autodetect issue
URL: https://github.com/apache/incubator-mxnet/pull/17297#discussion_r366134308
 
 

 ##
 File path: cmake/Modules/FindNCCL.cmake
 ##
 @@ -33,6 +33,26 @@
 
 set(NCCL_ROOT_DIR "" CACHE PATH "Folder contains NVIDIA NCCL")
 
+# if CUDAToolkit_FOUND is not found, try default location
+#if (NOT CUDAToolkit_FOUND)
 
 Review comment:
   Remove?


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] apeforest commented on a change in pull request #17297: Fix NCCL Cmake autodetect issue

2020-01-13 Thread GitBox
apeforest commented on a change in pull request #17297: Fix NCCL Cmake 
autodetect issue
URL: https://github.com/apache/incubator-mxnet/pull/17297#discussion_r366134380
 
 

 ##
 File path: cmake/Modules/FindNCCL.cmake
 ##
 @@ -33,6 +33,26 @@
 
 set(NCCL_ROOT_DIR "" CACHE PATH "Folder contains NVIDIA NCCL")
 
+# if CUDAToolkit_FOUND is not found, try default location
+#if (NOT CUDAToolkit_FOUND)
+  if (UNIX)
+set (search_paths "/usr/local/cuda")
+
+find_path(NCCL_INCLUDE_DIRS
+NAMES nccl.h
+PATHS ${search_paths}
+PATH_SUFFIXES include
+)
+
+find_library(NCCL_LIBRARIES
+NAMES nccl
+PATHS ${search_paths}
+PATH_SUFFIXES lib
+)
+
+  endif()
+  #endif()
 
 Review comment:
   Remove 


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] apeforest commented on a change in pull request #17297: Fix NCCL Cmake autodetect issue

2020-01-13 Thread GitBox
apeforest commented on a change in pull request #17297: Fix NCCL Cmake 
autodetect issue
URL: https://github.com/apache/incubator-mxnet/pull/17297#discussion_r366134334
 
 

 ##
 File path: cmake/Modules/FindNCCL.cmake
 ##
 @@ -33,6 +33,26 @@
 
 set(NCCL_ROOT_DIR "" CACHE PATH "Folder contains NVIDIA NCCL")
 
+# if CUDAToolkit_FOUND is not found, try default location
+#if (NOT CUDAToolkit_FOUND)
+  if (UNIX)
+set (search_paths "/usr/local/cuda")
+
+find_path(NCCL_INCLUDE_DIRS
+NAMES nccl.h
 
 Review comment:
   Indent


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] anirudh2290 commented on a change in pull request #16654: Multithreaded Inference Support

2020-01-13 Thread GitBox
anirudh2290 commented on a change in pull request #16654: Multithreaded 
Inference Support
URL: https://github.com/apache/incubator-mxnet/pull/16654#discussion_r366134125
 
 

 ##
 File path: src/imperative/cached_op.h
 ##
 @@ -26,8 +26,195 @@
 #include 
 #include 
 #include 
+#include 
+#include "../operator/operator_common.h"
+#include "../operator/subgraph/common.h"
+#include "./imperative_utils.h"
 
 namespace mxnet {
+namespace {
+
+  static const char FULL[] = "full";
+  static const char FORWARD[] = "forward";
+  static const char BACKWARD[] = "backward";
+  static const char REF_COUNT[] = "ref_count";
+  static const char MEM_PLAN[] = "mem_plan";
+  static const char STORAGE_PLAN[] = "storage_plan";
+
+std::string AddPrefix(const std::string& prefix,
+                      const std::string& s) {
+  return prefix + "_" + s;
+}
+
+/* \brief create a forward graph from the Symbol */
+void CreateForwardGraph(const nnvm::Symbol &sym, nnvm::Graph *fwd_graph) {
+  using namespace nnvm;
+  static const auto _copy_op = Op::Get("_copy");
+  {
+    NodeEntryMap<size_t> dedup_out;
+    for (const NodeEntry& nodeEntry : sym.outputs) {
+      if (dedup_out.find(nodeEntry) != dedup_out.end()) {
+        NodePtr copy_node = Node::Create();
+        copy_node->attrs.op = _copy_op;
+        copy_node->attrs.name =
+            nodeEntry.node->attrs.name + "_copy" + std::to_string(dedup_out[nodeEntry]++);
+        copy_node->inputs.emplace_back(nodeEntry);
+        if (_copy_op->attr_parser != nullptr) {
+          _copy_op->attr_parser(&(copy_node->attrs));
+        }
+        fwd_graph->outputs.emplace_back(std::move(copy_node));
+      } else {
+        dedup_out.emplace(nodeEntry, 0);
+        fwd_graph->outputs.push_back(nodeEntry);
+      }
+    }
+  }
+}
+
+/* \brief construct fwd_graph, grad_graph and full_graph from symbol */
+void CreateFullGraph(const nnvm::Symbol& sym,
+                     nnvm::Graph* fwd_graph,
+                     nnvm::Graph* grad_graph,
+                     nnvm::Graph* full_graph,
+                     std::vector<nnvm::NodeEntry>* ograd_entries,
+                     std::unordered_map<uint32_t, uint32_t>* fwd_input_to_grad_output) {
+  using namespace nnvm;
+  static const std::vector<const Op*> zero_ops{Op::Get("zeros_like"), Op::Get("_zeros")};
+  CreateForwardGraph(sym, fwd_graph);
+
+  bool do_elim_common_expr = dmlc::GetEnv("MXNET_ELIMINATE_COMMON_EXPR", true);
+  if (do_elim_common_expr)
+    *fwd_graph = exec::EliminateCommonExpr(std::move(*fwd_graph));
+
+  // construct backward graph
+  {
 
 Review comment:
   Added a new function.


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] wuxun-zhang commented on issue #17231: cannot quantization example

2020-01-13 Thread GitBox
wuxun-zhang commented on issue #17231: cannot quantization example
URL: 
https://github.com/apache/incubator-mxnet/issues/17231#issuecomment-573973756
 
 
   @zhhoper Any update for this issue?


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] ChaiBapchya opened a new pull request #17297: Fix NCCL Cmake autodetect issue

2020-01-13 Thread GitBox
ChaiBapchya opened a new pull request #17297: Fix NCCL Cmake autodetect issue
URL: https://github.com/apache/incubator-mxnet/pull/17297
 
 
   ## Description ##
   Fixes https://github.com/apache/incubator-mxnet/issues/17239
   
   ## Checklist ##
   ### Essentials ###
   - [ ] Changes are complete (i.e. I finished coding on this PR)
   - [ ] Code is well-documented: 
   - [ ] To the best of my knowledge, examples are either not affected by this 
change, or have been fixed to be compatible with this change
   
   ### Changes ###
   modified:   cmake/Modules/FindNCCL.cmake
   
   ## Test ##
   Tested with following command
   ```
   cmake -GNinja -DUSE_CUDA=ON -DCMAKE_CUDA_COMPILER_LAUNCHER=ccache 
-DCMAKE_C_COMPILER_LAUNCHER=ccache -DCMAKE_CXX_COMPILER_LAUNCHER=ccache 
-DCMAKE_BUILD_TYPE=Release -DUSE_CUDNN=ON -DUSE_NCCL=ON ..
   ```
   
   Without this branch, it failed with NCCL not found
   ```
   Could NOT find NCCL (missing: NCCL_INCLUDE_DIRS NCCL_LIBRARIES)
   CMake Warning at CMakeLists.txt:636 (message):
 Could not find NCCL libraries
   ```
   
   With this PR, it passes.


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] samskalicky commented on a change in pull request #17270: [WIP] Dynamic custom operator GPU support

2020-01-13 Thread GitBox
samskalicky commented on a change in pull request #17270: [WIP] Dynamic custom 
operator GPU support
URL: https://github.com/apache/incubator-mxnet/pull/17270#discussion_r366123444
 
 

 ##
 File path: src/c_api/c_api.cc
 ##
 @@ -720,8 +751,11 @@ int MXLoadLib(const char *path) {
 gradOp.set_attr("TIsLayerOpBackward", true, plevel);
 gradOp.set_attr("FStatefulComputeEx",
 fstateful_backward, plevel);
+gradOp.set_attr("FStatefulComputeEx",
 
 Review comment:
   > What is the target supported contexts for this feature? Do we target just 
cpu and gpu, or we want to support other hardware backends, too?
   
   Yes, let's just target the currently supported contexts. We can swing back around later when we add support for dynamic loading of contexts. We'll make sure to implement something generic in this PR (i.e. setting the context with a string rather than an enum) so it will just work.
   
   > Currently the dispatch logic is inside FCompute, which is a bit different 
from existing mxnet users' experience. Usually the FCompute only declares the 
computation, and leave the dispatch logic to MXNet executor. And it's unclear 
how it supports the case where the same op is extended by a library for Intel 
CPUs and NVIDIA GPUs - they may hard-code the dispatch logic to only care about 
their own hardware. How do we handle such conflicts?

   Good catch. We'll change it so that users can specify the context and the Forward/Backward function in the registration. But for custom operators we can only support what the current implementation in MXNet allows, which is that the top-level scope is an operator that has implementations for different contexts.
   
   What you're describing is the top level being the context, with a distinct operator registration inside it. As you describe next, this organization is not supported in MXNet. We should discuss it as part of a separate feature enhancement (the scope of this PR is only to add GPU support to custom operators).
   
   > Furthermore, currently the infer_shape/infer_dtype is not context-aware, 
i.e. CPU and GPU infers the same dtype. However, it may not be true (e.g. cpu 
supports fp32 and bfloat16, and gpu supports fp32 and fp16). How do we handle 
these attribute conflict?
   > 
   > I had a short discussion with @yzhliu and we saw two potential fixes:
   > 
   > 1. make infer_shape/infer_dtype context aware. This way we can have 
different infer_dtype function for cpu & gpu. MXNet needs to dispatch to the 
function based on the current context. For example, 
`op.set_attr("FInferType", my_infer_type_function)` for cpu 
specific type inference, and `op.set_attr("FInferType", 
my_infer_type_function_gpu)`for gpu.
   > 2. Another way is to register ops with different names (e.g. 'cpu_gemm' 
and 'gpu_gemm'). This way they can have different infer_attr functions. But we 
don't want users to modify their model definition in the training script to 
these names. To mitigate that we can have an API to allow user to provide a 
mapping (e.g. {'gemm' -> 'cpu_gemm'}) for mxnet to map an op to another op 
registered in the backend.
   
   This is a good idea; we should have a separate PR to add this new feature to the MXNet backend and then extend it to custom operators. But it is out of scope for this PR.
   
   Another point is that current custom operator support does not allow for 
registering CPU implementations in one library, and GPU implementations for the 
same operator in another. This merging of functionality from different 
libraries is a good idea for a future feature.
   
   > Finally, is there a plan to support dynamic custom context? :P @samskalicky
   
   I'll add it to the list behind: custom data loaders, graph passes, etc... :D
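   To make the registration change discussed above concrete, here is a hypothetical sketch of per-context registration. The string-keyed `setForward(fn, "cpu")` overload is the idea under discussion, not a confirmed final API; the other types follow MXNet's custom-op header lib_api.h:

```
#include "lib_api.h"  // MXNet custom-op header: MXReturnValue, MXTensor, OpResource

MXReturnValue forwardCPU(std::map<std::string, std::string> attrs,
                         std::vector<MXTensor> inputs,
                         std::vector<MXTensor> outputs,
                         OpResource res) {
  // ... CPU kernel ...
  return MX_SUCCESS;
}

MXReturnValue forwardGPU(std::map<std::string, std::string> attrs,
                         std::vector<MXTensor> inputs,
                         std::vector<MXTensor> outputs,
                         OpResource res) {
  // ... launch a CUDA kernel ...
  return MX_SUCCESS;
}

// One operator, two context-specific kernels; dispatch is keyed by a
// context string rather than a hard-coded enum (hypothetical overload).
REGISTER_OP(my_gemm)
.setForward(forwardCPU, "cpu")
.setForward(forwardGPU, "gpu");
```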


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] larroy commented on a change in pull request #17206: Windows dev environment configuration, update install instructions from source in the docs.

2020-01-13 Thread GitBox
larroy commented on a change in pull request #17206: Windows dev environment 
configuration, update install instructions from source in the docs.
URL: https://github.com/apache/incubator-mxnet/pull/17206#discussion_r366120725
 
 

 ##
 File path: ci/windows_dev_env/requirements.txt
 ##
 @@ -0,0 +1,4 @@
+psutil
+boto3
+python-jenkins
 
 Review comment:
   We can remove it in the future; it doesn't hurt to have it there now, and I don't want additional scripts at the moment. I don't think this is a big concern, even though you are right.


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] larroy commented on issue #15492: No CMAKE_CUDA_COMPILER could be found

2020-01-13 Thread GitBox
larroy commented on issue #15492: No CMAKE_CUDA_COMPILER could be found
URL: 
https://github.com/apache/incubator-mxnet/issues/15492#issuecomment-573959749
 
 
   Let's add a note about CMAKE_CUDA_COMPILER to the install instructions.
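   For example, the note could suggest pointing CMake at nvcc explicitly, along the lines of `cmake -DCMAKE_CUDA_COMPILER=/usr/local/cuda/bin/nvcc ..` (the path assumes a default CUDA install location).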


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] larroy commented on issue #15492: No CMAKE_CUDA_COMPILER could be found

2020-01-13 Thread GitBox
larroy commented on issue #15492: No CMAKE_CUDA_COMPILER could be found
URL: 
https://github.com/apache/incubator-mxnet/issues/15492#issuecomment-573959686
 
 
   With CUDA 9.2, as in the CI environment, this is not a problem because nvcc is added to the path.


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] rongzha1 commented on a change in pull request #17265: Add bfloat16 floating-point format support based on AMP

2020-01-13 Thread GitBox
rongzha1 commented on a change in pull request #17265: Add bfloat16 
floating-point format support based on AMP 
URL: https://github.com/apache/incubator-mxnet/pull/17265#discussion_r366115432
 
 

 ##
 File path: include/mxnet/ndarray.h
 ##
 @@ -770,6 +770,12 @@ class NDArray {
*/
   NDArray Reorder2Default() const;
 
+/*
+   * This creates a new NDArray using f32 with the reordered data.
+   * It doesn't affect the data of the original NDArray.
+   */
+  NDArray Reorder2DefaultFp32() const;
 
 Review comment:
   OK, will change it to Reorder2DefaultFloatFormat()


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] aaronmarkham commented on a change in pull request #17241: Add CustomOp tutorial doc

2020-01-13 Thread GitBox
aaronmarkham commented on a change in pull request #17241: Add CustomOp 
tutorial doc
URL: https://github.com/apache/incubator-mxnet/pull/17241#discussion_r366111466
 
 

 ##
 File path: example/extensions/lib_custom_op/README.md
 ##
 @@ -0,0 +1,118 @@
+CustomOp Example and Tutorial
+
+
+## Getting Started
+
+### Have MXNet Ready:
+
+First, install MXNet, either by compiling from source code or by downloading a nightly build. It doesn’t matter whether the build comes with CUDA or MKLDNN, because the custom operator doesn’t interact with the execution of other native MXNet operators.
+
+### Run An Example:
+
+You can start getting familiar with custom operators by running the examples we provide in the **example/extensions/lib_custom_op** directory. Let’s start with the gemm (Generalized Matrix Multiplication) operator, a common linear algebra operator. Go to that directory and follow these steps:
+
+1. Run `make gemm_lib`; the Makefile will generate a dynamic library **libgemm_lib.so** compiled from gemm_lib.cc. This is the library you are going to load, and it contains everything needed for the custom gemm operator.
+2. Run `python test_gemm.py`; it first loads the above .so library, finds the operators, registers them in the MXNet backend, and prints "Found x operators". It then invokes the operator like a regular MXNet operator and outputs the result.
+
+### Basic Files For Gemm Library:
+
+* **lib_custom_op/gemm_lib.cc**: This file has the source code implementation of all required components of a custom operator, as well as the registration of the custom operator.
+
+* **lib_custom_op/Makefile**: Compiles the source code into a dynamic shared library, using the header file **include/mxnet/lib_api.h** from the MXNet source code. Currently the custom operator is compatible with C++11 onwards.
+
+* **lib_custom_op/test_gemm.py**: This file calls `mx.library.load(‘libgemm_lib.so’)` to load the library containing the custom operator, invokes the operator using both the ndarray and symbol APIs, and prints the outputs of the forward and backward passes. The outputs should be the same as those of the regular MXNet gemm operator.
+
+## Writing Custom Operators:
+
+### Regular Custom Operator:
+
+There are several basic building blocks for making a (stateless) custom 
operator:
+
+* [parseAttrs](./gemm_lib.cc#L118) - Attribute Parser:
+    * This function specifies the number of input and output tensors for the custom operator; it is also where a custom operator can validate the attributes (i.e. options) specified by the user.
+
+        MXReturnValue parseAttrs(
+            std::map<std::string, std::string> attrs,
+            int* num_in,
+            int* num_out)
+
+
+* [inferType](./gemm_lib.cc#L124) - Type Inference:
+    * This function specifies how the custom operator infers output data types from input data types.
+
+        MXReturnValue inferType(
+            std::map<std::string, std::string> attrs,
+            std::vector<int> &intypes,
+            std::vector<int> &outtypes)
+
+* [inferShape](./gemm_lib.cc#L143) - Shape Inference:
+    * This function specifies how the custom operator infers output tensor shapes from input shapes.
+
+        MXReturnValue inferShape(
+            std::map<std::string, std::string> attrs,
+            std::vector<std::vector<unsigned int>> &inshapes,
+            std::vector<std::vector<unsigned int>> &outshapes)
+
+* [forward](./gemm_lib.cc#L56) - Forward function:
+    * This function specifies the computation of the forward pass of the operator.
+
+        MXReturnValue forward(
+            std::map<std::string, std::string> attrs,
+            std::vector<MXTensor> inputs,
+            std::vector<MXTensor> outputs,
+            OpResource res)
+
+* [REGISTER_OP(my_op_name) Macro](./gemm_lib.cc#L169):
+    * This macro registers custom operator to all MXNet APIs by its name, and you need to call setters to bind the above functions to the registered operator.
+
+        REGISTER_OP(my_op_name)
+        .setForward(forward)
+        .setParseAttrs(parseAttrs)
+        .setInferType(inferType)
+        .setInferShape(inferShape);
+
+Also there are some optional functions you can specify:
+
+* [backward](./gemm_lib.cc#L90) - Backward Gradient function:
+    * This function specifies the computation of the backward pass of the operator.
+
+        MXReturnValue backward(
+            std::map<std::string, std::string> attrs,
+            std::vector<MXTensor> inputs,
+            std::vector<MXTensor> outputs,
+            OpResource res)
+
+* [mutateInputs](./gemm_lib.cc#L214) - Specify mutable inputs:
+    * This function allows you to mark some inputs as mutable, which is useful when using aux parameters for BatchNorm-like operators.
+
+        MXReturnValue mutateInputs(
+            std::map<std::string, std::string> attrs,
+            std::vector<int> &input_indices)
+
+Let’s take a closer look at those registry functions:
+
+* **parseAttrs**: This function takes 3 arguments. The 1st argument is an input, which is the attributes passed all the way from the Python code. When the user calls `mx.nd.my_op_name(s,t,keyword=1)`, the keyword is passed to the

[GitHub] [incubator-mxnet] aaronmarkham commented on a change in pull request #17241: Add CustomOp tutorial doc

2020-01-13 Thread GitBox
aaronmarkham commented on a change in pull request #17241: Add CustomOp 
tutorial doc
URL: https://github.com/apache/incubator-mxnet/pull/17241#discussion_r366110748
 
 

 ##
 File path: example/extensions/lib_custom_op/README.md
 ##
 @@ -0,0 +1,118 @@
+CustomOp Example and Tutorial
+
+
+## Getting Started
+
+### Have MXNet Ready:
+
+First, install MXNet, either by compiling from source code or by downloading a nightly build. It doesn’t matter whether the build comes with CUDA or MKLDNN, because the custom operator doesn’t interact with the execution of other native MXNet operators.
+
+### Run An Example:
+
+You can start getting familiar with custom operators by running the examples we provide in the **example/extensions/lib_custom_op** directory. Let’s start with the gemm (Generalized Matrix Multiplication) operator, a common linear algebra operator. Go to that directory and follow these steps:
+
+1. Run `make gemm_lib`; the Makefile will generate a dynamic library **libgemm_lib.so** compiled from gemm_lib.cc. This is the library you are going to load, and it contains everything needed for the custom gemm operator.
+2. Run `python test_gemm.py`; it first loads the above .so library, finds the operators, registers them in the MXNet backend, and prints "Found x operators". It then invokes the operator like a regular MXNet operator and outputs the result.
+
+### Basic Files For Gemm Library:
+
+* **lib_custom_op/gemm_lib.cc**: This file has the source code implementation of all required components of a custom operator, as well as the registration of the custom operator.
+
+* **lib_custom_op/Makefile**: Compiles the source code into a dynamic shared library, using the header file **include/mxnet/lib_api.h** from the MXNet source code. Currently the custom operator is compatible with C++11 onwards.
+
+* **lib_custom_op/test_gemm.py**: This file calls `mx.library.load(‘libgemm_lib.so’)` to load the library containing the custom operator, invokes the operator using both the ndarray and symbol APIs, and prints the outputs of the forward and backward passes. The outputs should be the same as those of the regular MXNet gemm operator.
+
+## Writing Custom Operators:
+
+### Regular Custom Operator:
+
+There are several basic building blocks for making a (stateless) custom 
operator:
+
+* [parseAttrs](./gemm_lib.cc#L118) - Attribute Parser:
+    * This function specifies the number of input and output tensors for the custom operator; it is also where a custom operator can validate the attributes (i.e. options) specified by the user.
+
+        MXReturnValue parseAttrs(
+            std::map<std::string, std::string> attrs,
+            int* num_in,
+            int* num_out)
+
+
+* [inferType](./gemm_lib.cc#L124) - Type Inference:
+    * This function specifies how the custom operator infers output data types from input data types.
+
+        MXReturnValue inferType(
+            std::map<std::string, std::string> attrs,
+            std::vector<int> &intypes,
+            std::vector<int> &outtypes)
+
+* [inferShape](./gemm_lib.cc#L143) - Shape Inference:
+    * This function specifies how the custom operator infers output tensor shapes from input shapes.
+
+        MXReturnValue inferShape(
+            std::map<std::string, std::string> attrs,
+            std::vector<std::vector<unsigned int>> &inshapes,
+            std::vector<std::vector<unsigned int>> &outshapes)
+
+* [forward](./gemm_lib.cc#L56) - Forward function:
+    * This function specifies the computation of the forward pass of the operator.
+
+        MXReturnValue forward(
+            std::map<std::string, std::string> attrs,
+            std::vector<MXTensor> inputs,
+            std::vector<MXTensor> outputs,
+            OpResource res)
+
+* [REGISTER_OP(my_op_name) Macro](./gemm_lib.cc#L169):
+* This macro registers custom operator to all MXNet APIs by its name, and 
you need to call setters to bind the above functions to the registered operator.
 
 Review comment:
   ```suggestion
   * This macro registers the custom operator to all of the MXNet APIs by 
its name. You need to call setters to bind the above functions to the 
registered operator.
   ```
   Is the last sentence clear enough? I'm not really sure what you mean.
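   For digest readers, a minimal sketch (not the tutorial's gemm_lib.cc) of how the registration functions quoted above fit together for a gemm-like 2-input/1-output operator; it assumes MXNet's custom-op header lib_api.h, which defines `MXReturnValue` (`MX_SUCCESS`/`MX_FAIL`) and the signatures shown in the README:

```
#include "lib_api.h"  // MXNet custom-op header

MXReturnValue parseAttrs(std::map<std::string, std::string> attrs,
                         int* num_in, int* num_out) {
  *num_in = 2;   // gemm takes two input matrices
  *num_out = 1;  // and produces one output matrix
  return MX_SUCCESS;
}

MXReturnValue inferType(std::map<std::string, std::string> attrs,
                        std::vector<int>& intypes,
                        std::vector<int>& outtypes) {
  if (intypes[0] != intypes[1]) return MX_FAIL;  // input dtypes must agree
  outtypes[0] = intypes[0];                      // output dtype matches inputs
  return MX_SUCCESS;
}

MXReturnValue inferShape(std::map<std::string, std::string> attrs,
                         std::vector<std::vector<unsigned int>>& inshapes,
                         std::vector<std::vector<unsigned int>>& outshapes) {
  if (inshapes[0][1] != inshapes[1][0]) return MX_FAIL;  // (n,k) x (k,m)
  outshapes[0] = {inshapes[0][0], inshapes[1][1]};       // -> (n,m)
  return MX_SUCCESS;
}

// Binding mirrors the REGISTER_OP block in the README; a forward
// function would be attached the same way via setForward.
REGISTER_OP(my_gemm)
.setParseAttrs(parseAttrs)
.setInferType(inferType)
.setInferShape(inferShape);
```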


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] aaronmarkham commented on a change in pull request #17241: Add CustomOp tutorial doc

2020-01-13 Thread GitBox
aaronmarkham commented on a change in pull request #17241: Add CustomOp 
tutorial doc
URL: https://github.com/apache/incubator-mxnet/pull/17241#discussion_r36650
 
 

 ##
 File path: example/extensions/lib_custom_op/README.md
 ##
 @@ -0,0 +1,118 @@
+CustomOp Example and Tutorial
+
+
+## Getting Started
+
+### Have MXNet Ready:
+
+First, install MXNet, either by compiling from source code or by downloading a nightly build. It doesn’t matter whether the build comes with CUDA or MKLDNN, because the custom operator doesn’t interact with the execution of other native MXNet operators.
+
+### Run An Example:
+
+You can start getting familiar with custom operators by running the examples we provide in the **example/extensions/lib_custom_op** directory. Let’s start with the gemm (Generalized Matrix Multiplication) operator, a common linear algebra operator. Go to that directory and follow these steps:
+
+1. Run `make gemm_lib`; the Makefile will generate a dynamic library **libgemm_lib.so** compiled from gemm_lib.cc. This is the library you are going to load, and it contains everything needed for the custom gemm operator.
+2. Run `python test_gemm.py`; it first loads the above .so library, finds the operators, registers them in the MXNet backend, and prints "Found x operators". It then invokes the operator like a regular MXNet operator and outputs the result.
+
+### Basic Files For Gemm Library:
+
+* **lib_custom_op/gemm_lib.cc**: This file has a source code implementation of all the required components of a custom operator, as well as the registration of the custom operator itself.
+
+* **lib_custom_op/Makefile**: Compiles the source code into a dynamic shared library, using the header file **include/mxnet/lib_api.h** from the MXNet source code. Currently the custom operator library is compatible with C++11 onwards.
+
+* **lib_custom_op/test_gemm.py**: This file calls `mx.library.load('libgemm_lib.so')` to load the library containing the custom operator, invokes the operator using both the NDArray and Symbol APIs, and prints the outputs of the forward and backward passes. The outputs should be the same as those of the regular MXNet `gemm` operator.
+
+## Writing Custom Operators:
+
+### Regular Custom Operator:
+
+There are several basic building blocks for making a (stateless) custom 
operator:
+
+* [parseAttrs](./gemm_lib.cc#L118) - Attribute Parser:
+* This function specifies the number of input and output tensors for the custom operator; it is also where a custom operator can validate the attributes (i.e., options) specified by the user.
+
+MXReturnValue parseAttrs(
+std::map<std::string, std::string> attrs,
+int* num_in,
+int* num_out)
+
+
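For concreteness, here is a minimal sketch of what a parser for a gemm-like operator could look like. It is not the code from gemm_lib.cc; the `transpose` attribute is made up for illustration, and only the signature and the MX_SUCCESS/MX_FAIL return values come from **lib_api.h**:

```c++
#include <iostream>
#include "lib_api.h"

// Sketch: a gemm-like operator with two inputs (A, B) and one output (C).
MXReturnValue parseAttrs(std::map<std::string, std::string> attrs,
                         int* num_in, int* num_out) {
  // Attributes arrive as strings; validate an (illustrative) option early.
  if (attrs.count("transpose") &&
      attrs["transpose"] != "0" && attrs["transpose"] != "1") {
    std::cout << "invalid value for 'transpose' attribute" << std::endl;
    return MX_FAIL;
  }
  *num_in = 2;   // A and B
  *num_out = 1;  // C = A x B
  return MX_SUCCESS;
}
```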
+* [inferType](./gemm_lib.cc#L124) - Type Inference:
+* This function specifies how the custom operator infers output data types from input data types.
+
+MXReturnValue inferType(
+std::map<std::string, std::string> attrs,
+std::vector<int> &intypes,
+std::vector<int> &outtypes)
+
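For example, a float32-only operator can simply propagate the first input's dtype to the output. This is a sketch, assuming the `kFloat32` MXDType enum value from **lib_api.h**:

```c++
#include <iostream>
#include "lib_api.h"

// Sketch: require float32 inputs and give the output the same dtype.
MXReturnValue inferType(std::map<std::string, std::string> attrs,
                        std::vector<int> &intypes,
                        std::vector<int> &outtypes) {
  for (int t : intypes) {
    if (t != kFloat32) {
      std::cout << "expected float32 inputs" << std::endl;
      return MX_FAIL;
    }
  }
  outtypes[0] = intypes[0];  // output dtype follows the inputs
  return MX_SUCCESS;
}
```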
+* [inferShape](./gemm_lib.cc#L143) - Shape Inference:
+* This function specifies how the custom operator infers output tensor shapes from input shapes.
+
+MXReturnValue inferShape(
+std::map<std::string, std::string> attrs,
+std::vector<std::vector<unsigned int>> &inshapes,
+std::vector<std::vector<unsigned int>> &outshapes)
+
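For a 2-D gemm, the output shape is (rows of A, columns of B). A sketch under that assumption:

```c++
#include <iostream>
#include "lib_api.h"

// Sketch: (n, k) x (k, m) -> (n, m); reject anything that is not 2-D.
MXReturnValue inferShape(std::map<std::string, std::string> attrs,
                         std::vector<std::vector<unsigned int>> &inshapes,
                         std::vector<std::vector<unsigned int>> &outshapes) {
  if (inshapes[0].size() != 2 || inshapes[1].size() != 2 ||
      inshapes[0][1] != inshapes[1][0]) {
    std::cout << "expected 2-D matrices with a matching inner dimension"
              << std::endl;
    return MX_FAIL;
  }
  outshapes[0] = {inshapes[0][0], inshapes[1][1]};
  return MX_SUCCESS;
}
```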
+* [forward](./gemm_lib.cc#L56) - Forward function:
+* This function specifies the computation of the forward pass of the operator.
+
+MXReturnValue forward(
+std::map<std::string, std::string> attrs,
+std::vector<MXTensor> inputs,
+std::vector<MXTensor> outputs,
+OpResource res)
+
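A naive float32 matrix multiply could be implemented as in the sketch below. It assumes, as in **lib_api.h**, that `MXTensor` exposes a typed `data<T>()` pointer and a public `shape` vector, and that MXNet has already allocated the output tensor:

```c++
#include "lib_api.h"

// Sketch: C = A x B for float32, with no blocking or threading.
MXReturnValue forward(std::map<std::string, std::string> attrs,
                      std::vector<MXTensor> inputs,
                      std::vector<MXTensor> outputs,
                      OpResource res) {
  float* A = inputs[0].data<float>();
  float* B = inputs[1].data<float>();
  float* C = outputs[0].data<float>();
  int64_t n = inputs[0].shape[0];  // rows of A
  int64_t k = inputs[0].shape[1];  // cols of A == rows of B
  int64_t m = inputs[1].shape[1];  // cols of B
  for (int64_t i = 0; i < n; i++) {
    for (int64_t j = 0; j < m; j++) {
      float sum = 0.0f;
      for (int64_t x = 0; x < k; x++)
        sum += A[i * k + x] * B[x * m + j];
      C[i * m + j] = sum;
    }
  }
  return MX_SUCCESS;
}
```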
+* [REGISTER_OP(my_op_name) Macro](./gemm_lib.cc#L169):
+* This macro registers the custom operator with all of the MXNet APIs under its name, and you need to call the setters to bind the above functions to the registered operator.
+
+REGISTER_OP(my_op_name)
+.setForward(forward)
+.setParseAttrs(parseAttrs)
+.setInferType(inferType)
+.setInferShape(inferShape);
+
+There are also some optional functions you can specify:
+
+* [backward](./gemm_lib.cc#L90) - Backward Gradient function:
+* This function specifies the computation of the backward pass of the operator.
+
+MXReturnValue backward(
+std::map<std::string, std::string> attrs,
+std::vector<MXTensor> inputs,
+std::vector<MXTensor> outputs,
+OpResource res)
+
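The inputs to backward typically hold the output gradient (plus any forward inputs MXNet saved), and the outputs are the input gradients to be filled in. As a deliberately trivial sketch, here is the backward pass of an elementwise identity operator, where the gradient flows through unchanged; it assumes the `size()` helper on `MXTensor` from **lib_api.h**. (For gemm itself the real gradients would be dA = dC·Bᵀ and dB = Aᵀ·dC.)

```c++
#include "lib_api.h"

// Sketch: backward of an identity op; grad_in is just a copy of grad_out.
MXReturnValue backward(std::map<std::string, std::string> attrs,
                       std::vector<MXTensor> inputs,
                       std::vector<MXTensor> outputs,
                       OpResource res) {
  float* grad_out = inputs[0].data<float>();   // gradient w.r.t. the output
  float* grad_in  = outputs[0].data<float>();  // gradient w.r.t. the input
  for (int64_t i = 0; i < inputs[0].size(); i++)
    grad_in[i] = grad_out[i];
  return MX_SUCCESS;
}
```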
+* [mutateInputs](./gemm_lib.cc#L214) - Specify mutable input:
+* This function allows you to mark some inputs as mutable, which is useful when using aux parameters for BatchNorm-like operators.
+
+MXReturnValue mutateInputs(
+std::map<std::string, std::string> attrs,
+std::vector<int> &input_indices)
+
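For example, an operator whose third input is an aux state (say, a running mean) could mark it as follows; the index is purely illustrative:

```c++
#include "lib_api.h"

// Sketch: mark input 2 as mutable (e.g., an aux state tensor).
MXReturnValue mutateInputs(std::map<std::string, std::string> attrs,
                           std::vector<int> &input_indices) {
  input_indices.push_back(2);  // MXNet may now write to inputs[2] in place
  return MX_SUCCESS;
}
```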
+Let’s take a closer look at those registry functions:
+
+* **parseAttrs**: This function takes 3 arguments. The 1st argument is an input: the attributes passed all the way from the Python code. When a user calls `mx.nd.my_op_name(s,t,keyword=1)`, the keyword is passed to the `attrs` map.

[GitHub] [incubator-mxnet] aaronmarkham commented on a change in pull request #17241: Add CustomOp tutorial doc

2020-01-13 Thread GitBox
aaronmarkham commented on a change in pull request #17241: Add CustomOp 
tutorial doc
URL: https://github.com/apache/incubator-mxnet/pull/17241#discussion_r366108808
 
 

 ##
 File path: example/extensions/lib_custom_op/README.md
 ##
 @@ -0,0 +1,118 @@
+CustomOp Example and Tutorial
+
 
 Review comment:
   Sometimes one of the transpilers will complain that this is too short. 
Recommend making it longer to match the title.


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] aaronmarkham commented on a change in pull request #17241: Add CustomOp tutorial doc

2020-01-13 Thread GitBox
aaronmarkham commented on a change in pull request #17241: Add CustomOp 
tutorial doc
URL: https://github.com/apache/incubator-mxnet/pull/17241#discussion_r366110115
 
 

 ##
 File path: example/extensions/lib_custom_op/README.md
 ##
 @@ -0,0 +1,118 @@
+CustomOp Example and Tutorial
+
+* **lib_custom_op/test_gemm.py**: This file calls 
`mx.library.load(‘libgemm_lib.so’)` to load the library containing the custom 
operator, invoke the operator using both ndarray and symbol API, and print 
outputs of forward and backward pass. The outputs should be the same as the 
regular MXNet gemm operator.
 
 Review comment:
   ```suggestion
   * **lib_custom_op/test_gemm.py**: This file calls 
`mx.library.load(‘libgemm_lib.so’)` to load the library containing the custom 
operator, invokes the operator using both NDArray and Symbol APIs, and prints 
outputs of the forward and backward passes. The outputs should be the same as 
the regular MXNet `gemm` operator.
   ```


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] aaronmarkham commented on a change in pull request #17241: Add CustomOp tutorial doc

2020-01-13 Thread GitBox
aaronmarkham commented on a change in pull request #17241: Add CustomOp 
tutorial doc
URL: https://github.com/apache/incubator-mxnet/pull/17241#discussion_r366110539
 
 

 ##
 File path: example/extensions/lib_custom_op/README.md
 ##
 @@ -0,0 +1,118 @@
+CustomOp Example and Tutorial
+
+* [forward](./gemm_lib.cc#L56) - Forward function:
+* This function specifies the computation of forward pass of the operator.
 
 Review comment:
   ```suggestion
   * This function specifies the computation of the forward pass of the 
operator.
   ```


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] aaronmarkham commented on a change in pull request #17241: Add CustomOp tutorial doc

2020-01-13 Thread GitBox
aaronmarkham commented on a change in pull request #17241: Add CustomOp 
tutorial doc
URL: https://github.com/apache/incubator-mxnet/pull/17241#discussion_r366110828
 
 

 ##
 File path: example/extensions/lib_custom_op/README.md
 ##
 @@ -0,0 +1,118 @@
+CustomOp Example and Tutorial
+
+* [backward](./gemm_lib.cc#L90) - Backward Gradient function:
+* This function specifies the computation of backward pass of the operator.
 
 Review comment:
   ```suggestion
   * This function specifies the computation of the backward pass of the 
operator.
   ```


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] aaronmarkham commented on a change in pull request #17241: Add CustomOp tutorial doc

2020-01-13 Thread GitBox
aaronmarkham commented on a change in pull request #17241: Add CustomOp 
tutorial doc
URL: https://github.com/apache/incubator-mxnet/pull/17241#discussion_r366109842
 
 

 ##
 File path: example/extensions/lib_custom_op/README.md
 ##
 @@ -0,0 +1,118 @@
+CustomOp Example and Tutorial
+
+1. run `make gemm_lib`, the Makefile will generate a dynamic library 
**libgemm_lib.so** compiled from gemm_lib.cc. This is the library you are going 
to load that contains everything of the custom gemm operator.
+2. run `python test_gemm.py`, and it’ll first load the above .so library, find 
operators,  register them in the MXNet backend, print "Found x operators"; then 
invoke the operator like a regular MXNet operator and output the result.
 
 Review comment:
   ```suggestion
   2. Run `python test_gemm.py`. It’ll first load the above .so library, find 
the operators, register them in the MXNet backend, print "Found x operators", 
then invoke the operator like a regular MXNet operator and output the result.
   ```


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] aaronmarkham commented on a change in pull request #17241: Add CustomOp tutorial doc

2020-01-13 Thread GitBox
aaronmarkham commented on a change in pull request #17241: Add CustomOp 
tutorial doc
URL: https://github.com/apache/incubator-mxnet/pull/17241#discussion_r366110882
 
 

 ##
 File path: example/extensions/lib_custom_op/README.md
 ##
 @@ -0,0 +1,118 @@
+CustomOp Example and Tutorial
+
+* [mutateInputs](./gemm_lib.cc#L214) - Specify mutable input:
+* This function allows you to mark some inputs to be mutable inputs, 
useful when using aux parameters for BatchNorm-like operators.
 
 Review comment:
   ```suggestion
   * This function allows you to mark some inputs to be mutable inputs. It 
is useful when using aux parameters for BatchNorm-like operators.
   ```


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

[GitHub] [incubator-mxnet] aaronmarkham commented on a change in pull request #17241: Add CustomOp tutorial doc

2020-01-13 Thread GitBox
aaronmarkham commented on a change in pull request #17241: Add CustomOp 
tutorial doc
URL: https://github.com/apache/incubator-mxnet/pull/17241#discussion_r366109555
 
 

 ##
 File path: example/extensions/lib_custom_op/README.md
 ##
 @@ -0,0 +1,118 @@
+CustomOp Example and Tutorial
+
+### Run An Example:
+
+You can start getting familiar with custom operator by running some examples 
we provide in the **example/extensions/lib_custom_op** directory. Let’s start 
with gemm (Generalized Matrix Multiplication) operator, a common linear algebra 
operator. Go to that directory and follow the steps:
 
 Review comment:
   ```suggestion
   You can start getting familiar with custom operators by running some 
examples provided in the **example/extensions/lib_custom_op** directory. Start 
with a common linear algebra operator like `gemm` (Generalized Matrix 
Multiplication). Go to `lib_custom_op` directory and follow these steps:
   ```


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] aaronmarkham commented on a change in pull request #17241: Add CustomOp tutorial doc

2020-01-13 Thread GitBox
aaronmarkham commented on a change in pull request #17241: Add CustomOp 
tutorial doc
URL: https://github.com/apache/incubator-mxnet/pull/17241#discussion_r366109930
 
 

 ##
 File path: example/extensions/lib_custom_op/README.md
 ##
 @@ -0,0 +1,118 @@
+CustomOp Example and Tutorial
+
+### Basic Files For Gemm Library:
+
+* **lib_custom_op/gemm_lib.cc**: This file has source code implementation of 
all required components of a custom operator, as well as the registration of 
the custom operator.
 
 Review comment:
   ```suggestion
   * **lib_custom_op/gemm_lib.cc**: This file has a source code implementation 
of all required components of a custom operator, as well as the registration of 
the custom operator.
   ```


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] aaronmarkham commented on a change in pull request #17241: Add CustomOp tutorial doc

2020-01-13 Thread GitBox
aaronmarkham commented on a change in pull request #17241: Add CustomOp 
tutorial doc
URL: https://github.com/apache/incubator-mxnet/pull/17241#discussion_r366110448
 
 

 ##
 File path: example/extensions/lib_custom_op/README.md
 ##
 @@ -0,0 +1,118 @@
+CustomOp Example and Tutorial
+
+* [inferType](./gemm_lib.cc#L124) - Type Inference:
+* This function specifies how custom operator infers output data types 
using input data types.
 
 Review comment:
   ```suggestion
   * This function specifies how the custom operator infers output data 
types using input data types.
   ```


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] aaronmarkham commented on a change in pull request #17241: Add CustomOp tutorial doc

2020-01-13 Thread GitBox
aaronmarkham commented on a change in pull request #17241: Add CustomOp 
tutorial doc
URL: https://github.com/apache/incubator-mxnet/pull/17241#discussion_r366109718
 
 

 ##
 File path: example/extensions/lib_custom_op/README.md
 ##
 @@ -0,0 +1,118 @@
+CustomOp Example and Tutorial
+
+
+## Getting Started
+
+### Have MXNet Ready:
+
+First you should install MXNet either from compiling from source code or 
download from nightly build. It doesn’t matter if the build comes with CUDA or 
MKLDNN. The custom operator doesn’t interact with the execution of other native 
MXNet operators.
+
+### Run An Example:
+
+You can start getting familiar with custom operator by running some examples 
we provide in the **example/extensions/lib_custom_op** directory. Let’s start 
with gemm (Generalized Matrix Multiplication) operator, a common linear algebra 
operator. Go to that directory and follow the steps:
+
+1. run `make gemm_lib`, the Makefile will generate a dynamic library 
**libgemm_lib.so** compiled from gemm_lib.cc. This is the library you are going 
to load that contains everything of the custom gemm operator.
 
 Review comment:
   ```suggestion
   1. Run `make gemm_lib`. The Makefile will generate a dynamic library 
**libgemm_lib.so** compiled from `gemm_lib.cc`. This is the library you are 
going to load that contains everything for the custom gemm operator.
   ```


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services

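For reference, a minimal sketch of the test flow the steps above describe (the operator name `my_gemm` is an assumption here; check gemm_lib.cc for the name actually registered via REGISTER_OP):

```python
# Load the compiled library, then call the custom op like a native one.
import mxnet as mx

mx.library.load('libgemm_lib.so')   # finds and registers the custom operators

a = mx.nd.array([[1.0, 2.0], [3.0, 4.0]])
b = mx.nd.array([[5.0, 6.0], [7.0, 8.0]])
c = mx.nd.my_gemm(a, b)             # invoked like any regular MXNet operator
print(c)
```
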

[GitHub] [incubator-mxnet] aaronmarkham commented on a change in pull request #17241: Add CustomOp tutorial doc

2020-01-13 Thread GitBox
aaronmarkham commented on a change in pull request #17241: Add CustomOp 
tutorial doc
URL: https://github.com/apache/incubator-mxnet/pull/17241#discussion_r366113614
 
 

 ##
 File path: example/extensions/lib_custom_op/README.md
 ##
 @@ -0,0 +1,118 @@
+CustomOp Example and Tutorial
+
+
+## Getting Started
+
+### Have MXNet Ready:
+
+First you should install MXNet either from compiling from source code or 
download from nightly build. It doesn’t matter if the build comes with CUDA or 
MKLDNN. The custom operator doesn’t interact with the execution of other native 
MXNet operators.
+
+### Run An Example:
+
+You can start getting familiar with custom operator by running some examples 
we provide in the **example/extensions/lib_custom_op** directory. Let’s start 
with gemm (Generalized Matrix Multiplication) operator, a common linear algebra 
operator. Go to that directory and follow the steps:
+
+1. run `make gemm_lib`, the Makefile will generate a dynamic library 
**libgemm_lib.so** compiled from gemm_lib.cc. This is the library you are going 
to load that contains everything of the custom gemm operator.
+2. run `python test_gemm.py`, and it’ll first load the above .so library, find 
operators,  register them in the MXNet backend, print "Found x operators"; then 
invoke the operator like a regular MXNet operator and output the result.
+
 
 Review comment:
   I think this is missing a transition. How do I go from running this basic 
example to consuming the following info for my own op? Maybe even a simple 
example of customization for a particular use case would help.


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] aaronmarkham commented on a change in pull request #17241: Add CustomOp tutorial doc

2020-01-13 Thread GitBox
aaronmarkham commented on a change in pull request #17241: Add CustomOp 
tutorial doc
URL: https://github.com/apache/incubator-mxnet/pull/17241#discussion_r366109160
 
 

 ##
 File path: example/extensions/lib_custom_op/README.md
 ##
 @@ -0,0 +1,118 @@
+CustomOp Example and Tutorial
+
+
 
 Review comment:
   Introduction? What are we going to accomplish in this example?


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] aaronmarkham commented on a change in pull request #17241: Add CustomOp tutorial doc

2020-01-13 Thread GitBox
aaronmarkham commented on a change in pull request #17241: Add CustomOp 
tutorial doc
URL: https://github.com/apache/incubator-mxnet/pull/17241#discussion_r366110345
 
 

 ##
 File path: example/extensions/lib_custom_op/README.md
 ##
 @@ -0,0 +1,118 @@
+CustomOp Example and Tutorial
+
+
+## Getting Started
+
+### Have MXNet Ready:
+
+First you should install MXNet either from compiling from source code or 
download from nightly build. It doesn’t matter if the build comes with CUDA or 
MKLDNN. The custom operator doesn’t interact with the execution of other native 
MXNet operators.
+
+### Run An Example:
+
+You can start getting familiar with custom operator by running some examples 
we provide in the **example/extensions/lib_custom_op** directory. Let’s start 
with gemm (Generalized Matrix Multiplication) operator, a common linear algebra 
operator. Go to that directory and follow the steps:
+
+1. run `make gemm_lib`, the Makefile will generate a dynamic library 
**libgemm_lib.so** compiled from gemm_lib.cc. This is the library you are going 
to load that contains everything of the custom gemm operator.
+2. run `python test_gemm.py`, and it’ll first load the above .so library, find 
operators,  register them in the MXNet backend, print "Found x operators"; then 
invoke the operator like a regular MXNet operator and output the result.
+
+### Basic Files For Gemm Library:
+
+* **lib_custom_op/gemm_lib.cc**: This file has source code implementation of 
all required components of a custom operator, as well as the registration of 
the custom operator.
+
+* **lib_custom_op/Makefile**: Compile source code to a dynamic shared library, 
with a header file **include/mxnet/lib_api.h** from MXNet source code. 
Currently the custom operator is compatible with C++11 onwards.
+
+* **lib_custom_op/test_gemm.py**: This file calls 
`mx.library.load(‘libgemm_lib.so’)` to load the library containing the custom 
operator, invoke the operator using both ndarray and symbol API, and print 
outputs of forward and backward pass. The outputs should be the same as the 
regular MXNet gemm operator.
+
+## Writing Custom Operators:
+
+### Regular Custom Operator:
+
+There are several basic building blocks for making a (stateless) custom 
operator:
+
+* [parseAttrs](./gemm_lib.cc#L118) - Attribute Parser:
 
 Review comment:
   Should look into the Sphinx plugin that facilitates this, so you don't use a 
line number that's gonna move.


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] aaronmarkham commented on a change in pull request #17241: Add CustomOp tutorial doc

2020-01-13 Thread GitBox
aaronmarkham commented on a change in pull request #17241: Add CustomOp 
tutorial doc
URL: https://github.com/apache/incubator-mxnet/pull/17241#discussion_r366111402
 
 

 ##
 File path: example/extensions/lib_custom_op/README.md
 ##
 @@ -0,0 +1,118 @@
+CustomOp Example and Tutorial
+
+
+## Getting Started
+
+### Have MXNet Ready:
+
+First you should install MXNet either from compiling from source code or 
download from nightly build. It doesn’t matter if the build comes with CUDA or 
MKLDNN. The custom operator doesn’t interact with the execution of other native 
MXNet operators.
+
+### Run An Example:
+
+You can start getting familiar with custom operator by running some examples 
we provide in the **example/extensions/lib_custom_op** directory. Let’s start 
with gemm (Generalized Matrix Multiplication) operator, a common linear algebra 
operator. Go to that directory and follow the steps:
+
+1. run `make gemm_lib`, the Makefile will generate a dynamic library 
**libgemm_lib.so** compiled from gemm_lib.cc. This is the library you are going 
to load that contains everything of the custom gemm operator.
+2. run `python test_gemm.py`, and it’ll first load the above .so library, find 
operators,  register them in the MXNet backend, print "Found x operators"; then 
invoke the operator like a regular MXNet operator and output the result.
+
+### Basic Files For Gemm Library:
+
+* **lib_custom_op/gemm_lib.cc**: This file has source code implementation of 
all required components of a custom operator, as well as the registration of 
the custom operator.
+
+* **lib_custom_op/Makefile**: Compile source code to a dynamic shared library, 
with a header file **include/mxnet/lib_api.h** from MXNet source code. 
Currently the custom operator is compatible with C++11 onwards.
+
+* **lib_custom_op/test_gemm.py**: This file calls 
`mx.library.load(‘libgemm_lib.so’)` to load the library containing the custom 
operator, invoke the operator using both ndarray and symbol API, and print 
outputs of forward and backward pass. The outputs should be the same as the 
regular MXNet gemm operator.
+
+## Writing Custom Operators:
+
+### Regular Custom Operator:
+
+There are several basic building blocks for making a (stateless) custom 
operator:
+
+* [parseAttrs](./gemm_lib.cc#L118) - Attribute Parser:
+* This function specifies number of input and output tensors for the 
custom operator; also this is where a custom operator can validate the 
attributes (ie. options) specified by the user.
+
+MXReturnValue parseAttrs(
+    std::map<std::string, std::string> attrs,
+    int* num_in,
+    int* num_out)
+
+
+* [inferType](./gemm_lib.cc#L124) - Type Inference:
+* This function specifies how custom operator infers output data types 
using input data types.
+
+MXReturnValue inferType(
+    std::map<std::string, std::string> attrs,
+    std::vector<int> &intypes,
+    std::vector<int> &outtypes)
+
+* [inferShape](./gemm_lib.cc#L143) - Shape Inference:
+* This function specifies how custom operator infers output tensor shape 
using input shape.
+
+MXReturnValue inferShape(
+    std::map<std::string, std::string> attrs,
+    std::vector<std::vector<unsigned int>> &inshapes,
+    std::vector<std::vector<unsigned int>> &outshapes)
+
+* [forward](./gemm_lib.cc#L56) - Forward function:
+* This function specifies the computation of forward pass of the operator.
+
+MXReturnValue forward(
+    std::map<std::string, std::string> attrs,
+    std::vector<MXTensor> inputs,
+    std::vector<MXTensor> outputs,
+    OpResource res)
+
+* [REGISTER_OP(my_op_name) Macro](./gemm_lib.cc#L169):
+* This macro registers custom operator to all MXNet APIs by its name, and 
you need to call setters to bind the above functions to the registered operator.
+
+REGISTER_OP(my_op_name)
+.setForward(forward)
+.setParseAttrs(parseAttrs)
+.setInferType(inferType)
+.setInferShape(inferShape);
+
+Also there are some optional functions you can specify:
+
+* [backward](./gemm_lib.cc#L90) - Backward Gradient function:
+* This function specifies the computation of backward pass of the operator.
+
+MXReturnValue backward(
+    std::map<std::string, std::string> attrs,
+    std::vector<MXTensor> inputs,
+    std::vector<MXTensor> outputs,
+    OpResource res)
+
+* [mutateInputs](./gemm_lib.cc#L214) - Specify mutable input:
+* This function allows you to mark some inputs to be mutable inputs, 
useful when using aux parameters for BatchNorm-like operators.
+
+MXReturnValue mutateInputs(
+    std::map<std::string, std::string> attrs,
+    std::vector<int> &input_indices)
+
+Let’s take a closer look at those registry functions:
+
+* **parseAttrs**: This function takes 3 arguments. The 1st argument is an input, 
which is the attributes passed all the way from Python code. When the user calls 
`mx.nd.my_op_name(s,t,keyword=1)`, the keyword is passed to the

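To make the registry functions quoted above concrete, here is a minimal sketch of a complete stateless custom operator — an identity op rather than gemm; the name `my_identity` and the float32-only forward are illustrative assumptions, while types such as `MXTensor`, `OpResource`, `MXReturnValue`, and `MX_SUCCESS` come from **include/mxnet/lib_api.h**:

```cpp
#include <map>
#include <string>
#include <vector>
#include "lib_api.h"

// One input tensor, one output tensor; no attributes to validate.
MXReturnValue parseAttrs(std::map<std::string, std::string> attrs,
                         int* num_in, int* num_out) {
  *num_in = 1;
  *num_out = 1;
  return MX_SUCCESS;
}

MXReturnValue inferType(std::map<std::string, std::string> attrs,
                        std::vector<int> &intypes,
                        std::vector<int> &outtypes) {
  outtypes[0] = intypes[0];  // output dtype mirrors the input dtype
  return MX_SUCCESS;
}

MXReturnValue inferShape(std::map<std::string, std::string> attrs,
                         std::vector<std::vector<unsigned int>> &inshapes,
                         std::vector<std::vector<unsigned int>> &outshapes) {
  outshapes[0] = inshapes[0];  // output shape mirrors the input shape
  return MX_SUCCESS;
}

MXReturnValue forward(std::map<std::string, std::string> attrs,
                      std::vector<MXTensor> inputs,
                      std::vector<MXTensor> outputs,
                      OpResource res) {
  float* in = inputs[0].data<float>();    // assumes a float32 input
  float* out = outputs[0].data<float>();
  for (int64_t i = 0; i < inputs[0].size(); ++i)
    out[i] = in[i];                       // identity: copy input to output
  return MX_SUCCESS;
}

REGISTER_OP(my_identity)
.setParseAttrs(parseAttrs)
.setInferType(inferType)
.setInferShape(inferShape)
.setForward(forward);
```
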
[GitHub] [incubator-mxnet] aaronmarkham commented on a change in pull request #17241: Add CustomOp tutorial doc

2020-01-13 Thread GitBox
aaronmarkham commented on a change in pull request #17241: Add CustomOp 
tutorial doc
URL: https://github.com/apache/incubator-mxnet/pull/17241#discussion_r366110494
 
 

 ##
 File path: example/extensions/lib_custom_op/README.md
 ##
 @@ -0,0 +1,118 @@
+CustomOp Example and Tutorial
+
+
+## Getting Started
+
+### Have MXNet Ready:
+
+First you should install MXNet either from compiling from source code or 
download from nightly build. It doesn’t matter if the build comes with CUDA or 
MKLDNN. The custom operator doesn’t interact with the execution of other native 
MXNet operators.
+
+### Run An Example:
+
+You can start getting familiar with custom operator by running some examples 
we provide in the **example/extensions/lib_custom_op** directory. Let’s start 
with gemm (Generalized Matrix Multiplication) operator, a common linear algebra 
operator. Go to that directory and follow the steps:
+
+1. run `make gemm_lib`, the Makefile will generate a dynamic library 
**libgemm_lib.so** compiled from gemm_lib.cc. This is the library you are going 
to load that contains everything of the custom gemm operator.
+2. run `python test_gemm.py`, and it’ll first load the above .so library, find 
operators,  register them in the MXNet backend, print "Found x operators"; then 
invoke the operator like a regular MXNet operator and output the result.
+
+### Basic Files For Gemm Library:
+
+* **lib_custom_op/gemm_lib.cc**: This file has source code implementation of 
all required components of a custom operator, as well as the registration of 
the custom operator.
+
+* **lib_custom_op/Makefile**: Compile source code to a dynamic shared library, 
with a header file **include/mxnet/lib_api.h** from MXNet source code. 
Currently the custom operator is compatible with C++11 onwards.
+
+* **lib_custom_op/test_gemm.py**: This file calls 
`mx.library.load(‘libgemm_lib.so’)` to load the library containing the custom 
operator, invoke the operator using both ndarray and symbol API, and print 
outputs of forward and backward pass. The outputs should be the same as the 
regular MXNet gemm operator.
+
+## Writing Custom Operators:
+
+### Regular Custom Operator:
+
+There are several basic building blocks for making a (stateless) custom 
operator:
+
+* [parseAttrs](./gemm_lib.cc#L118) - Attribute Parser:
+* This function specifies number of input and output tensors for the 
custom operator; also this is where a custom operator can validate the 
attributes (ie. options) specified by the user.
+
+MXReturnValue parseAttrs(
+    std::map<std::string, std::string> attrs,
+    int* num_in,
+    int* num_out)
+
+
+* [inferType](./gemm_lib.cc#L124) - Type Inference:
+* This function specifies how custom operator infers output data types 
using input data types.
+
+MXReturnValue inferType(
+    std::map<std::string, std::string> attrs,
+    std::vector<int> &intypes,
+    std::vector<int> &outtypes)
+
+* [inferShape](./gemm_lib.cc#L143) - Shape Inference:
+* This function specifies how custom operator infers output tensor shape 
using input shape.
 
 Review comment:
   ```suggestion
   * This function specifies how the custom operator infers output tensor 
shape using input shape.
   ```


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] aaronmarkham commented on a change in pull request #17241: Add CustomOp tutorial doc

2020-01-13 Thread GitBox
aaronmarkham commented on a change in pull request #17241: Add CustomOp 
tutorial doc
URL: https://github.com/apache/incubator-mxnet/pull/17241#discussion_r366108930
 
 

 ##
 File path: example/extensions/lib_custom_op/README.md
 ##
 @@ -0,0 +1,118 @@
+CustomOp Example and Tutorial
+
+
+## Getting Started
+
+### Have MXNet Ready:
 
 Review comment:
   Colons aren't needed in the titles.


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] marcoabreu commented on a change in pull request #17206: Windows dev environment configuration, update install instructions from source in the docs.

2020-01-13 Thread GitBox
marcoabreu commented on a change in pull request #17206: Windows dev 
environment configuration, update install instructions from source in the docs.
URL: https://github.com/apache/incubator-mxnet/pull/17206#discussion_r366109320
 
 

 ##
 File path: ci/windows_dev_env/windows_deps_headless_installer.py
 ##
 @@ -0,0 +1,373 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+
+"""Dependency installer for Windows"""
+
+__author__ = 'Pedro Larroy, Chance Bair'
+__version__ = '0.2'
+
+import argparse
+import errno
+import logging
+import os
+import psutil
+import shutil
+import subprocess
+import urllib
+import stat
+import tempfile
+import zipfile
+from time import sleep
+from urllib.error import HTTPError
+import logging
+from subprocess import check_output, check_call
+import re
+import sys
+import urllib.request
+
+import ssl
+
+ssl._create_default_https_context = ssl._create_unverified_context
+
+log = logging.getLogger(__name__)
+
+
+DEPS = {
+'openblas': 
'https://windows-post-install.s3-us-west-2.amazonaws.com/OpenBLAS-windows-v0_2_19.zip',
+'opencv': 
'https://windows-post-install.s3-us-west-2.amazonaws.com/opencv-windows-4.1.2-vc14_vc15.zip',
+'cudnn': 
'https://windows-post-install.s3-us-west-2.amazonaws.com/cudnn-9.2-windows10-x64-v7.4.2.24.zip',
+'nvdriver': 
'https://windows-post-install.s3-us-west-2.amazonaws.com/nvidia_display_drivers_398.75_server2016.zip',
+'cmake': 
'https://github.com/Kitware/CMake/releases/download/v3.16.2/cmake-3.16.2-win64-x64.msi'
+}
+
+DEFAULT_SUBPROCESS_TIMEOUT=3600
+
+
+def retry(target_exception, tries=4, delay_s=1, backoff=2):
+"""Retry calling the decorated function using an exponential backoff.
+
+http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/
+original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry
+
+:param target_exception: the exception to check. may be a tuple of
+exceptions to check
+:type target_exception: Exception or tuple
+:param tries: number of times to try (not retry) before giving up
+:type tries: int
+:param delay_s: initial delay between retries in seconds
+:type delay_s: int
+:param backoff: backoff multiplier e.g. value of 2 will double the delay
+each retry
+:type backoff: int
+"""
+import time
+from functools import wraps
+
+def decorated_retry(f):
+@wraps(f)
+def f_retry(*args, **kwargs):
+mtries, mdelay = tries, delay_s
+while mtries > 1:
+try:
+return f(*args, **kwargs)
+except target_exception as e:
+logging.warning("Exception: %s, Retrying in %d 
seconds...", str(e), mdelay)
+time.sleep(mdelay)
+mtries -= 1
+mdelay *= backoff
+return f(*args, **kwargs)
+
+return f_retry  # true decorator
+
+return decorated_retry
+
+
+@retry((ValueError, OSError, HTTPError), tries=5, delay_s=2, backoff=5)
+def download(url, dest=None, progress=True) -> str:
+from urllib.request import urlopen
+from urllib.parse import (urlparse, urlunparse)
+import progressbar
+import http.client
+
+class ProgressCB():
+def __init__(self):
+self.pbar = None
+
+def __call__(self, block_num, block_size, total_size):
+if not self.pbar and total_size > 0:
+self.pbar = progressbar.bar.ProgressBar(max_value=total_size)
+downloaded = block_num * block_size
+if self.pbar:
+if downloaded < total_size:
+self.pbar.update(downloaded)
+else:
+self.pbar.finish()
+if dest and os.path.isdir(dest):
+local_file = os.path.split(urlparse(url).path)[1]
+local_path = os.path.normpath(os.path.join(dest, local_file))
+else:
+local_path = dest
+with urlopen(url) as c:
+content_length = c.getheader('content-length')
+length = int(content_length) if content_length and isinstance(c, 
http.client.HTTPResponse) else None
+if length and local_path and os.path.exists(loca

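As a side note, the retry decorator quoted in this hunk can be exercised on its own; a minimal sketch (the `flaky` function is invented for illustration):

```python
# flaky() fails twice before succeeding, so the decorator retries it with
# exponential backoff (sleeps of 1s, then 2s) before the third call returns.
calls = {'n': 0}

@retry(ValueError, tries=4, delay_s=1, backoff=2)
def flaky():
    calls['n'] += 1
    if calls['n'] < 3:
        raise ValueError('transient failure %d' % calls['n'])
    return 'ok'

print(flaky())  # prints 'ok' after two logged retries
```
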
[GitHub] [incubator-mxnet] larroy commented on a change in pull request #17294: fix build from source instruction

2020-01-13 Thread GitBox
larroy commented on a change in pull request #17294: fix build from source 
instruction
URL: https://github.com/apache/incubator-mxnet/pull/17294#discussion_r366109119
 
 

 ##
 File path: docs/static_site/src/pages/get_started/ubuntu_setup.md
 ##
 @@ -63,11 +63,11 @@ Then download [cuDNN 
7.1.4](https://developer.nvidia.com/cudnn).
 Unzip the file and change to the cuDNN root directory. Move the header and 
libraries to your local CUDA Toolkit folder:
 
 ```bash
-tar xvzf cudnn-9.2-linux-x64-v7.1
-sudo cp -P cuda/include/cudnn.h /usr/local/cuda/include
 
 Review comment:
   this shouldn't be necessary. 


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] marcoabreu commented on a change in pull request #17206: Windows dev environment configuration, update install instructions from source in the docs.

2020-01-13 Thread GitBox
marcoabreu commented on a change in pull request #17206: Windows dev 
environment configuration, update install instructions from source in the docs.
URL: https://github.com/apache/incubator-mxnet/pull/17206#discussion_r366109226
 
 

 ##
 File path: ci/windows_dev_env/requirements.txt
 ##
 @@ -0,0 +1,4 @@
+psutil
+boto3
+python-jenkins
 
 Review comment:
   Well yeah, it's used as part of the Autoconnect script. But since you are 
recommending this for users to install, it seems a bit out of place. We should 
handle infrastructure and MXNet-related things separately if possible.


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] larroy commented on issue #17294: fix build from source instruction

2020-01-13 Thread GitBox
larroy commented on issue #17294: fix build from source instruction
URL: https://github.com/apache/incubator-mxnet/pull/17294#issuecomment-573949265
 
 
   Why do we have two build-from-source pages? This is all extremely confusing.
   
   https://mxnet.apache.org/get_started/ubuntu_setup.html
   https://mxnet.apache.org/get_started/build_from_source


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] stephenrawls commented on issue #17292: Can't run horovod with latest nightly wheel

2020-01-13 Thread GitBox
stephenrawls commented on issue #17292: Can't run horovod with latest nightly 
wheel
URL: 
https://github.com/apache/incubator-mxnet/issues/17292#issuecomment-573948848
 
 
   Saw this on the email list and got curious...
   
   
   Looks like problem is probably this commit:
   
https://github.com/apache/incubator-mxnet/commit/4ed14e2b749743a014121f57b265675fa7b4c06d#diff-875aa4c013dbd73b044531e439e8afdd
   
   Basically `MXAPIHandleException` used to be defined inline in the header 
file, so all consumers had to do was 
   ```
   #include 
   ```
   
   But now, as of 3 days ago, it is not an inline function anymore, meaning 
that consumers need to make sure they link against c_api_error.o to get the 
symbol.
   
   I don't know enough about the build system that produces these nightly 
builds (does it use the CMake one or the Makefile one?) ... but my hunch is 
that either c_api_error.o is not getting built into libmxnet.so, or it is, but 
it is presented to the linker before MXAPIHandleException is used, so that 
symbol isn't included in libmxnet.so.


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services

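To illustrate the linkage point above with a self-contained sketch (function names are invented; the real symbol is `MXAPIHandleException`):

```cpp
// Old style: the definition lives in the header, so every translation unit
// that includes it instantiates the symbol locally -- nothing extra to link.
inline int handle_error_inline() { return -1; }

// New style: the header ships only a declaration ...
int handle_error_outofline();

// ... and the definition lives in a .cc file that must be compiled into
// libmxnet.so. If that object file (here, c_api_error.o) is missing from the
// library, or ordered after its last use at link time, consumers fail with
// an "undefined symbol" error exactly like the one reported in this issue.
int handle_error_outofline() { return -1; }
```
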

[GitHub] [incubator-mxnet] larroy commented on a change in pull request #17294: fix build from source instruction

2020-01-13 Thread GitBox
larroy commented on a change in pull request #17294: fix build from source 
instruction
URL: https://github.com/apache/incubator-mxnet/pull/17294#discussion_r366108454
 
 

 ##
 File path: docs/static_site/src/pages/get_started/ubuntu_setup.md
 ##
 @@ -63,11 +63,11 @@ Then download [cuDNN 
7.1.4](https://developer.nvidia.com/cudnn).
 Unzip the file and change to the cuDNN root directory. Move the header and 
libraries to your local CUDA Toolkit folder:
 
 ```bash
-tar xvzf cudnn-9.2-linux-x64-v7.1
 
 Review comment:
   Why are we recommending CUDA 9.2 in the install instructions? Most users 
are on 10.1 or 10.2.


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] mjsML commented on issue #17181: Fail to build mxnet from source

2020-01-13 Thread GitBox
mjsML commented on issue #17181: Fail to build mxnet from source
URL: 
https://github.com/apache/incubator-mxnet/issues/17181#issuecomment-573948456
 
 
   agreed ... as I mentioned above, it's a well-documented 
[issue](https://github.com/xianyi/OpenBLAS/wiki/faq#debianlts) with OpenBLAS 
and Debian-based distros.
   
   > Debian and Ubuntu LTS versions provide OpenBLAS package which is not 
updated after initial release, and under circumstances one might want to use 
more recent version of OpenBLAS e.g. to get support for newer CPUs
   
   >Ubuntu and Debian provides 'alternatives' mechanism to comfortably replace 
BLAS and LAPACK libraries systemwide.
   
   >After successful build of OpenBLAS (with DYNAMIC_ARCH set to 1)
   
   ```
   $ make clean
   $ make DYNAMIC_ARCH=1
   $ sudo make DYNAMIC_ARCH=1 install
   ```
   Basically the symbol mentioned above is missing from the default binaries 
that ship with Debian-based distros, so OpenBLAS needs to be rebuilt from the 
latest source with this flag to force the compiler to emit the symbol that the 
linker is looking for (in this instance, the old "gotoblas" symbol).


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services

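For reference, the Debian/Ubuntu 'alternatives' switch the OpenBLAS FAQ describes looks roughly like this — library paths and the alternative name are assumptions; adjust them to your distro and install prefix:

```bash
# Register the locally built OpenBLAS as a BLAS provider
# (default `make install` prefix is /opt/OpenBLAS; paths may differ).
sudo update-alternatives --install /usr/lib/x86_64-linux-gnu/libblas.so.3 \
    libblas.so.3-x86_64-linux-gnu /opt/OpenBLAS/lib/libopenblas.so.0 41

# Interactively choose which library libblas.so.3 resolves to system-wide
sudo update-alternatives --config libblas.so.3-x86_64-linux-gnu
```
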

[GitHub] [incubator-mxnet] aaronmarkham commented on issue #17284: Fix language selection in get_started options.js

2020-01-13 Thread GitBox
aaronmarkham commented on issue #17284: Fix language selection in get_started 
options.js
URL: https://github.com/apache/incubator-mxnet/pull/17284#issuecomment-573948047
 
 
   Can I preview this change somewhere?


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] guanxinq opened a new pull request #17296: fix errors for partition given a backend

2020-01-13 Thread GitBox
guanxinq opened a new pull request #17296: fix errors for partition given a 
backend
URL: https://github.com/apache/incubator-mxnet/pull/17296
 
 
   ## Description ##
   This PR fixes https://github.com/apache/incubator-mxnet/issues/17285. Tests 
for graph partitioning given a backend are now able to run separately. 
   
   ## Checklist ##
   ### Essentials ###
   Please feel free to remove inapplicable items for your PR.
   - [ ] The PR title starts with [MXNET-$JIRA_ID], where $JIRA_ID refers to 
the relevant [JIRA issue](https://issues.apache.org/jira/projects/MXNET/issues) 
created (except PRs with tiny changes)
   - [X] Changes are complete (i.e. I finished coding on this PR)
   - [X] All changes have test coverage:
   - Unit tests are added for small changes to verify correctness (e.g. adding 
a new operator)
   - Nightly tests are added for complicated/long-running ones (e.g. changing 
distributed kvstore)
   - Build tests will be added for build configuration changes (e.g. adding a 
new build option with NCCL)
   - [X] Code is well-documented: 
   - For user-facing API changes, API doc string has been updated. 
   - For new C++ functions in header files, their functionalities and arguments 
are documented. 
   - For new examples, README.md is added to explain what the example does, 
the source of the dataset, expected performance on the test set, and a 
reference to the original paper if applicable
   - Check the API doc at 
https://mxnet-ci-doc.s3-accelerate.dualstack.amazonaws.com/PR-$PR_ID/$BUILD_ID/index.html
   - [X] To the best of my knowledge, examples are either not affected by this 
change, or have been fixed to be compatible with this change
   
   ### Changes ###
   - [ ] Feature1, tests, (and when applicable, API doc)
   - [ ] Feature2, tests, (and when applicable, API doc)
   
   ## Comments ##
   - If this change is a backward incompatible change, why must this change be 
made.
   - Interesting edge cases to note here
   


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] larroy commented on a change in pull request #17206: Windows dev environment configuration, update install instructions from source in the docs.

2020-01-13 Thread GitBox
larroy commented on a change in pull request #17206: Windows dev environment 
configuration, update install instructions from source in the docs.
URL: https://github.com/apache/incubator-mxnet/pull/17206#discussion_r366106905
 
 

 ##
 File path: ci/windows_dev_env/windows_deps_headless_installer.py
 ##
 @@ -0,0 +1,373 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+
+"""Dependency installer for Windows"""
+
+__author__ = 'Pedro Larroy, Chance Bair'
+__version__ = '0.2'
+
+import argparse
+import errno
+import logging
+import os
+import psutil
+import shutil
+import subprocess
+import urllib
+import stat
+import tempfile
+import zipfile
+from time import sleep
+from urllib.error import HTTPError
+import logging
+from subprocess import check_output, check_call
+import re
+import sys
+import urllib.request
+
+import ssl
+
+ssl._create_default_https_context = ssl._create_unverified_context
+
+log = logging.getLogger(__name__)
+
+
+DEPS = {
+'openblas': 
'https://windows-post-install.s3-us-west-2.amazonaws.com/OpenBLAS-windows-v0_2_19.zip',
+'opencv': 
'https://windows-post-install.s3-us-west-2.amazonaws.com/opencv-windows-4.1.2-vc14_vc15.zip',
+'cudnn': 
'https://windows-post-install.s3-us-west-2.amazonaws.com/cudnn-9.2-windows10-x64-v7.4.2.24.zip',
+'nvdriver': 
'https://windows-post-install.s3-us-west-2.amazonaws.com/nvidia_display_drivers_398.75_server2016.zip',
+'cmake': 
'https://github.com/Kitware/CMake/releases/download/v3.16.2/cmake-3.16.2-win64-x64.msi'
+}
+
+DEFAULT_SUBPROCESS_TIMEOUT=3600
+
+
+def retry(target_exception, tries=4, delay_s=1, backoff=2):
+"""Retry calling the decorated function using an exponential backoff.
+
+http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/
+original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry
+
+:param target_exception: the exception to check. may be a tuple of
+exceptions to check
+:type target_exception: Exception or tuple
+:param tries: number of times to try (not retry) before giving up
+:type tries: int
+:param delay_s: initial delay between retries in seconds
+:type delay_s: int
+:param backoff: backoff multiplier e.g. value of 2 will double the delay
+each retry
+:type backoff: int
+"""
+import time
+from functools import wraps
+
+def decorated_retry(f):
+@wraps(f)
+def f_retry(*args, **kwargs):
+mtries, mdelay = tries, delay_s
+while mtries > 1:
+try:
+return f(*args, **kwargs)
+except target_exception as e:
+logging.warning("Exception: %s, Retrying in %d 
seconds...", str(e), mdelay)
+time.sleep(mdelay)
+mtries -= 1
+mdelay *= backoff
+return f(*args, **kwargs)
+
+return f_retry  # true decorator
+
+return decorated_retry
+
+
+@retry((ValueError, OSError, HTTPError), tries=5, delay_s=2, backoff=5)
+def download(url, dest=None, progress=True) -> str:
+from urllib.request import urlopen
+from urllib.parse import (urlparse, urlunparse)
+import progressbar
+import http.client
+
+class ProgressCB():
+def __init__(self):
+self.pbar = None
+
+def __call__(self, block_num, block_size, total_size):
+if not self.pbar and total_size > 0:
+self.pbar = progressbar.bar.ProgressBar(max_value=total_size)
+downloaded = block_num * block_size
+if self.pbar:
+if downloaded < total_size:
+self.pbar.update(downloaded)
+else:
+self.pbar.finish()
+if dest and os.path.isdir(dest):
+local_file = os.path.split(urlparse(url).path)[1]
+local_path = os.path.normpath(os.path.join(dest, local_file))
+else:
+local_path = dest
+with urlopen(url) as c:
+content_length = c.getheader('content-length')
+length = int(content_length) if content_length and isinstance(c, 
http.client.HTTPResponse) else None
+if length and local_path and os.path.exists(local_pa

[GitHub] [incubator-mxnet] larroy commented on a change in pull request #17206: Windows dev environment configuration, update install instructions from source in the docs.

2020-01-13 Thread GitBox
larroy commented on a change in pull request #17206: Windows dev environment 
configuration, update install instructions from source in the docs.
URL: https://github.com/apache/incubator-mxnet/pull/17206#discussion_r366106743
 
 

 ##
 File path: ci/windows_dev_env/requirements.txt
 ##
 @@ -0,0 +1,4 @@
+psutil
+boto3
+python-jenkins
 
 Review comment:
   I think you added it; is it in the slave scripts?


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] larroy commented on a change in pull request #17206: Windows dev environment configuration, update install instructions from source in the docs.

2020-01-13 Thread GitBox
larroy commented on a change in pull request #17206: Windows dev environment 
configuration, update install instructions from source in the docs.
URL: https://github.com/apache/incubator-mxnet/pull/17206#discussion_r366106691
 
 

 ##
 File path: ci/windows_dev_env/setup.ps1
 ##
 @@ -0,0 +1,57 @@
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+
+$ErrorActionPreference = "Stop"
+Set-StrictMode -Version Latest
+function Check-Call {
+param (
+[scriptblock]$ScriptBlock
+)
+Write-Host "Executing $ScriptBlock"
+& @ScriptBlock
+if (($lastexitcode -ne 0)) {
+   Write-Error "Execution failed with $lastexitcode"
+exit $lastexitcode
+}
+}
+Set-ExecutionPolicy Bypass -Scope Process -Force
+Invoke-WebRequest -Uri https://chocolatey.org/install.ps1 -OutFile install.ps1
+./install.ps1
+Check-Call { C:\ProgramData\chocolatey\choco install python2 -y --no-progress }
+Check-Call { C:\ProgramData\chocolatey\choco install python --version=3.7.0 
--force -y --no-progress }
+Check-Call { C:\Python37\python -m pip install --upgrade pip  }
+Check-Call { C:\Python37\python -m pip install -r requirements.txt  }
+Check-Call { C:\Python27\python -m pip install --upgrade pip  }
+Check-Call { C:\Python27\python -m pip install -r requirements.txt  }
+
+Check-Call { C:\ProgramData\chocolatey\choco install git -y }
+Check-Call { C:\ProgramData\chocolatey\choco install 7zip -y }
+Check-Call { C:\ProgramData\chocolatey\choco install cmake -y }
+Check-Call { setx PATH "$($env:path);c:\Program Files\CMake\bin" }
+Check-Call { C:\ProgramData\chocolatey\choco install ninja -y }
+
+# Deps
+Check-Call { C:\Python37\python  windows_deps_headless_installer.py --gpu }
+
+# Other software
+#Check-Call { C:\ProgramData\chocolatey\choco install jom -y }
 
 Review comment:
   Let's keep it for the moment in case there are issues with ninja.


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] larroy commented on issue #17181: Fail to build mxnet from source

2020-01-13 Thread GitBox
larroy commented on issue #17181: Fail to build mxnet from source
URL: 
https://github.com/apache/incubator-mxnet/issues/17181#issuecomment-573946495
 
 
   What's the root cause of this? This is something we should add to CI if 
users are facing these difficulties.


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] marcoabreu commented on a change in pull request #17206: Windows dev environment configuration, update install instructions from source in the docs.

2020-01-13 Thread GitBox
marcoabreu commented on a change in pull request #17206: Windows dev 
environment configuration, update install instructions from source in the docs.
URL: https://github.com/apache/incubator-mxnet/pull/17206#discussion_r366105512
 
 

 ##
 File path: ci/windows_dev_env/windows_deps_headless_installer.py
 ##
 @@ -0,0 +1,373 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+
+"""Dependency installer for Windows"""
+
+__author__ = 'Pedro Larroy, Chance Bair'
+__version__ = '0.2'
+
+import argparse
+import errno
+import logging
+import os
+import psutil
+import shutil
+import subprocess
+import urllib
+import stat
+import tempfile
+import zipfile
+from time import sleep
+from urllib.error import HTTPError
+import logging
+from subprocess import check_output, check_call
+import re
+import sys
+import urllib.request
+
+import ssl
+
+ssl._create_default_https_context = ssl._create_unverified_context
+
+log = logging.getLogger(__name__)
+
+
+DEPS = {
+'openblas': 
'https://windows-post-install.s3-us-west-2.amazonaws.com/OpenBLAS-windows-v0_2_19.zip',
+'opencv': 
'https://windows-post-install.s3-us-west-2.amazonaws.com/opencv-windows-4.1.2-vc14_vc15.zip',
+'cudnn': 
'https://windows-post-install.s3-us-west-2.amazonaws.com/cudnn-9.2-windows10-x64-v7.4.2.24.zip',
+'nvdriver': 
'https://windows-post-install.s3-us-west-2.amazonaws.com/nvidia_display_drivers_398.75_server2016.zip',
+'cmake': 
'https://github.com/Kitware/CMake/releases/download/v3.16.2/cmake-3.16.2-win64-x64.msi'
+}
+
+DEFAULT_SUBPROCESS_TIMEOUT=3600
+
+
+def retry(target_exception, tries=4, delay_s=1, backoff=2):
+"""Retry calling the decorated function using an exponential backoff.
+
+http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/
+original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry
+
+:param target_exception: the exception to check. may be a tuple of
+exceptions to check
+:type target_exception: Exception or tuple
+:param tries: number of times to try (not retry) before giving up
+:type tries: int
+:param delay_s: initial delay between retries in seconds
+:type delay_s: int
+:param backoff: backoff multiplier e.g. value of 2 will double the delay
+each retry
+:type backoff: int
+"""
+import time
+from functools import wraps
+
+def decorated_retry(f):
+@wraps(f)
+def f_retry(*args, **kwargs):
+mtries, mdelay = tries, delay_s
+while mtries > 1:
+try:
+return f(*args, **kwargs)
+except target_exception as e:
+logging.warning("Exception: %s, Retrying in %d 
seconds...", str(e), mdelay)
+time.sleep(mdelay)
+mtries -= 1
+mdelay *= backoff
+return f(*args, **kwargs)
+
+return f_retry  # true decorator
+
+return decorated_retry
+
+
+@retry((ValueError, OSError, HTTPError), tries=5, delay_s=2, backoff=5)
+def download(url, dest=None, progress=True) -> str:
+from urllib.request import urlopen
+from urllib.parse import (urlparse, urlunparse)
+import progressbar
+import http.client
+
+class ProgressCB():
+def __init__(self):
+self.pbar = None
+
+def __call__(self, block_num, block_size, total_size):
+if not self.pbar and total_size > 0:
+self.pbar = progressbar.bar.ProgressBar(max_value=total_size)
+downloaded = block_num * block_size
+if self.pbar:
+if downloaded < total_size:
+self.pbar.update(downloaded)
+else:
+self.pbar.finish()
+if dest and os.path.isdir(dest):
+local_file = os.path.split(urlparse(url).path)[1]
+local_path = os.path.normpath(os.path.join(dest, local_file))
+else:
+local_path = dest
+with urlopen(url) as c:
+content_length = c.getheader('content-length')
+length = int(content_length) if content_length and isinstance(c, 
http.client.HTTPResponse) else None
+if length and local_path and os.path.exists(loca

[GitHub] [incubator-mxnet] marcoabreu commented on a change in pull request #17206: Windows dev environment configuration, update install instructions from source in the docs.

2020-01-13 Thread GitBox
marcoabreu commented on a change in pull request #17206: Windows dev 
environment configuration, update install instructions from source in the docs.
URL: https://github.com/apache/incubator-mxnet/pull/17206#discussion_r366105009
 
 

 ##
 File path: ci/windows_dev_env/setup.ps1
 ##
 @@ -0,0 +1,57 @@
+
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+
+$ErrorActionPreference = "Stop"
+Set-StrictMode -Version Latest
+function Check-Call {
+param (
+[scriptblock]$ScriptBlock
+)
+Write-Host "Executing $ScriptBlock"
+& @ScriptBlock
+if (($lastexitcode -ne 0)) {
+   Write-Error "Execution failed with $lastexitcode"
+exit $lastexitcode
+}
+}
+Set-ExecutionPolicy Bypass -Scope Process -Force
+Invoke-WebRequest -Uri https://chocolatey.org/install.ps1 -OutFile install.ps1
+./install.ps1
+Check-Call { C:\ProgramData\chocolatey\choco install python2 -y --no-progress }
+Check-Call { C:\ProgramData\chocolatey\choco install python --version=3.7.0 
--force -y --no-progress }
+Check-Call { C:\Python37\python -m pip install --upgrade pip  }
+Check-Call { C:\Python37\python -m pip install -r requirements.txt  }
+Check-Call { C:\Python27\python -m pip install --upgrade pip  }
+Check-Call { C:\Python27\python -m pip install -r requirements.txt  }
+
+Check-Call { C:\ProgramData\chocolatey\choco install git -y }
+Check-Call { C:\ProgramData\chocolatey\choco install 7zip -y }
+Check-Call { C:\ProgramData\chocolatey\choco install cmake -y }
+Check-Call { setx PATH "$($env:path);c:\Program Files\CMake\bin" }
+Check-Call { C:\ProgramData\chocolatey\choco install ninja -y }
+
+# Deps
+Check-Call { C:\Python37\python  windows_deps_headless_installer.py --gpu }
+
+# Other software
+#Check-Call { C:\ProgramData\chocolatey\choco install jom -y }
 
 Review comment:
   Remove?


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] marcoabreu commented on a change in pull request #17206: Windows dev environment configuration, update install instructions from source in the docs.

2020-01-13 Thread GitBox
marcoabreu commented on a change in pull request #17206: Windows dev 
environment configuration, update install instructions from source in the docs.
URL: https://github.com/apache/incubator-mxnet/pull/17206#discussion_r366104580
 
 

 ##
 File path: ci/build_windows.py
 ##
 @@ -252,8 +241,8 @@ def main():
 logging.info("Detected Windows platform")
 if 'OpenBLAS_HOME' not in os.environ:
 os.environ["OpenBLAS_HOME"] = "C:\\Program Files\\OpenBLAS-v0.2.19"
-if 'OpenCV_DIR' not in os.environ:
-os.environ["OpenCV_DIR"] = "C:\\Program 
Files\\OpenCV-v3.4.1\\build"
+#if 'OpenCV_DIR' not in os.environ:
 
 Review comment:
   Remove?


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] marcoabreu commented on a change in pull request #17206: Windows dev environment configuration, update install instructions from source in the docs.

2020-01-13 Thread GitBox
marcoabreu commented on a change in pull request #17206: Windows dev 
environment configuration, update install instructions from source in the docs.
URL: https://github.com/apache/incubator-mxnet/pull/17206#discussion_r366104819
 
 

 ##
 File path: ci/windows_dev_env/requirements.txt
 ##
 @@ -0,0 +1,4 @@
+psutil
+boto3
+python-jenkins
 
 Review comment:
   Where is this used?


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[GitHub] [incubator-mxnet] eric-haibin-lin opened a new issue #17295: [website] many contrib ops do not render correctly

2020-01-13 Thread GitBox
eric-haibin-lin opened a new issue #17295: [website] many contrib ops do not 
render correctly 
URL: https://github.com/apache/incubator-mxnet/issues/17295
 
 
   The following ops have code snippets that don't render correctly: 
   
   interleaved_matmul_encdec_qk
   interleaved_matmul_encdec_valatt
   interleaved_matmul_selfatt_qk
   interleaved_matmul_selfatt_valatt
   
   ![Screen Shot 2020-01-13 at 4 50 48 
PM](https://user-images.githubusercontent.com/5545640/72304250-dcd6ab00-3624-11ea-8af5-a7dcd10ef3bc.png)
   
   
https://mxnet.apache.org/api/python/docs/api/contrib/ndarray/index.html#mxnet.contrib.ndarray.interleaved_matmul_selfatt_valatt
   
   


This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services


[incubator-mxnet-site] branch asf-site updated: Bump the publish timestamp.

2020-01-13 Thread aaronmarkham
This is an automated email from the ASF dual-hosted git repository.

aaronmarkham pushed a commit to branch asf-site
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet-site.git


The following commit(s) were added to refs/heads/asf-site by this push:
 new 7a0ea3d  Bump the publish timestamp.
7a0ea3d is described below

commit 7a0ea3d013345d9550d5baae3ed4b34388754a46
Author: mxnet-ci 
AuthorDate: Tue Jan 14 00:42:21 2020 +

Bump the publish timestamp.
---
 date.txt | 1 +
 1 file changed, 1 insertion(+)

diff --git a/date.txt b/date.txt
new file mode 100644
index 000..91a3236
--- /dev/null
+++ b/date.txt
@@ -0,0 +1 @@
+Tue Jan 14 00:42:21 UTC 2020



[GitHub] [incubator-mxnet] mjsML commented on issue #17181: Fail to build mxnet from source

2020-01-13 Thread GitBox
mjsML commented on issue #17181: Fail to build mxnet from source
URL: https://github.com/apache/incubator-mxnet/issues/17181#issuecomment-573941988
 
 
   @larroy master ... a similar issue also occurs when you choose MKL as BLAS too (the linker complains about another symbol, though) ...




[GitHub] [incubator-mxnet] anirudh2290 commented on a change in pull request #16654: Multithreaded Inference Support

2020-01-13 Thread GitBox
anirudh2290 commented on a change in pull request #16654: Multithreaded 
Inference Support
URL: https://github.com/apache/incubator-mxnet/pull/16654#discussion_r366101117
 
 

 ##
 File path: src/imperative/cached_op.h
 ##
 @@ -26,8 +26,191 @@
 #include 
 #include 
 #include 
+#include 
+#include "../operator/operator_common.h"
+#include "../operator/subgraph/common.h"
+#include "./imperative_utils.h"
 
 namespace mxnet {
+namespace {
+
+  static const char FULL[] = "full";
+  static const char FORWARD[] = "forward";
+  static const char BACKWARD[] = "backward";
+  static const char REF_COUNT[] = "ref_count";
+  static const char MEM_PLAN[] = "mem_plan";
+  static const char STORAGE_PLAN[] = "storage_plan";
+
+std::string AddPrefix(const std::string& prefix,
+  const std::string& s) {
+  return prefix + "_" + s;
+}
+
+/* \brief create a forward graph from the Symbol */
+void CreateForwardGraph(const nnvm::Symbol &sym, nnvm::Graph *fwd_graph) {
+  using namespace nnvm;
+  static const auto _copy_op = Op::Get("_copy");
+  NodeEntryMap<size_t> dedup_out;
 
 Review comment:
   it's not a set; NodeEntryMap is typedef'd to the following in NNVM:
   
   template<typename ValueType>
   using NodeEntryMap = std::unordered_map<NodeEntry, ValueType, NodeEntryHash, NodeEntryEqual>;
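   
   For illustration, a minimal self-contained sketch of why the map's value type matters here: the running count is what generates distinct names for the duplicated outputs' `_copy` nodes. This is a hypothetical stand-alone example (std::string stands in for nnvm::NodeEntry; the output names are made up):
   
   #include <iostream>
   #include <string>
   #include <unordered_map>
   
   int main() {
     // Stand-in for NodeEntryMap<size_t>: duplicated output -> copies made so far.
     std::unordered_map<std::string, size_t> dedup_out;
     const std::string outputs[] = {"fc1", "fc1", "fc1"};
     for (const std::string& out : outputs) {
       if (dedup_out.find(out) != dedup_out.end()) {
         // Duplicate: the running count yields distinct names fc1_copy0, fc1_copy1.
         std::cout << out + "_copy" + std::to_string(dedup_out[out]++) << "\n";
       } else {
         dedup_out.emplace(out, 0);  // first occurrence: no copy node needed
       }
     }
     return 0;
   }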
   




[GitHub] [incubator-mxnet] anirudh2290 commented on a change in pull request #16654: Multithreaded Inference Support

2020-01-13 Thread GitBox
anirudh2290 commented on a change in pull request #16654: Multithreaded 
Inference Support
URL: https://github.com/apache/incubator-mxnet/pull/16654#discussion_r366100882
 
 

 ##
 File path: src/imperative/cached_op.h
 ##
 @@ -26,8 +26,180 @@
 #include 
 #include 
 #include 
+#include 
+#include "../operator/operator_common.h"
+#include "../operator/subgraph/common.h"
+#include "./imperative_utils.h"
 
 namespace mxnet {
+namespace {
 
 Review comment:
   This is specific to cached op and mxnet. The location is correct.




[GitHub] [incubator-mxnet] anirudh2290 commented on a change in pull request #16654: Multithreaded Inference Support

2020-01-13 Thread GitBox
anirudh2290 commented on a change in pull request #16654: Multithreaded 
Inference Support
URL: https://github.com/apache/incubator-mxnet/pull/16654#discussion_r366100219
 
 

 ##
 File path: src/imperative/cached_op_threadsafe.cc
 ##
 @@ -0,0 +1,373 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#include 
+#include 
+#include "./imperative_utils.h"
+#include "../executor/exec_pass.h"
+#include "./cached_op_threadsafe.h"
+#include "../profiler/profiler.h"
+#include "../operator/operator_common.h"
+#include "../operator/subgraph/common.h"
+
+namespace mxnet {
+
+DMLC_REGISTER_PARAMETER(CachedOpThreadSafeConfig);
+
+constexpr uint32_t kEidNotExist = std::numeric_limits<uint32_t>::max();
+
+
+struct CachedOpThreadSafe::GraphInfo {
+  nnvm::Graph fwd_graph;
+};
+
+struct CachedOpThreadSafe::DynamicRuntime {
+  GraphInfo info;
+  std::vector<OpStatePtr> op_states;
+};
+
+OpStatePtr CachedOpThreadSafe::GetCachedOpState(
+const Context& ctx) {
+
+  for (const auto& i : cached_op_states_[ctx]) {
+// only create one state per device when not using static memory
+if (!config_.static_alloc || i.unique()) {
+  return i;
+}
+  }
+  nnvm::Graph full_graph;
+  auto state_ptr = OpStatePtr::Create<CachedOpState>(ctx, fwd_graph_, full_graph, false);
+
+  cached_op_states_[ctx].push_back(state_ptr);
+  return state_ptr;
+}
+
+
+CachedOpThreadSafe::CachedOpThreadSafe(const nnvm::Symbol& sym,
+   const std::vector<std::pair<std::string, std::string> >& flags) : CachedOp(sym, flags) {
+  using namespace nnvm;
+  using namespace imperative;
+  static const std::vector<const Op*> zero_ops{Op::Get("zeros_like"),
+Op::Get("_zeros")};
+  static const auto _copy_op = Op::Get("_copy");
+  config_.Init(flags);
+
+  if (config_.static_shape) {
+  CHECK(config_.static_alloc) << "static_alloc must be True when static_shape is True";
+  }
+
+  // construct forward graph
+  {
+NodeEntryMap<size_t> dedup_out;
+for (const NodeEntry &nodeEntry : sym.outputs) {
+  if (dedup_out.find(nodeEntry) != dedup_out.end()) {
+NodePtr copy_node = Node::Create();
+copy_node->attrs.op = _copy_op;
+copy_node->attrs.name = nodeEntry.node->attrs.name + "_copy" +
+std::to_string(dedup_out[nodeEntry]++);
+copy_node->inputs.emplace_back(nodeEntry);
+if (_copy_op->attr_parser != nullptr) {
+  _copy_op->attr_parser(&(copy_node->attrs));
+}
+fwd_graph_.outputs.emplace_back(std::move(copy_node));
+  } else {
+dedup_out.emplace(nodeEntry, 0);
+fwd_graph_.outputs.push_back(nodeEntry);
+  }
+}
+
+const auto &idx = fwd_graph_.indexed_graph();
+CHECK_GE(idx.input_nodes().size(), 1)
+<< "CachedOp requires at least 1 input";
+
+std::vector<uint32_t> ref_count(idx.num_node_entries(), 0);
+for (const auto &i : idx.input_nodes())
+  ++ref_count[idx.entry_id(i, 0)];
+for (const auto &i : idx.outputs())
+  ++ref_count[idx.entry_id(i)];
+for (size_t i = 0; i < idx.num_nodes(); ++i) {
+  for (const auto &j : idx[i].inputs)
+++ref_count[idx.entry_id(j)];
+}
+
+fwd_graph_.attrs["forward_ref_count"] =
+std::make_shared<dmlc::any>(std::move(ref_count));
+  }
+
+  // Set param indices
+  {
+const auto& indexed_graph = fwd_graph_.indexed_graph();
+if (config_.data_indices.ndim() || config_.param_indices.ndim()) {
+  CHECK_EQ(config_.data_indices.ndim() + config_.param_indices.ndim(),
+   indexed_graph.input_nodes().size());
+} else {
+  std::vector<uint32_t> tmp;
+  tmp.reserve(indexed_graph.input_nodes().size());
+  for (size_t i = 0; i < indexed_graph.input_nodes().size(); ++i) {
+tmp.emplace_back(i);
+  }
+  config_.data_indices.assign(tmp.begin(), tmp.end());
+}
+  }
+}
+
+OpStatePtr CachedOpThreadSafe::DynamicForward(const Context& default_ctx,
+  const std::vector<NDArray*>& inputs,
+  const std::vector<NDArray*>& outputs) {
+  using namespace nnvm;
+  using namespace imperative;
+
+  {
+  auto state_ptr = GetCachedOpState(default_ctx);
+  auto op_state = OpStatePtr::Create<DynamicRuntime>();
+ 

[GitHub] [incubator-mxnet] eric-haibin-lin commented on issue #17208: gather_nd: check bound and wrap negative indices

2020-01-13 Thread GitBox
eric-haibin-lin commented on issue #17208: gather_nd: check bound and wrap 
negative indices
URL: https://github.com/apache/incubator-mxnet/pull/17208#issuecomment-573936800
 
 
   @szha we should not mix commits in a PR. It makes it hard to search for the commit that fixes the "NormalizeError" APIs.




[GitHub] [incubator-mxnet] apeforest opened a new pull request #17294: fix build from source instruction

2020-01-13 Thread GitBox
apeforest opened a new pull request #17294: fix build from source instruction
URL: https://github.com/apache/incubator-mxnet/pull/17294
 
 
   ## Description ##
   Need to add a blank line before bash section, otherwise the html page is not 
rendered properly
   https://mxnet.apache.org/get_started/ubuntu_setup.html
   
   - break long lines
   - remove unnecessary tab space
   - remove $ to make it consistent
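   
   An illustrative sketch of the blank-line fix (hypothetical snippet, not the exact file contents). Without a blank line, the fence gets folded into the preceding paragraph and shows up as plain text:
   
       Install the build tools:
       ```bash
       sudo apt-get install build-essential
       ```
   
   With the blank line added, the page renders a proper code block:
   
       Install the build tools:
   
       ```bash
       sudo apt-get install build-essential
       ```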
   
   @leezu @aaronmarkham please review.




[GitHub] [incubator-mxnet] larroy commented on issue #17181: Fail to build mxnet from source

2020-01-13 Thread GitBox
larroy commented on issue #17181: Fail to build mxnet from source
URL: https://github.com/apache/incubator-mxnet/issues/17181#issuecomment-573935556
 
 
   Which branch is this? master or 1.6.x?




[GitHub] [incubator-mxnet] larroy commented on a change in pull request #16654: Multithreaded Inference Support

2020-01-13 Thread GitBox
larroy commented on a change in pull request #16654: Multithreaded Inference 
Support
URL: https://github.com/apache/incubator-mxnet/pull/16654#discussion_r366090803
 
 

 ##
 File path: src/imperative/cached_op.h
 ##
 @@ -26,8 +26,191 @@
 #include 
 #include 
 #include 
+#include 
+#include "../operator/operator_common.h"
+#include "../operator/subgraph/common.h"
+#include "./imperative_utils.h"
 
 namespace mxnet {
+namespace {
+
+  static const char FULL[] = "full";
+  static const char FORWARD[] = "forward";
+  static const char BACKWARD[] = "backward";
+  static const char REF_COUNT[] = "ref_count";
+  static const char MEM_PLAN[] = "mem_plan";
+  static const char STORAGE_PLAN[] = "storage_plan";
+
+std::string AddPrefix(const std::string& prefix,
+  const std::string& s) {
+  return prefix + "_" + s;
+}
+
+/* \brief create a forward graph from the Symbol */
+void CreateForwardGraph(const nnvm::Symbol &sym, nnvm::Graph *fwd_graph) {
+  using namespace nnvm;
+  static const auto _copy_op = Op::Get("_copy");
+  NodeEntryMap<size_t> dedup_out;
 
 Review comment:
   Why does this need to map to size_t when you are treating it just as a set?




[GitHub] [incubator-mxnet] larroy commented on a change in pull request #16654: Multithreaded Inference Support

2020-01-13 Thread GitBox
larroy commented on a change in pull request #16654: Multithreaded Inference 
Support
URL: https://github.com/apache/incubator-mxnet/pull/16654#discussion_r366090981
 
 

 ##
 File path: src/imperative/cached_op.h
 ##
 @@ -26,8 +26,195 @@
 #include 
 #include 
 #include 
+#include 
+#include "../operator/operator_common.h"
+#include "../operator/subgraph/common.h"
+#include "./imperative_utils.h"
 
 namespace mxnet {
+namespace {
+
+  static const char FULL[] = "full";
+  static const char FORWARD[] = "forward";
+  static const char BACKWARD[] = "backward";
+  static const char REF_COUNT[] = "ref_count";
+  static const char MEM_PLAN[] = "mem_plan";
+  static const char STORAGE_PLAN[] = "storage_plan";
+
+std::string AddPrefix(const std::string& prefix,
+  const std::string& s) {
+  return prefix + "_" + s;
+}
+
+/* \brief create a forward graph from the Symbol */
+void CreateForwardGraph(const nnvm::Symbol &sym, nnvm::Graph *fwd_graph) {
+  using namespace nnvm;
+  static const auto _copy_op = Op::Get("_copy");
+  {
+NodeEntryMap<size_t> dedup_out;
+for (const NodeEntry& nodeEntry : sym.outputs) {
+  if (dedup_out.find(nodeEntry) != dedup_out.end()) {
+NodePtr copy_node = Node::Create();
+copy_node->attrs.op = _copy_op;
+copy_node->attrs.name =
+nodeEntry.node->attrs.name + "_copy" + std::to_string(dedup_out[nodeEntry]++);
+copy_node->inputs.emplace_back(nodeEntry);
+if (_copy_op->attr_parser != nullptr) {
+  _copy_op->attr_parser(&(copy_node->attrs));
+}
+fwd_graph->outputs.emplace_back(std::move(copy_node));
+  } else {
+dedup_out.emplace(nodeEntry, 0);
+fwd_graph->outputs.push_back(nodeEntry);
+  }
+}
+  }
+}
+
+/* \brief construct fwd_graph, grad_graph and full_graph from symbol */
+void CreateFullGraph(const nnvm::Symbol& sym,
+ nnvm::Graph* fwd_graph,
+ nnvm::Graph* grad_graph,
+ nnvm::Graph* full_graph,
+ std::vector<nnvm::NodeEntry>* ograd_entries,
+ std::unordered_map<uint32_t, uint32_t>* fwd_input_to_grad_output) {
+  using namespace nnvm;
+  static const std::vector<const Op*> zero_ops{Op::Get("zeros_like"), Op::Get("_zeros")};
+  CreateForwardGraph(sym, fwd_graph);
+
+  bool do_elim_common_expr = dmlc::GetEnv("MXNET_ELIMINATE_COMMON_EXPR", true);
+  if (do_elim_common_expr)
+*fwd_graph = exec::EliminateCommonExpr(std::move(*fwd_graph));
+
+  // construct backward graph
+  {
 
 Review comment:
   It's not about being used elsewhere, it's about readability. If you don't need to dive deep, it's better to read a function call and skip the details.
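   
   (A generic illustration of the point, not code from this PR: behind a well-named helper, the call site reads as a sentence and the loop body is opt-in.)
   
   #include <vector>
   
   struct Item { double price; int qty; };
   
   // Extracted helper: the name documents the intent, so a reader of the
   // call site can skip the loop body unless they need the details.
   static double CartTotal(const std::vector<Item>& cart) {
     double total = 0.0;
     for (const Item& item : cart) total += item.price * item.qty;
     return total;
   }
   
   int main() {
     const std::vector<Item> cart{{9.99, 2}, {3.50, 1}};
     return CartTotal(cart) > 0.0 ? 0 : 1;  // the details live in CartTotal
   }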




[GitHub] [incubator-mxnet] larroy commented on a change in pull request #16654: Multithreaded Inference Support

2020-01-13 Thread GitBox
larroy commented on a change in pull request #16654: Multithreaded Inference 
Support
URL: https://github.com/apache/incubator-mxnet/pull/16654#discussion_r366091401
 
 

 ##
 File path: src/imperative/cached_op_threadsafe.cc
 ##
 @@ -0,0 +1,373 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+#include 
+#include 
+#include "./imperative_utils.h"
+#include "../executor/exec_pass.h"
+#include "./cached_op_threadsafe.h"
+#include "../profiler/profiler.h"
+#include "../operator/operator_common.h"
+#include "../operator/subgraph/common.h"
+
+namespace mxnet {
+
+DMLC_REGISTER_PARAMETER(CachedOpThreadSafeConfig);
+
+constexpr uint32_t kEidNotExist = std::numeric_limits<uint32_t>::max();
+
+
+struct CachedOpThreadSafe::GraphInfo {
+  nnvm::Graph fwd_graph;
+};
+
+struct CachedOpThreadSafe::DynamicRuntime {
+  GraphInfo info;
+  std::vector<OpStatePtr> op_states;
+};
+
+OpStatePtr CachedOpThreadSafe::GetCachedOpState(
+const Context& ctx) {
+
+  for (const auto& i : cached_op_states_[ctx]) {
+// only create one state per device when not using static memory
+if (!config_.static_alloc || i.unique()) {
+  return i;
+}
+  }
+  nnvm::Graph full_graph;
+  auto state_ptr = OpStatePtr::Create<CachedOpState>(ctx, fwd_graph_, full_graph, false);
+
+  cached_op_states_[ctx].push_back(state_ptr);
+  return state_ptr;
+}
+
+
+CachedOpThreadSafe::CachedOpThreadSafe(const nnvm::Symbol& sym,
+   const std::vector<std::pair<std::string, std::string> >& flags) : CachedOp(sym, flags) {
+  using namespace nnvm;
+  using namespace imperative;
+  static const std::vector<const Op*> zero_ops{Op::Get("zeros_like"),
+Op::Get("_zeros")};
+  static const auto _copy_op = Op::Get("_copy");
+  config_.Init(flags);
+
+  if (config_.static_shape) {
+  CHECK(config_.static_alloc) << "static_alloc must be True when static_shape is True";
+  }
+
+  // construct forward graph
+  {
+NodeEntryMap<size_t> dedup_out;
+for (const NodeEntry &nodeEntry : sym.outputs) {
+  if (dedup_out.find(nodeEntry) != dedup_out.end()) {
+NodePtr copy_node = Node::Create();
+copy_node->attrs.op = _copy_op;
+copy_node->attrs.name = nodeEntry.node->attrs.name + "_copy" +
+std::to_string(dedup_out[nodeEntry]++);
+copy_node->inputs.emplace_back(nodeEntry);
+if (_copy_op->attr_parser != nullptr) {
+  _copy_op->attr_parser(&(copy_node->attrs));
+}
+fwd_graph_.outputs.emplace_back(std::move(copy_node));
+  } else {
+dedup_out.emplace(nodeEntry, 0);
+fwd_graph_.outputs.push_back(nodeEntry);
+  }
+}
+
+const auto &idx = fwd_graph_.indexed_graph();
+CHECK_GE(idx.input_nodes().size(), 1)
+<< "CachedOp requires at least 1 input";
+
+std::vector<uint32_t> ref_count(idx.num_node_entries(), 0);
+for (const auto &i : idx.input_nodes())
+  ++ref_count[idx.entry_id(i, 0)];
+for (const auto &i : idx.outputs())
+  ++ref_count[idx.entry_id(i)];
+for (size_t i = 0; i < idx.num_nodes(); ++i) {
+  for (const auto &j : idx[i].inputs)
+++ref_count[idx.entry_id(j)];
+}
+
+fwd_graph_.attrs["forward_ref_count"] =
+std::make_shared<dmlc::any>(std::move(ref_count));
+  }
+
+  // Set param indices
+  {
+const auto& indexed_graph = fwd_graph_.indexed_graph();
+if (config_.data_indices.ndim() || config_.param_indices.ndim()) {
+  CHECK_EQ(config_.data_indices.ndim() + config_.param_indices.ndim(),
+   indexed_graph.input_nodes().size());
+} else {
+  std::vector<uint32_t> tmp;
+  tmp.reserve(indexed_graph.input_nodes().size());
+  for (size_t i = 0; i < indexed_graph.input_nodes().size(); ++i) {
+tmp.emplace_back(i);
+  }
+  config_.data_indices.assign(tmp.begin(), tmp.end());
+}
+  }
+}
+
+OpStatePtr CachedOpThreadSafe::DynamicForward(const Context& default_ctx,
+  const std::vector<NDArray*>& inputs,
+  const std::vector<NDArray*>& outputs) {
+  using namespace nnvm;
+  using namespace imperative;
+
+  {
+  auto state_ptr = GetCachedOpState(default_ctx);
+  auto op_state = OpStatePtr::Create<DynamicRuntime>();
+  auto
