[GitHub] reminisce closed pull request #11937: Fix quantized graphpass bug
reminisce closed pull request #11937: Fix quantized graphpass bug URL: https://github.com/apache/incubator-mxnet/pull/11937 This is a PR merged from a forked repository. As GitHub hides the original diff on merge, it is displayed below for the sake of provenance: As this is a foreign pull request (from a fork), the diff is supplied below (as it won't show otherwise due to GitHub magic): diff --git a/src/operator/quantization/quantize_graph_pass.cc b/src/operator/quantization/quantize_graph_pass.cc index 5376a0ee9f1..10834868d2b 100644 --- a/src/operator/quantization/quantize_graph_pass.cc +++ b/src/operator/quantization/quantize_graph_pass.cc @@ -221,6 +221,9 @@ Graph QuantizeGraph(Graph &) { new_node->inputs.emplace_back(NodeEntry{dequantize_node, 0, 0}); mirror_map[e.node.get()] = std::move(dequantize_node); +} else if (mirror_node->op() != nullptr + && mirror_node->op()->name == "_contrib_quantize") { + new_node->inputs.emplace_back(NodeEntry{mirror_node->inputs[0].node, e.index, e.version}); } else { new_node->inputs.emplace_back(NodeEntry{mirror_node, e.index, e.version}); } diff --git a/tests/python/quantization/test_quantization.py b/tests/python/quantization/test_quantization.py index 359bbee569f..bfae58e49d0 100644 --- a/tests/python/quantization/test_quantization.py +++ b/tests/python/quantization/test_quantization.py @@ -396,6 +396,17 @@ def get_fp32_sym(): out_grad=False, preserve_shape=False, use_ignore=False, name='softmax') return sym +def get_fp32_residual(): +data = mx.sym.Variable('data') +conv = mx.sym.Convolution(data=data, num_filter=4, kernel=(1,1), pad=(0,0), + no_bias=True, name='conv') +bn = mx.sym.BatchNorm(data=conv, fix_gamma=False, eps=2e-5, momentum=0.9, name='bn') +act = mx.sym.Activation(data=bn + data, act_type='relu', name='relu') +pool = mx.sym.Pooling(act, kernel=(4, 4), pool_type='avg', name='pool') +fc = mx.sym.FullyConnected(pool, num_hidden=10, flatten=True, name='fc') +sym = mx.sym.SoftmaxOutput(fc, grad_scale=1, 
ignore_label=-1, multi_output=False, + out_grad=False, preserve_shape=False, use_ignore=False, name='softmax') +return sym @with_seed() def test_quantize_model(): @@ -463,6 +474,101 @@ def check_qsym_qdtype(qsym, qdtype): for qdtype in ['int8', 'uint8']: check_quantize_model(qdtype) +@with_seed() +def test_quantize_residual_unit(): +def check_quantize_model(qdtype): +if is_test_for_native_cpu(): +print('skipped testing quantized_residual_unit for native cpu since it is not supported yet') +return +elif qdtype == 'int8' and is_test_for_mkldnn(): +print('skipped testing quantized_residual_unit for mkldnn cpu int8 since it is not supported yet') +return +elif qdtype == 'uint8' and is_test_for_gpu(): +print('skipped testing quantized_residual_unit for gpu uint8 since it is not supported yet') +return + +def check_params(params, qparams, qsym=None): +if qsym is None: +assert len(params) == len(qparams) +for k, v in params.items(): +assert k in qparams +assert same(v.asnumpy(), qparams[k].asnumpy()) +else: +qparams_ground_truth = mx.contrib.quant._quantize_params(qsym, params) +assert len(qparams) == len(qparams_ground_truth) +for k, v in qparams_ground_truth.items(): +assert k in qparams +assert same(v.asnumpy(), qparams[k].asnumpy()) + +def check_qsym_calibrated(qsym): +attrs = qsym.attr_dict() +for k, v in attrs.items(): +if k.find('requantize_') != -1: +assert 'min_calib_range' in v +assert 'max_calib_range' in v + +def check_qsym_qdtype(qsym, qdtype): +attrs = qsym.attr_dict() +for k, v in attrs.items(): +if k.find('_quantize') != -1: +assert 'out_type' in v +assert v['out_type'] == qdtype + +def check_qsym_forward(qsym, qarg_params, qaux_params, data_shape, label_shape): +mod = mx.mod.Module(symbol=qsym, context=mx.current_context()) +mod.bind(for_training=False, + data_shapes=[('data', data_shape)], + label_shapes=[('softmax_label', label_shape)]) +mod.set_params(qarg_params, qaux_params) +data = [mx.random.uniform(-1.0, 1.0, shape=shape) for _, shape in 
mod.data_shapes] +batch = mx.io.DataBatch(data, []) +mod.forward(batch, is_train=False) +for output in mod.get_outputs(): +
[incubator-mxnet] branch master updated: Fix quantized graphpass bug (#11937)
This is an automated email from the ASF dual-hosted git repository. reminisce pushed a commit to branch master in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git The following commit(s) were added to refs/heads/master by this push: new d076d10 Fix quantized graphpass bug (#11937) d076d10 is described below commit d076d10b1372308336d1008858401d1c59ed5826 Author: Xinyu Chen AuthorDate: Sun Aug 12 12:51:13 2018 +0800 Fix quantized graphpass bug (#11937) * fix quantized graphpass bug * add residual quantization testcase * handle dtype and backend issues --- src/operator/quantization/quantize_graph_pass.cc | 3 + tests/python/quantization/test_quantization.py | 106 +++ 2 files changed, 109 insertions(+) diff --git a/src/operator/quantization/quantize_graph_pass.cc b/src/operator/quantization/quantize_graph_pass.cc index 5376a0e..1083486 100644 --- a/src/operator/quantization/quantize_graph_pass.cc +++ b/src/operator/quantization/quantize_graph_pass.cc @@ -221,6 +221,9 @@ Graph QuantizeGraph(Graph &) { new_node->inputs.emplace_back(NodeEntry{dequantize_node, 0, 0}); mirror_map[e.node.get()] = std::move(dequantize_node); +} else if (mirror_node->op() != nullptr + && mirror_node->op()->name == "_contrib_quantize") { + new_node->inputs.emplace_back(NodeEntry{mirror_node->inputs[0].node, e.index, e.version}); } else { new_node->inputs.emplace_back(NodeEntry{mirror_node, e.index, e.version}); } diff --git a/tests/python/quantization/test_quantization.py b/tests/python/quantization/test_quantization.py index b73a2a4..369a923 100644 --- a/tests/python/quantization/test_quantization.py +++ b/tests/python/quantization/test_quantization.py @@ -396,6 +396,17 @@ def get_fp32_sym(): out_grad=False, preserve_shape=False, use_ignore=False, name='softmax') return sym +def get_fp32_residual(): +data = mx.sym.Variable('data') +conv = mx.sym.Convolution(data=data, num_filter=4, kernel=(1,1), pad=(0,0), + no_bias=True, name='conv') +bn = mx.sym.BatchNorm(data=conv, 
fix_gamma=False, eps=2e-5, momentum=0.9, name='bn') +act = mx.sym.Activation(data=bn + data, act_type='relu', name='relu') +pool = mx.sym.Pooling(act, kernel=(4, 4), pool_type='avg', name='pool') +fc = mx.sym.FullyConnected(pool, num_hidden=10, flatten=True, name='fc') +sym = mx.sym.SoftmaxOutput(fc, grad_scale=1, ignore_label=-1, multi_output=False, + out_grad=False, preserve_shape=False, use_ignore=False, name='softmax') +return sym @with_seed() def test_quantize_model(): @@ -463,6 +474,101 @@ def test_quantize_model(): for qdtype in ['int8', 'uint8']: check_quantize_model(qdtype) +@with_seed() +def test_quantize_residual_unit(): +def check_quantize_model(qdtype): +if is_test_for_native_cpu(): +print('skipped testing quantized_residual_unit for native cpu since it is not supported yet') +return +elif qdtype == 'int8' and is_test_for_mkldnn(): +print('skipped testing quantized_residual_unit for mkldnn cpu int8 since it is not supported yet') +return +elif qdtype == 'uint8' and is_test_for_gpu(): +print('skipped testing quantized_residual_unit for gpu uint8 since it is not supported yet') +return + +def check_params(params, qparams, qsym=None): +if qsym is None: +assert len(params) == len(qparams) +for k, v in params.items(): +assert k in qparams +assert same(v.asnumpy(), qparams[k].asnumpy()) +else: +qparams_ground_truth = mx.contrib.quant._quantize_params(qsym, params) +assert len(qparams) == len(qparams_ground_truth) +for k, v in qparams_ground_truth.items(): +assert k in qparams +assert same(v.asnumpy(), qparams[k].asnumpy()) + +def check_qsym_calibrated(qsym): +attrs = qsym.attr_dict() +for k, v in attrs.items(): +if k.find('requantize_') != -1: +assert 'min_calib_range' in v +assert 'max_calib_range' in v + +def check_qsym_qdtype(qsym, qdtype): +attrs = qsym.attr_dict() +for k, v in attrs.items(): +if k.find('_quantize') != -1: +assert 'out_type' in v +assert v['out_type'] == qdtype + +def check_qsym_forward(qsym, qarg_params, qaux_params, data_shape, 
label_shape): +mod = mx.mod.Module(symbol=qsym, context=mx.current_context()) +mod.bind(for_training=False, + data_shapes=[('data', data_shape)], +
[GitHub] wkcn commented on a change in pull request #12047: [MXNET-779]Add DLPack Transformation API
wkcn commented on a change in pull request #12047: [MXNET-779]Add DLPack Transformation API URL: https://github.com/apache/incubator-mxnet/pull/12047#discussion_r209442740 ## File path: python/mxnet/_ctypes/ndarray.py ## @@ -31,21 +31,24 @@ class NDArrayBase(object): """Base data structure for ndarray""" -__slots__ = ["handle", "writable"] +__slots__ = ["handle", "writable", "dlpack"] # pylint: disable= no-member -def __init__(self, handle, writable=True): +def __init__(self, handle, writable=True, dlpack=None): Review comment: I made a copy of NDArray as the member of NDArrayDLManager, and the copy increases the refcount. I'm confused how to modify the PR. After creating a new NDArray(Python) from DLPack, then delete the old NDArray(Python) and PyCapsule(DLPack). Which object will call the deleter function? In my case, when `a` gets deleted, how does `b` hold the NDArrayDLManager? It seems that `b` only gets the pure data pointer from `DLManagedTensor::dl_tensor::data`, the type of the pointer is not a shared pointer. And how does `b` store the pointer to `NDArrayDLManager` in MXNet NDArray? This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] wkcn commented on a change in pull request #12047: [MXNET-779]Add DLPack Transformation API
wkcn commented on a change in pull request #12047: [MXNET-779]Add DLPack Transformation API URL: https://github.com/apache/incubator-mxnet/pull/12047#discussion_r209442740 ## File path: python/mxnet/_ctypes/ndarray.py ## @@ -31,21 +31,24 @@ class NDArrayBase(object): """Base data structure for ndarray""" -__slots__ = ["handle", "writable"] +__slots__ = ["handle", "writable", "dlpack"] # pylint: disable= no-member -def __init__(self, handle, writable=True): +def __init__(self, handle, writable=True, dlpack=None): Review comment: I made a copy of NDArray as the member of NDArrayDLManager, and the copy increases the refcount. I'm confused how to modify the PR. After creating a new NDArray(Python) from DLPack, then delete the old NDArray(Python) and PyCapsule(DLPack). Which object will call the deleter function? In my case, when `a` gets deleted, how does `b` hold the NDArrayDLManager? It seems that `b` only gets the pure data pointer from `DLManagedTensor::dl_tensor::data`, the type of the pointer is not a shared pointer. And how does `b` store the pointer to `NDArrayDLManager` in MXNet NDArray? This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] vandanavk commented on issue #12137: [MXNET-696] Fix undefined name errors
vandanavk commented on issue #12137: [MXNET-696] Fix undefined name errors URL: https://github.com/apache/incubator-mxnet/pull/12137#issuecomment-412317196 @piiswrong @burness @ZihengJiang @iflament @antinucleon This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] vandanavk opened a new pull request #12137: Fix undefined name errors
vandanavk opened a new pull request #12137: Fix undefined name errors URL: https://github.com/apache/incubator-mxnet/pull/12137 ## Description ## Pylint throws an error for variables that are used but not defined previously. Since these variables are found in paths that are not executed, they don't show up as errors at runtime. But if executed, they could throw a Python NameError. ## Checklist ## ### Essentials ### Please feel free to remove inapplicable items for your PR. - [ ] The PR title starts with [MXNET-$JIRA_ID], where $JIRA_ID refers to the relevant [JIRA issue](https://issues.apache.org/jira/projects/MXNET/issues) created (except PRs with tiny changes) - [ ] Changes are complete (i.e. I finished coding on this PR) - [ ] All changes have test coverage: - Unit tests are added for small changes to verify correctness (e.g. adding a new operator) - Nightly tests are added for complicated/long-running ones (e.g. changing distributed kvstore) - Build tests will be added for build configuration changes (e.g. adding a new build option with NCCL) - [ ] Code is well-documented: - For user-facing API changes, API doc string has been updated. - For new C++ functions in header files, their functionalities and arguments are documented. - For new examples, README.md is added to explain the what the example does, the source of the dataset, expected performance on test set and reference to the original paper if applicable - Check the API doc at http://mxnet-ci-doc.s3-accelerate.dualstack.amazonaws.com/PR-$PR_ID/$BUILD_ID/index.html - [ ] To the my best knowledge, examples are either not affected by this change, or have been fixed to be compatible with this change ### Changes ### - [ ] Feature1, tests, (and when applicable, API doc) - [ ] Feature2, tests, (and when applicable, API doc) ## Comments ## Related issues #8270, #11904 @cclauss This is an automated message from the Apache Git Service. 
To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] vandanavk commented on issue #12099: Fix precision issue of test case test_rnnrelu_bidirectional
vandanavk commented on issue #12099: Fix precision issue of test case test_rnnrelu_bidirectional URL: https://github.com/apache/incubator-mxnet/pull/12099#issuecomment-412311479 @eric-haibin-lin Please change the tag to pr-awaiting-merge This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] marcoabreu commented on issue #11493: Fix MXPredReshape in the c_predict_api
marcoabreu commented on issue #11493: Fix MXPredReshape in the c_predict_api URL: https://github.com/apache/incubator-mxnet/pull/11493#issuecomment-412311230 No worries, thanks for your contribution and addressing the comments. Seems like you hit a flaky test. Please just make an empty commit to trigger a new ci run. This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] adaaaaaa commented on issue #12132: [Feature Request] Implementation of L-BFGS optimizer
adaaaaaa commented on issue #12132: [Feature Request] Implementation of L-BFGS optimizer URL: https://github.com/apache/incubator-mxnet/issues/12132#issuecomment-412310204 follow with interest... This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] xinyu-intel commented on issue #11937: Fix quantized graphpass bug
xinyu-intel commented on issue #11937: Fix quantized graphpass bug URL: https://github.com/apache/incubator-mxnet/pull/11937#issuecomment-412310145 @reminisce yes, both gpu and mkldnn will come across below error when quantizing residual unit like resnet-50-v1: ``` RuntimeError: simple_bind error. Arguments: data: (4, 4, 10, 10) softmax_label: (4, 10) Error in operator _plus0: [08:21:17] /home/chenxiny/mxnet-gpu/3rdparty/mshadow/../../src/operator/tensor/../elemwise_op_common.h:133: Check failed: assign(, (*vec)[i]) Incompatible attr in node _plus0 at 1-th input: expected float32, got int8 ``` This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] zheng-da commented on a change in pull request #12106: [MXNET-795] Fix a bug that CutSubgraph works only when each subgraph has its distinct name
zheng-da commented on a change in pull request #12106: [MXNET-795] Fix a bug that CutSubgraph works only when each subgraph has its distinct name URL: https://github.com/apache/incubator-mxnet/pull/12106#discussion_r209438332 ## File path: tests/python/unittest/test_contrib_control_flow.py ## @@ -1765,6 +1767,45 @@ def hybrid_forward(self, F, data): assert_almost_equal(res1.asnumpy(), res2.asnumpy(), rtol=0.001, atol=0.0001) +def test_scope(): +class TestBlock1(gluon.HybridBlock): +def __init__(self, prefix=None, params=None): +super(TestBlock1, self).__init__(prefix=prefix, params=params) +def hybrid_forward(self, F, data): +(new_data, ) = F.contrib.cond( +data > 0.5, +then_func=lambda: data * 2, +else_func=lambda: data * 3, +name="my_cond", +) +return new_data +class TestBlock2(gluon.HybridBlock): +def __init__(self, prefix=None, params=None): +super(TestBlock2, self).__init__(prefix=prefix, params=params) +def hybrid_forward(self, F, data): +(new_data, ) = F.contrib.cond( +data > 0.5, +then_func=lambda: data * 2, +else_func=lambda: data * 3, +name="my_cond", +) +return new_data +AttrScope._subgraph_names = defaultdict(int) Review comment: good point. This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] junrushao1994 commented on a change in pull request #12106: [MXNET-795] Fix a bug that CutSubgraph works only when each subgraph has its distinct name
junrushao1994 commented on a change in pull request #12106: [MXNET-795] Fix a bug that CutSubgraph works only when each subgraph has its distinct name URL: https://github.com/apache/incubator-mxnet/pull/12106#discussion_r209438186 ## File path: tests/python/unittest/test_contrib_control_flow.py ## @@ -1765,6 +1767,45 @@ def hybrid_forward(self, F, data): assert_almost_equal(res1.asnumpy(), res2.asnumpy(), rtol=0.001, atol=0.0001) +def test_scope(): +class TestBlock1(gluon.HybridBlock): +def __init__(self, prefix=None, params=None): +super(TestBlock1, self).__init__(prefix=prefix, params=params) +def hybrid_forward(self, F, data): +(new_data, ) = F.contrib.cond( +data > 0.5, +then_func=lambda: data * 2, +else_func=lambda: data * 3, +name="my_cond", +) +return new_data +class TestBlock2(gluon.HybridBlock): +def __init__(self, prefix=None, params=None): +super(TestBlock2, self).__init__(prefix=prefix, params=params) +def hybrid_forward(self, F, data): +(new_data, ) = F.contrib.cond( +data > 0.5, +then_func=lambda: data * 2, +else_func=lambda: data * 3, +name="my_cond", +) +return new_data +AttrScope._subgraph_names = defaultdict(int) Review comment: @zheng-da Note that `_subgraph_names` are more like a global variable, I add this in case it is changed in other testcases. This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] hqucms commented on issue #11493: Fix MXPredReshape in the c_predict_api
hqucms commented on issue #11493: Fix MXPredReshape in the c_predict_api URL: https://github.com/apache/incubator-mxnet/pull/11493#issuecomment-412303107 @marcoabreu @anirudh2290 Sorry for the delay in response. I change to `rtol=1e-5, atol=1e-6` which are the ones used in https://github.com/apache/incubator-mxnet/blob/master/tests/python/mkl/test_mkldnn.py#L132. Please let me know if they are OK. This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] zheng-da commented on a change in pull request #12106: [MXNET-795] Fix a bug that CutSubgraph works only when each subgraph has its distinct name
zheng-da commented on a change in pull request #12106: [MXNET-795] Fix a bug that CutSubgraph works only when each subgraph has its distinct name URL: https://github.com/apache/incubator-mxnet/pull/12106#discussion_r209437878 ## File path: tests/python/unittest/test_contrib_control_flow.py ## @@ -1765,6 +1767,45 @@ def hybrid_forward(self, F, data): assert_almost_equal(res1.asnumpy(), res2.asnumpy(), rtol=0.001, atol=0.0001) +def test_scope(): +class TestBlock1(gluon.HybridBlock): +def __init__(self, prefix=None, params=None): +super(TestBlock1, self).__init__(prefix=prefix, params=params) +def hybrid_forward(self, F, data): +(new_data, ) = F.contrib.cond( +data > 0.5, +then_func=lambda: data * 2, +else_func=lambda: data * 3, +name="my_cond", +) +return new_data +class TestBlock2(gluon.HybridBlock): +def __init__(self, prefix=None, params=None): +super(TestBlock2, self).__init__(prefix=prefix, params=params) +def hybrid_forward(self, F, data): +(new_data, ) = F.contrib.cond( +data > 0.5, +then_func=lambda: data * 2, +else_func=lambda: data * 3, +name="my_cond", +) +return new_data +AttrScope._subgraph_names = defaultdict(int) Review comment: do we need to define this? This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] reminisce commented on issue #11937: Fix quantized graphpass bug
reminisce commented on issue #11937: Fix quantized graphpass bug URL: https://github.com/apache/incubator-mxnet/pull/11937#issuecomment-412300028 @xinyu-intel Thanks for the fix. Is the unit test the one that fails without this PR? This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] nswamy closed pull request #12129: update dmlc-core for security reason
nswamy closed pull request #12129: update dmlc-core for security reason URL: https://github.com/apache/incubator-mxnet/pull/12129 This is a PR merged from a forked repository. As GitHub hides the original diff on merge, it is displayed below for the sake of provenance: As this is a foreign pull request (from a fork), the diff is supplied below (as it won't show otherwise due to GitHub magic): diff --git a/3rdparty/dmlc-core b/3rdparty/dmlc-core index 649be18a8c5..958c22b32c1 16 --- a/3rdparty/dmlc-core +++ b/3rdparty/dmlc-core @@ -1 +1 @@ -Subproject commit 649be18a8c55c48517861d67158a45dec54992ee +Subproject commit 958c22b32c116ec967a9247d09eddb9c21ea6d4f This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[incubator-mxnet] branch master updated: update dmlc-core (#12129)
This is an automated email from the ASF dual-hosted git repository. nswamy pushed a commit to branch master in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git The following commit(s) were added to refs/heads/master by this push: new 096aeb6 update dmlc-core (#12129) 096aeb6 is described below commit 096aeb69785952dbac693aeb6789c8e8ca7acfbc Author: Hao Jin AuthorDate: Sat Aug 11 14:26:11 2018 -0400 update dmlc-core (#12129) --- 3rdparty/dmlc-core | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/3rdparty/dmlc-core b/3rdparty/dmlc-core index 649be18..958c22b 16 --- a/3rdparty/dmlc-core +++ b/3rdparty/dmlc-core @@ -1 +1 @@ -Subproject commit 649be18a8c55c48517861d67158a45dec54992ee +Subproject commit 958c22b32c116ec967a9247d09eddb9c21ea6d4f
[incubator-mxnet] branch master updated: zipfian random sampler without replacement (#12113)
This is an automated email from the ASF dual-hosted git repository. zhasheng pushed a commit to branch master in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git The following commit(s) were added to refs/heads/master by this push: new 6f7254c zipfian random sampler without replacement (#12113) 6f7254c is described below commit 6f7254c91709904a9fb6290f1998fcf2da818d0e Author: Haibin Lin AuthorDate: Sat Aug 11 10:55:35 2018 -0700 zipfian random sampler without replacement (#12113) * code compiles * update doc * fix bug and add test * fix lint --- src/operator/random/unique_sample_op.cc | 72 ++ src/operator/random/unique_sample_op.h | 170 tests/python/unittest/test_random.py| 17 3 files changed, 259 insertions(+) diff --git a/src/operator/random/unique_sample_op.cc b/src/operator/random/unique_sample_op.cc new file mode 100644 index 000..4936669 --- /dev/null +++ b/src/operator/random/unique_sample_op.cc @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/*! 
+ * Copyright (c) 2016 by Contributors + * \file sample_op.cc + * \brief CPU Implementation of unique sample op + */ + +#include "./unique_sample_op.h" +#include "../tensor/init_op.h" + +namespace mxnet { +namespace op { + +DMLC_REGISTER_PARAMETER(SampleUniqueZifpianParam); + +#define MXNET_OPERATOR_REGISTER_UNIQUE_SAMPLE(name, ParamType) \ + NNVM_REGISTER_OP(name) \ + .set_num_inputs(0) \ + .set_num_outputs(2) \ + .set_attr_parser(ParamParser) \ + .set_attr("FResourceRequest", UniqueSampleResource)\ + .add_arguments(ParamType::__FIELDS__()) + +MXNET_OPERATOR_REGISTER_UNIQUE_SAMPLE(_sample_unique_zipfian, + SampleUniqueZifpianParam) +.describe(R"code(Draw random samples from an an approximately log-uniform +or Zipfian distribution without replacement. + +This operation takes a 2-D shape `(batch_size, num_sampled)`, +and randomly generates *num_sampled* samples from the range of integers [0, range_max) +for each instance in the batch. + +The elements in each instance are drawn without replacement from the base distribution. +The base distribution for this operator is an approximately log-uniform or Zipfian distribution: + + P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1) + +Additionaly, it also returns the number of trials used to obtain `num_sampled` samples for +each instance in the batch. 
+ +Example:: + + samples, trials = _sample_unique_zipfian(75, shape=(4, 8192)) + unique(samples[0]) = 8192 + unique(samples[3]) = 8192 + trials[0] = 16435 + +)code" ADD_FILELINE) +.set_attr("FInferShape", SampleUniqueShape) +.set_attr("FInferType", SampleUniqueType) +.set_attr("FCompute", SampleUniqueZifpian); + +} // namespace op +} // namespace mxnet diff --git a/src/operator/random/unique_sample_op.h b/src/operator/random/unique_sample_op.h new file mode 100644 index 000..2e93b50 --- /dev/null +++ b/src/operator/random/unique_sample_op.h @@ -0,0 +1,170 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/*! + * Copyright (c) 2018 by Contributors + * \file sample_op.h + * \brief Elementary
[GitHub] szha closed pull request #12113: zipfian random sampler without replacement
szha closed pull request #12113: zipfian random sampler without replacement URL: https://github.com/apache/incubator-mxnet/pull/12113 This is a PR merged from a forked repository. As GitHub hides the original diff on merge, it is displayed below for the sake of provenance: As this is a foreign pull request (from a fork), the diff is supplied below (as it won't show otherwise due to GitHub magic): diff --git a/src/operator/random/unique_sample_op.cc b/src/operator/random/unique_sample_op.cc new file mode 100644 index 000..49366697ed6 --- /dev/null +++ b/src/operator/random/unique_sample_op.cc @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/*! 
+ * Copyright (c) 2016 by Contributors + * \file sample_op.cc + * \brief CPU Implementation of unique sample op + */ + +#include "./unique_sample_op.h" +#include "../tensor/init_op.h" + +namespace mxnet { +namespace op { + +DMLC_REGISTER_PARAMETER(SampleUniqueZifpianParam); + +#define MXNET_OPERATOR_REGISTER_UNIQUE_SAMPLE(name, ParamType) \ + NNVM_REGISTER_OP(name) \ + .set_num_inputs(0) \ + .set_num_outputs(2) \ + .set_attr_parser(ParamParser) \ + .set_attr("FResourceRequest", UniqueSampleResource)\ + .add_arguments(ParamType::__FIELDS__()) + +MXNET_OPERATOR_REGISTER_UNIQUE_SAMPLE(_sample_unique_zipfian, + SampleUniqueZifpianParam) +.describe(R"code(Draw random samples from an an approximately log-uniform +or Zipfian distribution without replacement. + +This operation takes a 2-D shape `(batch_size, num_sampled)`, +and randomly generates *num_sampled* samples from the range of integers [0, range_max) +for each instance in the batch. + +The elements in each instance are drawn without replacement from the base distribution. +The base distribution for this operator is an approximately log-uniform or Zipfian distribution: + + P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1) + +Additionaly, it also returns the number of trials used to obtain `num_sampled` samples for +each instance in the batch. 
+ +Example:: + + samples, trials = _sample_unique_zipfian(75, shape=(4, 8192)) + unique(samples[0]) = 8192 + unique(samples[3]) = 8192 + trials[0] = 16435 + +)code" ADD_FILELINE) +.set_attr("FInferShape", SampleUniqueShape) +.set_attr("FInferType", SampleUniqueType) +.set_attr("FCompute", SampleUniqueZifpian); + +} // namespace op +} // namespace mxnet diff --git a/src/operator/random/unique_sample_op.h b/src/operator/random/unique_sample_op.h new file mode 100644 index 000..2e93b501f1b --- /dev/null +++ b/src/operator/random/unique_sample_op.h @@ -0,0 +1,170 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +/*! + * Copyright (c) 2018 by Contributors + * \file sample_op.h + * \brief Elementary unique sampling operators + */ +#ifndef MXNET_OPERATOR_RANDOM_UNIQUE_SAMPLE_OP_H_ +#define MXNET_OPERATOR_RANDOM_UNIQUE_SAMPLE_OP_H_ + +#include +#include +#include +#include +#include +#include +#include +#include "../mxnet_op.h" +#include "../operator_common.h" +#include "./sampler.h" + +namespace mxnet { +namespace op { + +struct SampleUniqueZifpianParam : public dmlc::Parameter { + int range_max; + TShape shape; +
[GitHub] hhyasdf commented on issue #7924: get stuck training on multiple machines error
hhyasdf commented on issue #7924: get stuck training on multiple machines error URL: https://github.com/apache/incubator-mxnet/issues/7924#issuecomment-412286141 It may be because there is an extra firewall in CentOS besides iptables, and you should close it. This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] tqchen commented on a change in pull request #12047: [MXNET-779]Add DLPack Transformation API
tqchen commented on a change in pull request #12047: [MXNET-779]Add DLPack Transformation API URL: https://github.com/apache/incubator-mxnet/pull/12047#discussion_r209430981 ## File path: python/mxnet/_ctypes/ndarray.py ## @@ -31,21 +31,24 @@ class NDArrayBase(object): """Base data structure for ndarray""" -__slots__ = ["handle", "writable"] +__slots__ = ["handle", "writable", "dlpack"] # pylint: disable= no-member -def __init__(self, handle, writable=True): +def __init__(self, handle, writable=True, dlpack=None): Review comment: If you copy the NDArray, they hold the same shared_ptr to the data, note that shared_ptr can be copied, and its ref counter is automatically managed This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] tqchen commented on a change in pull request #12047: [MXNET-779]Add DLPack Transformation API
tqchen commented on a change in pull request #12047: [MXNET-779]Add DLPack Transformation API URL: https://github.com/apache/incubator-mxnet/pull/12047#discussion_r209430981 ## File path: python/mxnet/_ctypes/ndarray.py ## @@ -31,21 +31,24 @@ class NDArrayBase(object): """Base data structure for ndarray""" -__slots__ = ["handle", "writable"] +__slots__ = ["handle", "writable", "dlpack"] # pylint: disable= no-member -def __init__(self, handle, writable=True): +def __init__(self, handle, writable=True, dlpack=None): Review comment: If you copy the NDArray, they hold the same shared_ptr to the data This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] larroy commented on issue #12118: fix potential floating number overflow, enable float16
larroy commented on issue #12118: fix potential floating number overflow, enable float16 URL: https://github.com/apache/incubator-mxnet/pull/12118#issuecomment-412278531 ssize_t then? It’s not portable otherwise This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] marcoabreu closed pull request #12125: CI scripts refinements. Separate Py2 and Py3 installs cripts. Fix perms.
marcoabreu closed pull request #12125: CI scripts refinements. Separate Py2 and Py3 installs cripts. Fix perms. URL: https://github.com/apache/incubator-mxnet/pull/12125 This is a PR merged from a forked repository. As GitHub hides the original diff on merge, it is displayed below for the sake of provenance: As this is a foreign pull request (from a fork), the diff is supplied below (as it won't show otherwise due to GitHub magic): diff --git a/ci/build.py b/ci/build.py index a9d6a63537f..0a1ad4cf575 100755 --- a/ci/build.py +++ b/ci/build.py @@ -43,6 +43,43 @@ CCACHE_MAXSIZE = '500G' + + +def retry(ExceptionToCheck, tries=4, delay_s=1, backoff=2): +"""Retry calling the decorated function using an exponential backoff. + +http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/ +original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry + +:param ExceptionToCheck: the exception to check. may be a tuple of +exceptions to check +:type ExceptionToCheck: Exception or tuple +:param tries: number of times to try (not retry) before giving up +:type tries: int +:param delay_s: initial delay between retries in seconds +:type delay_s: int +:param backoff: backoff multiplier e.g. 
value of 2 will double the delay +each retry +:type backoff: int +""" +import time +from functools import wraps +def decorated_retry(f): +@wraps(f) +def f_retry(*args, **kwargs): +mtries, mdelay = tries, delay_s +while mtries > 1: +try: +return f(*args, **kwargs) +except ExceptionToCheck as e: +logging.warning("Exception: %s, Retrying in %d seconds...", str(e), mdelay) +time.sleep(mdelay) +mtries -= 1 +mdelay *= backoff +return f(*args, **kwargs) +return f_retry # true decorator +return decorated_retry + def under_ci() -> bool: """:return: True if we run in Jenkins.""" return 'JOB_NAME' in os.environ @@ -77,9 +114,8 @@ def build_docker(platform: str, docker_binary: str, registry: str, num_retries: :param num_retries: Number of retries to build the docker image :return: Id of the top level image """ - tag = get_docker_tag(platform=platform, registry=registry) -logging.info("Building container tagged '%s' with %s", tag, docker_binary) +logging.info("Building docker container tagged '%s' with %s", tag, docker_binary) # # We add a user with the same group as the executing non-root user so files created in the # container match permissions of the local user. Same for the group. @@ -91,40 +127,24 @@ def build_docker(platform: str, docker_binary: str, registry: str, num_retries: # docker pull see: docker_cache.load_docker_cache # # This doesn't work with multi head docker files. 
-# - -for i in range(num_retries): -logging.info('%d out of %d tries to build the docker image.', i + 1, num_retries) - -cmd = [docker_binary, "build", - "-f", get_dockerfile(platform), - "--build-arg", "USER_ID={}".format(os.getuid()), - "--build-arg", "GROUP_ID={}".format(os.getgid()), - "--cache-from", tag, - "-t", tag, - "docker"] +# +cmd = [docker_binary, "build", + "-f", get_dockerfile(platform), + "--build-arg", "USER_ID={}".format(os.getuid()), + "--build-arg", "GROUP_ID={}".format(os.getgid()), + "--cache-from", tag, + "-t", tag, + "docker"] + +@retry(subprocess.CalledProcessError, tries=num_retries) +def run_cmd(): logging.info("Running command: '%s'", ' '.join(cmd)) -try: -check_call(cmd) -# Docker build was successful. Call break to break out of the retry mechanism -break -except subprocess.CalledProcessError as e: -saved_exception = e -logging.error('Failed to build docker image') -# Building the docker image failed. Call continue to trigger the retry mechanism -continue -else: -# Num retries exceeded -logging.exception('Exception during build of docker image', saved_exception) -logging.fatal('Failed to build the docker image, aborting...') -sys.exit(1) +check_call(cmd) +run_cmd() # Get image id by reading the tag. It's guaranteed (except race condition) that the tag exists. Otherwise, the # check_call would have failed -image_id = _get_local_image_id(docker_binary=docker_binary, docker_tag=tag) -if not image_id: -raise FileNotFoundError('Unable to find docker image id matching with {}'.format(tag)) -return image_id +return _get_local_image_id(docker_binary=docker_binary, docker_tag=tag) def
[incubator-mxnet] branch master updated: CI scripts refinements. Separate Py2 and Py3 installs cripts. Fix perms. (#12125)
This is an automated email from the ASF dual-hosted git repository. marcoabreu pushed a commit to branch master in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git The following commit(s) were added to refs/heads/master by this push: new 1b60478 CI scripts refinements. Separate Py2 and Py3 installs cripts. Fix perms. (#12125) 1b60478 is described below commit 1b60478db2c66ef82f5b3ed48b12832d6da72e61 Author: Pedro Larroy <928489+lar...@users.noreply.github.com> AuthorDate: Sat Aug 11 16:20:29 2018 +0200 CI scripts refinements. Separate Py2 and Py3 installs cripts. Fix perms. (#12125) --- ci/build.py| 97 +- ci/docker/Dockerfile.build.android_armv7 | 0 ci/docker/Dockerfile.build.android_armv8 | 0 ci/docker/Dockerfile.build.armv6 | 0 ci/docker/Dockerfile.build.armv7 | 0 ci/docker/Dockerfile.build.armv8 | 0 ci/docker/Dockerfile.build.centos7_cpu | 0 ci/docker/Dockerfile.build.centos7_gpu | 0 ci/docker/Dockerfile.build.jetson | 0 ci/docker/Dockerfile.build.ubuntu_base_cpu | 0 ci/docker/Dockerfile.build.ubuntu_base_gpu | 0 ci/docker/Dockerfile.build.ubuntu_blc | 9 +- ci/docker/Dockerfile.build.ubuntu_build_cuda | 14 +++- ci/docker/Dockerfile.build.ubuntu_cpu | 7 +- ci/docker/Dockerfile.build.ubuntu_gpu | 7 +- ci/docker/Dockerfile.build.ubuntu_gpu_tensorrt | 10 ++- ci/docker/Dockerfile.build.ubuntu_nightly_cpu | 7 +- ci/docker/Dockerfile.build.ubuntu_nightly_gpu | 7 +- ci/docker/Dockerfile.build.ubuntu_rat | 0 .../{ubuntu_python.sh => ubuntu_python2.sh}| 6 +- .../{ubuntu_python.sh => ubuntu_python3.sh}| 6 +- ci/docker/runtime_functions.sh | 1 + ci/test_docker_cache.py| 0 23 files changed, 111 insertions(+), 60 deletions(-) diff --git a/ci/build.py b/ci/build.py index a9d6a63..0a1ad4c 100755 --- a/ci/build.py +++ b/ci/build.py @@ -43,6 +43,43 @@ from util import * CCACHE_MAXSIZE = '500G' + + +def retry(ExceptionToCheck, tries=4, delay_s=1, backoff=2): +"""Retry calling the decorated function using an exponential backoff. 
+ +http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/ +original from: http://wiki.python.org/moin/PythonDecoratorLibrary#Retry + +:param ExceptionToCheck: the exception to check. may be a tuple of +exceptions to check +:type ExceptionToCheck: Exception or tuple +:param tries: number of times to try (not retry) before giving up +:type tries: int +:param delay_s: initial delay between retries in seconds +:type delay_s: int +:param backoff: backoff multiplier e.g. value of 2 will double the delay +each retry +:type backoff: int +""" +import time +from functools import wraps +def decorated_retry(f): +@wraps(f) +def f_retry(*args, **kwargs): +mtries, mdelay = tries, delay_s +while mtries > 1: +try: +return f(*args, **kwargs) +except ExceptionToCheck as e: +logging.warning("Exception: %s, Retrying in %d seconds...", str(e), mdelay) +time.sleep(mdelay) +mtries -= 1 +mdelay *= backoff +return f(*args, **kwargs) +return f_retry # true decorator +return decorated_retry + def under_ci() -> bool: """:return: True if we run in Jenkins.""" return 'JOB_NAME' in os.environ @@ -77,9 +114,8 @@ def build_docker(platform: str, docker_binary: str, registry: str, num_retries: :param num_retries: Number of retries to build the docker image :return: Id of the top level image """ - tag = get_docker_tag(platform=platform, registry=registry) -logging.info("Building container tagged '%s' with %s", tag, docker_binary) +logging.info("Building docker container tagged '%s' with %s", tag, docker_binary) # # We add a user with the same group as the executing non-root user so files created in the # container match permissions of the local user. Same for the group. @@ -91,40 +127,24 @@ def build_docker(platform: str, docker_binary: str, registry: str, num_retries: # docker pull see: docker_cache.load_docker_cache # # This doesn't work with multi head docker files. 
-# - -for i in range(num_retries): -logging.info('%d out of %d tries to build the docker image.', i + 1, num_retries) - -cmd = [docker_binary, "build", - "-f", get_dockerfile(platform), - "--build-arg", "USER_ID={}".format(os.getuid()), - "--build-arg", "GROUP_ID={}".format(os.getgid()), - "--cache-from", tag, -
[GitHub] kppl commented on issue #1161: ImpportError: No module named skimage when running Neural-style example
kppl commented on issue #1161: ImpportError: No module named skimage when running Neural-style example URL: https://github.com/apache/incubator-mxnet/issues/1161#issuecomment-412275820 `from skimage import transform ` This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] pengzhao-intel commented on issue #12134: why MKL and MKL-DNN can't be used simultaneously in ChooseBlas.cmake
pengzhao-intel commented on issue #12134: why MKL and MKL-DNN can't be used simultaneously in ChooseBlas.cmake URL: https://github.com/apache/incubator-mxnet/issues/12134#issuecomment-412275435 yes, it's a little tricky and this is a piece of legacy code which we have wanted to refine for a while. You can refer to this issue and @jinhuang415 can help with the details. https://github.com/apache/incubator-mxnet/issues/10175 This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] YouRancestor opened a new pull request #12136: accept GPU data as input
YouRancestor opened a new pull request #12136: accept GPU data as input URL: https://github.com/apache/incubator-mxnet/pull/12136 ## Description ## Add a C Predict API interface MXPredSetInputGPU to accept input data on GPU. This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[incubator-mxnet] branch master updated: fix cython nnvm include path (#12133)
This is an automated email from the ASF dual-hosted git repository. marcoabreu pushed a commit to branch master in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git The following commit(s) were added to refs/heads/master by this push: new 9fd45b8 fix cython nnvm include path (#12133) 9fd45b8 is described below commit 9fd45b85804ee7a52a79a6a3aa7ff949dfb7c5d1 Author: Jingbei Li AuthorDate: Sat Aug 11 20:49:32 2018 +0800 fix cython nnvm include path (#12133) #12123 --- python/setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/setup.py b/python/setup.py index ec8414c..add5e66 100644 --- a/python/setup.py +++ b/python/setup.py @@ -89,7 +89,7 @@ def config_cython(): ret.append(Extension( "mxnet/%s/.%s" % (subdir, fn[:-4]), ["mxnet/cython/%s" % fn], -include_dirs=["../include/", "../3rdparty/nnvm/include"], +include_dirs=["../include/", "../3rdparty/tvm/nnvm/include"], library_dirs=library_dirs, libraries=libraries, language="c++"))
[GitHub] marcoabreu closed issue #12123: cython nnvm include path error
marcoabreu closed issue #12123: cython nnvm include path error URL: https://github.com/apache/incubator-mxnet/issues/12123 This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] marcoabreu closed pull request #12133: fix cython nnvm include path
marcoabreu closed pull request #12133: fix cython nnvm include path URL: https://github.com/apache/incubator-mxnet/pull/12133 This is a PR merged from a forked repository. As GitHub hides the original diff on merge, it is displayed below for the sake of provenance: As this is a foreign pull request (from a fork), the diff is supplied below (as it won't show otherwise due to GitHub magic): diff --git a/python/setup.py b/python/setup.py index ec8414c8521..add5e6681fe 100644 --- a/python/setup.py +++ b/python/setup.py @@ -89,7 +89,7 @@ def config_cython(): ret.append(Extension( "mxnet/%s/.%s" % (subdir, fn[:-4]), ["mxnet/cython/%s" % fn], -include_dirs=["../include/", "../3rdparty/nnvm/include"], +include_dirs=["../include/", "../3rdparty/tvm/nnvm/include"], library_dirs=library_dirs, libraries=libraries, language="c++")) This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] marcoabreu commented on issue #12134: why MKL and MKL-DNN can't be used simultaneously in ChooseBlas.cmake
marcoabreu commented on issue #12134: why MKL and MKL-DNN can't be used simultaneously in ChooseBlas.cmake URL: https://github.com/apache/incubator-mxnet/issues/12134#issuecomment-412273016 @lebeg This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[incubator-mxnet-site] branch asf-site updated: Bump the publish timestamp.
This is an automated email from the ASF dual-hosted git repository. zhasheng pushed a commit to branch asf-site in repository https://gitbox.apache.org/repos/asf/incubator-mxnet-site.git The following commit(s) were added to refs/heads/asf-site by this push: new 98d8109 Bump the publish timestamp. 98d8109 is described below commit 98d8109cab20eb631a2ce8dd0cb8b44620bbf35f Author: mxnet-ci AuthorDate: Sat Aug 11 12:46:06 2018 + Bump the publish timestamp. --- date.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/date.txt b/date.txt new file mode 100644 index 000..a1c6aae --- /dev/null +++ b/date.txt @@ -0,0 +1 @@ +Sat Aug 11 12:46:06 UTC 2018
[GitHub] squidszyd closed issue #12135: Writing own C++ Data Iter
squidszyd closed issue #12135: Writing own C++ Data Iter URL: https://github.com/apache/incubator-mxnet/issues/12135 This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] squidszyd opened a new issue #12135: Writing own C++ Data Iter
squidszyd opened a new issue #12135: Writing own C++ Data Iter URL: https://github.com/apache/incubator-mxnet/issues/12135 https://github.com/apache/incubator-mxnet/blob/c44f16b0909d94c9beaf9c5fc0773855bbc91807/src/io/inst_vector.h#L147 I'm currently implementing a data iterator for generating data for a multi-task model, which needs various data and labels as input. However, as I went on, I found that the data holder `InstVector` is designed to hold only one data and one label. Should the `Push` method of `InstVector` be designed like this ? ``` inline void Push(unsigned index, std::vector>& dshape, std::vector>& lshape) // b.t.w., it is not friendly for label other than 1-Dim ``` Or, is there another way? This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] luchangli03 commented on issue #12134: why MKL and MKL-DNN can't be used simultaneously in ChooseBlas.cmake
luchangli03 commented on issue #12134: why MKL and MKL-DNN can't be used simultaneously in ChooseBlas.cmake URL: https://github.com/apache/incubator-mxnet/issues/12134#issuecomment-412268644 in the 3rdparty/mshadow/cmake/mshadow.cmake the codes are also if(MKL_FOUND) if(USE_MKLML_MKL) set(BLAS "open") else() set(BLAS "MKL") endif() endif() I can't understand This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] luchangli03 opened a new issue #12134: why MKL and MKL-DNN can't be used simultaneously in ChooseBlas.cmake
luchangli03 opened a new issue #12134: why MKL and MKL-DNN can't be used simultaneously in ChooseBlas.cmake URL: https://github.com/apache/incubator-mxnet/issues/12134 if(MKL_FOUND) if(USE_MKLDNN) set(BLAS "open") else() set(BLAS "MKL") endif() endif() This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] perdasilva commented on issue #12090: [MXNET-791][WIP] Pick with negative indices
perdasilva commented on issue #12090: [MXNET-791][WIP] Pick with negative indices URL: https://github.com/apache/incubator-mxnet/pull/12090#issuecomment-412266275 I believe that's all done now. Thanks for everything ^^ This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] lupesko commented on a change in pull request #12102: site-wide social include
lupesko commented on a change in pull request #12102: site-wide social include URL: https://github.com/apache/incubator-mxnet/pull/12102#discussion_r209420814 ## File path: docs/community/contribute.md ## @@ -62,13 +62,37 @@ To join the MXNet slack channel send request to the contributor mailing list. ### Social Media -Keep connected with the latest MXNet news and updates on [Twitter](https://twitter.com/apachemxnet) and [Reddit](https://reddit.com/r/mxnet). Also, subscribe to the [MXNet YouTube channel](https://www.youtube.com/channel/UCQua2ZAkbr_Shsgfk1LCy6A). +Keep connected with the latest MXNet news and updates. Review comment: Cool. > I might need to requestion a 4k monitor I'm sure you can find a manager at AWS that will approve such a request... This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] petronny edited a comment on issue #12123: cython nnvm include path error
petronny edited a comment on issue #12123: cython nnvm include path error URL: https://github.com/apache/incubator-mxnet/issues/12123#issuecomment-412259848 Sorry, I'm afraid not because I have a poor network connection to apache JIRA... But I have created a PR on github, will it work? This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] petronny opened a new pull request #12133: fix cython nnvm include path
petronny opened a new pull request #12133: fix cython nnvm include path URL: https://github.com/apache/incubator-mxnet/pull/12133 ## Description ## fix cython nnvm include path ## Checklist ## ### Essentials ### Please feel free to remove inapplicable items for your PR. - [ ] The PR title starts with [MXNET-$JIRA_ID], where $JIRA_ID refers to the relevant [JIRA issue](https://issues.apache.org/jira/projects/MXNET/issues) created (except PRs with tiny changes) - [ ] Changes are complete (i.e. I finished coding on this PR) - [ ] All changes have test coverage: - Unit tests are added for small changes to verify correctness (e.g. adding a new operator) - Nightly tests are added for complicated/long-running ones (e.g. changing distributed kvstore) - Build tests will be added for build configuration changes (e.g. adding a new build option with NCCL) - [ ] Code is well-documented: - For user-facing API changes, API doc string has been updated. - For new C++ functions in header files, their functionalities and arguments are documented. - For new examples, README.md is added to explain the what the example does, the source of the dataset, expected performance on test set and reference to the original paper if applicable - Check the API doc at http://mxnet-ci-doc.s3-accelerate.dualstack.amazonaws.com/PR-$PR_ID/$BUILD_ID/index.html - [ ] To the my best knowledge, examples are either not affected by this change, or have been fixed to be compatible with this change ### Changes ### - [ ] fix cython nnvm include path ## Comments ## - If this change is a backward incompatible change, why must this change be made. - Interesting edge cases to note here #12123 This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] petronny commented on issue #12123: cython nnvm include path error
petronny commented on issue #12123: cython nnvm include path error URL: https://github.com/apache/incubator-mxnet/issues/12123#issuecomment-412259848 Sorry, I'm afraid not because I have a poor network connection to apache JIRA... This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] kaleidoscopical commented on issue #12001: SyncBatchNorm problems
kaleidoscopical commented on issue #12001: SyncBatchNorm problems URL: https://github.com/apache/incubator-mxnet/issues/12001#issuecomment-412258741 @safrooze I have tried both of them. While the `nd` version seems to perform similarly as the original `BatchNorm`, the `sym` version fails when calling `asnumpy()`. This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] wkcn commented on a change in pull request #12047: [MXNET-779]Add DLPack Transformation API
wkcn commented on a change in pull request #12047: [MXNET-779]Add DLPack Transformation API URL: https://github.com/apache/incubator-mxnet/pull/12047#discussion_r209418577 ## File path: src/c_api/c_api.cc ## @@ -500,6 +500,33 @@ int MXNDArrayGetData(NDArrayHandle handle, API_END(); } +int MXNDArrayToDLPack(NDArrayHandle handle, + DLManagedTensorHandle *out_dlpack) { + API_BEGIN(); + NDArray *arr = static_cast(handle); + *out_dlpack = arr->ToDLPack(); + API_END(); +} + +int MXNDArrayFromDLPack(DLManagedTensorHandle dlpack, +NDArrayHandle *out_handle) { + API_BEGIN(); + NDArray *pdata = new NDArray(); + *pdata = NDArray::FromDLPack( Review comment: Does it need Rvalue Referene? This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] wkcn commented on a change in pull request #12047: [MXNET-779]Add DLPack Transformation API
wkcn commented on a change in pull request #12047: [MXNET-779]Add DLPack Transformation API URL: https://github.com/apache/incubator-mxnet/pull/12047#discussion_r209418577 ## File path: src/c_api/c_api.cc ## @@ -500,6 +500,33 @@ int MXNDArrayGetData(NDArrayHandle handle, API_END(); } +int MXNDArrayToDLPack(NDArrayHandle handle, + DLManagedTensorHandle *out_dlpack) { + API_BEGIN(); + NDArray *arr = static_cast(handle); + *out_dlpack = arr->ToDLPack(); + API_END(); +} + +int MXNDArrayFromDLPack(DLManagedTensorHandle dlpack, +NDArrayHandle *out_handle) { + API_BEGIN(); + NDArray *pdata = new NDArray(); + *pdata = NDArray::FromDLPack( Review comment: Does it need Rvalue Referene? This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] apeforest commented on issue #11773: Update PyPI version number
apeforest commented on issue #11773: Update PyPI version number URL: https://github.com/apache/incubator-mxnet/pull/11773#issuecomment-412255848 @sandeep-krishnamurthy re-build. Please approve. Thanks! This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] wkcn commented on a change in pull request #12047: [MXNET-779]Add DLPack Transformation API
wkcn commented on a change in pull request #12047: [MXNET-779]Add DLPack Transformation API URL: https://github.com/apache/incubator-mxnet/pull/12047#discussion_r209418365 ## File path: src/c_api/c_api.cc ## @@ -494,6 +494,57 @@ int MXNDArrayGetData(NDArrayHandle handle, API_END(); } +int MXNDArrayToDLPack(NDArrayHandle handle, + DLManagedTensorHandle *out_dlpack) { + API_BEGIN(); + NDArray *arr = static_cast(handle); + *out_dlpack = arr->ToDLPack(); + API_END(); +} + +int MXNDArrayFromDLPack(DLManagedTensorHandle dlpack, +NDArrayHandle *out_handle) { + API_BEGIN(); + NDArray *pdata = new NDArray(); + *pdata = NDArray::FromDLPack( + static_cast(dlpack)); + *out_handle = pdata; + API_END(); +} + +int MXNDArrayCallDLPackDeleter(DLManagedTensorHandle dlpack) { + API_BEGIN(); + if (dlpack) { +DLManagedTensor *p_dlpack = static_cast(dlpack); +p_dlpack->deleter(p_dlpack); + } + API_END(); +} + + +typedef struct { +char py_object[16]; Review comment: I have removed it in the latest PR. This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] wkcn commented on a change in pull request #12047: [MXNET-779]Add DLPack Transformation API
wkcn commented on a change in pull request #12047: [MXNET-779]Add DLPack Transformation API URL: https://github.com/apache/incubator-mxnet/pull/12047#discussion_r209418176 ## File path: python/mxnet/ndarray/ndarray.py ## @@ -3851,3 +3898,117 @@ def histogram(a, bins=10, range=None): return _internal._histogram(data=a, bin_cnt=bins, range=range) raise ValueError("bins argument should be either an integer or an NDArray") # pylint: enable= no-member, protected-access, redefined-builtin + +pycapsule_dlpack_deleter = ctypes.CFUNCTYPE(None, ctypes.c_void_p)( +_LIB.MXNDArrayCallDLPackCapsuleDeleter) + +def to_dlpack_for_read(data): +"""Returns a reference view of NDArray that represents as DLManagedTensor until + all previous write operations on the current array are finished. + +Parameters +-- +data: NDArray +input data. + +Returns +--- +PyCapsule (the pointer of DLManagedTensor) +a reference view of NDArray that represents as DLManagedTensor. + +Examples + +>>> x = mx.nd.ones((2,3)) +>>> y = mx.nd.to_dlpack_for_read(x) +>>> type(y) + +>>> z = mx.nd.from_dlpack(y) +>>> z +[[1. 1. 1.] + [1. 1. 1.]] + +""" +data.wait_to_read() +dlpack = DLPackHandle() +check_call(_LIB.MXNDArrayToDLPack(data.handle, ctypes.byref(dlpack))) +return ctypes.pythonapi.PyCapsule_New(dlpack, b'dltensor', pycapsule_dlpack_deleter) + +def to_dlpack_for_write(data): +"""Returns a reference view of NDArray that represents as DLManagedTensor until + all previous read/write operations on the current array are finished. + +Parameters +-- +data: NDArray +input data. + +Returns +--- +PyCapsule (the pointer of DLManagedTensor) +a reference view of NDArray that represents as DLManagedTensor. + +Examples + +>>> x = mx.nd.ones((2,3)) +>>> w = mx.nd.to_dlpack_for_write(x) +>>> type(w) + +>>> u = mx.nd.from_dlpack(w) +>>> u += 1 +>>> x +[[2. 2. 2.] + [2. 2. 
2.]] + +""" +check_call(_LIB.MXNDArrayWaitToWrite(data.handle)) +dlpack = DLPackHandle() +check_call(_LIB.MXNDArrayToDLPack(data.handle, ctypes.byref(dlpack))) +return ctypes.pythonapi.PyCapsule_New(dlpack, b'dltensor', pycapsule_dlpack_deleter) Review comment: Solved it. Thank you! This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] wkcn commented on a change in pull request #12047: [MXNET-779]Add DLPack Transformation API
wkcn commented on a change in pull request #12047: [MXNET-779]Add DLPack Transformation API URL: https://github.com/apache/incubator-mxnet/pull/12047#discussion_r209416817 ## File path: include/mxnet/c_api.h ## @@ -737,6 +741,57 @@ MXNET_DLL int MXNDArrayGetShape(NDArrayHandle handle, */ MXNET_DLL int MXNDArrayGetData(NDArrayHandle handle, void **out_pdata); +/*! +* \brief Create a reference view of NDArray that +* represents as DLManagedTensor until +* all the pending writes with respect NDArray are finished. +* \param handle the handle to the ndarray +* \param out_dlpack pointer holder to get pointer of DLManagedTensor +* \return 0 when success, -1 when failure happens +*/ +MXNET_DLL int MXNDArrayToDLPackForRead(NDArrayHandle handle, + DLManagedTensorHandle *out_dlpack); + +/*! +* \brief Create a reference view of NDArray that +* represents as DLManagedTensor until +* all the pending reads/writes with respect NDArray are finished. +* \param handle the handle to the ndarray +* \param out_dlpack pointer holder to get pointer of DLManagedTensor +* \return 0 when success, -1 when failure happens +*/ +MXNET_DLL int MXNDArrayToDLPackForWrite(NDArrayHandle handle, +DLManagedTensorHandle *out_dlpack); + +/*! +* \brief Create a NDArray backed by a dlpack tensor. +* +* This allows us to create a NDArray using the memory +* allocated by an external deep learning framework +* that is DLPack compatible. +* +* The memory is retained until the NDArray went out of scope. +* +* \param dlpack the pointer of the input DLManagedTensor +* \param out_handle pointer holder to get pointer of NDArray +* \return 0 when success, -1 when failure happens +*/ +MXNET_DLL int MXNDArrayFromDLPack(DLManagedTensorHandle dlpack, + NDArrayHandle *out_handle); +/*! + * \brief Delete a dlpack tensor + * \param dlpack the pointer of the input DLManagedTensor + * \return 0 when success, -1 when failure happens + */ +MXNET_DLL int MXNDArrayCallDLPackDeleter(DLManagedTensorHandle dlpack); + +/*! 
+ * \brief Delete a dlpack tensor + * \param dlpack_capsule the pointer of a PyCapsule storing DLManagedTensor + * \return 0 when success, -1 when failure happens + */ +MXNET_DLL void MXNDArrayCallDLPackCapsuleDeleter(PyObjectHandle dlpack_capsule); Review comment: Thank you! I found it works on Windows and Linux. I have updated the PR. This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services
[GitHub] jasonyu1996 opened a new issue #12132: [Feature Request] Implementation of L-BFGS optimizer
jasonyu1996 opened a new issue #12132: [Feature Request] Implementation of L-BFGS optimizer URL: https://github.com/apache/incubator-mxnet/issues/12132 Hi! Would the developers consider adding an implementation of limited memory BFGS to the library? PyTorch offers one here: https://pytorch.org/docs/stable/optim.html?#torch.optim.LBFGS This is an automated message from the Apache Git Service. To respond to the message, please log on GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org With regards, Apache Git Services