This is an automated email from the ASF dual-hosted git repository.
zhasheng pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git
The following commit(s) were added to refs/heads/master by this push:
new e9a5a0a Change mx.test_utils.list_gpus to mx.context.num_gpus where
possible (#14946)
e9a5a0a is described below
commit e9a5a0a8f96d842161d0dd2176b46974c842cec3
Author: Pedro Larroy <[email protected]>
AuthorDate: Thu May 30 11:56:57 2019 -0700
Change mx.test_utils.list_gpus to mx.context.num_gpus where possible
(#14946)
* Change mx.test_utils.list_gpus to mx.context.num_gpus where possible
https://github.com/apache/incubator-mxnet/pull/14926
* Remove mx.test_utils.list_gpus
* revert accidental unrelated dmlc-core changes
* Fix basic_layers.py import
* Fix typo
* restore list_gpus() in tests_utils for compat
* restore dmlc-core
---
docs/tutorials/gluon/datasets.md | 2 +-
docs/tutorials/gluon/info_gan.md | 2 +-
docs/tutorials/gluon/learning_rate_finder.md | 2 +-
docs/tutorials/gluon/learning_rate_schedules.md | 2 +-
docs/tutorials/gluon/save_load_params.md | 2 +-
docs/tutorials/nlp/cnn.md | 2 +-
docs/tutorials/python/kvstore.md | 8 ++++----
docs/tutorials/python/mnist.md | 2 +-
docs/tutorials/python/profiler.md | 2 +-
docs/tutorials/unsupervised_learning/gan.md | 2 +-
example/adversary/adversary_generation.ipynb | 2 +-
example/autoencoder/convolutional_autoencoder.ipynb | 2 +-
example/bi-lstm-sort/bi-lstm-sort.ipynb | 2 +-
example/distributed_training-horovod/gluon_mnist.py | 2 +-
example/distributed_training-horovod/module_mnist.py | 2 +-
example/image-classification/test_score.py | 9 ++++-----
example/multi-task/multi-task-learning.ipynb | 2 +-
example/recommenders/demo2-dssm.ipynb | 2 +-
example/svm_mnist/svm_mnist.py | 4 ++--
python/mxnet/gluon/contrib/nn/basic_layers.py | 4 ++--
tests/python/gpu/test_nccl.py | 2 +-
tests/python/profiling/test_nvtx.py | 2 +-
tools/caffe_converter/test_converter.py | 6 +++---
23 files changed, 33 insertions(+), 34 deletions(-)
diff --git a/docs/tutorials/gluon/datasets.md b/docs/tutorials/gluon/datasets.md
index c029124..6f645bb 100644
--- a/docs/tutorials/gluon/datasets.md
+++ b/docs/tutorials/gluon/datasets.md
@@ -157,7 +157,7 @@ def construct_net():
return net
# construct and initialize network.
-ctx = mx.gpu() if mx.test_utils.list_gpus() else mx.cpu()
+ctx = mx.gpu() if mx.context.num_gpus() else mx.cpu()
net = construct_net()
net.hybridize()
diff --git a/docs/tutorials/gluon/info_gan.md b/docs/tutorials/gluon/info_gan.md
index 93fd6cb..91adf6c 100644
--- a/docs/tutorials/gluon/info_gan.md
+++ b/docs/tutorials/gluon/info_gan.md
@@ -51,7 +51,7 @@ batch_size = 64
z_dim = 100
n_continuous = 2
n_categories = 10
-ctx = mx.gpu() if mx.test_utils.list_gpus() else mx.cpu()
+ctx = mx.gpu() if mx.context.num_gpus() else mx.cpu()
```
Some functions to load and normalize images.
diff --git a/docs/tutorials/gluon/learning_rate_finder.md
b/docs/tutorials/gluon/learning_rate_finder.md
index 30c66e3..b580bee 100644
--- a/docs/tutorials/gluon/learning_rate_finder.md
+++ b/docs/tutorials/gluon/learning_rate_finder.md
@@ -231,7 +231,7 @@ Using a Pre-activation ResNet-18 from the Gluon model zoo,
we instantiate our Le
```python
-ctx = mx.gpu() if mx.test_utils.list_gpus() else mx.cpu()
+ctx = mx.gpu() if mx.context.num_gpus() else mx.cpu()
net = mx.gluon.model_zoo.vision.resnet18_v2(classes=10)
learner = Learner(net=net, data_loader=data_loader, ctx=ctx)
lr_finder = LRFinder(learner)
diff --git a/docs/tutorials/gluon/learning_rate_schedules.md
b/docs/tutorials/gluon/learning_rate_schedules.md
index 46c79eb..1196773 100644
--- a/docs/tutorials/gluon/learning_rate_schedules.md
+++ b/docs/tutorials/gluon/learning_rate_schedules.md
@@ -140,7 +140,7 @@ As discussed above, the schedule should return a learning
rate given an (1-based
```python
# Use GPU if one exists, else use CPU
-ctx = mx.gpu() if mx.test_utils.list_gpus() else mx.cpu()
+ctx = mx.gpu() if mx.context.num_gpus() else mx.cpu()
# MNIST images are 28x28. Total pixels in input layer is 28x28 = 784
num_inputs = 784
diff --git a/docs/tutorials/gluon/save_load_params.md
b/docs/tutorials/gluon/save_load_params.md
index 26d6b89..c82ec5a 100644
--- a/docs/tutorials/gluon/save_load_params.md
+++ b/docs/tutorials/gluon/save_load_params.md
@@ -50,7 +50,7 @@ Let's define a helper function to build a LeNet model and
another helper to trai
```python
# Use GPU if one exists, else use CPU
-ctx = mx.gpu() if mx.test_utils.list_gpus() else mx.cpu()
+ctx = mx.gpu() if mx.context.num_gpus() else mx.cpu()
# MNIST images are 28x28. Total pixels in input layer is 28x28 = 784
num_inputs = 784
diff --git a/docs/tutorials/nlp/cnn.md b/docs/tutorials/nlp/cnn.md
index e671de3..105bf03 100644
--- a/docs/tutorials/nlp/cnn.md
+++ b/docs/tutorials/nlp/cnn.md
@@ -300,7 +300,7 @@ import time
CNNModel = namedtuple("CNNModel", ['cnn_exec', 'symbol', 'data', 'label',
'param_blocks'])
# Define what device to train/test on, use GPU if available
-ctx = mx.gpu() if mx.test_utils.list_gpus() else mx.cpu()
+ctx = mx.gpu() if mx.context.num_gpus() else mx.cpu()
arg_names = cnn.list_arguments()
diff --git a/docs/tutorials/python/kvstore.md b/docs/tutorials/python/kvstore.md
index 42debab..4807475 100644
--- a/docs/tutorials/python/kvstore.md
+++ b/docs/tutorials/python/kvstore.md
@@ -57,9 +57,9 @@ values and then push the aggregated value:
```python
# The numbers used below assume 4 GPUs
-gpus = mx.test_utils.list_gpus()
-if len(gpus) > 1:
- contexts = [mx.gpu(i) for i in gpus]
+gpus = mx.context.num_gpus()
+if gpus > 0:
+ contexts = [mx.gpu(i) for i in range(gpus)]
else:
contexts = [mx.cpu(i) for i in range(4)]
b = [mx.nd.ones(shape, ctx) for ctx in contexts]
@@ -173,4 +173,4 @@ When the distributed version is ready, we will update this
section.
## Next Steps
* [MXNet tutorials index](http://mxnet.io/tutorials/index.html)
-<!-- INSERT SOURCE DOWNLOAD BUTTONS -->
\ No newline at end of file
+<!-- INSERT SOURCE DOWNLOAD BUTTONS -->
diff --git a/docs/tutorials/python/mnist.md b/docs/tutorials/python/mnist.md
index 9d641b3..ac96561 100644
--- a/docs/tutorials/python/mnist.md
+++ b/docs/tutorials/python/mnist.md
@@ -50,7 +50,7 @@ mnist = mx.test_utils.get_mnist()
mx.random.seed(42)
# Set the compute context, GPU is available otherwise CPU
-ctx = mx.gpu() if mx.test_utils.list_gpus() else mx.cpu()
+ctx = mx.gpu() if mx.context.num_gpus() else mx.cpu()
```
After running the above source code, the entire MNIST dataset should be fully
loaded into memory. Note that for large datasets it is not feasible to pre-load
the entire dataset first like we did here. What is needed is a mechanism by
which we can quickly and efficiently stream data directly from the source.
MXNet Data iterators come to the rescue here by providing exactly that. Data
iterator is the mechanism by which we feed input data into an MXNet training
algorithm and they are very s [...]
diff --git a/docs/tutorials/python/profiler.md
b/docs/tutorials/python/profiler.md
index d3e3355..8080309 100644
--- a/docs/tutorials/python/profiler.md
+++ b/docs/tutorials/python/profiler.md
@@ -111,7 +111,7 @@ Let's define a method that will run one training iteration
given data and label.
```python
# Use GPU if available
-if len(mx.test_utils.list_gpus())!=0:
+if mx.context.num_gpus():
ctx=mx.gpu()
else:
ctx=mx.cpu()
diff --git a/docs/tutorials/unsupervised_learning/gan.md
b/docs/tutorials/unsupervised_learning/gan.md
index ca0fb15..0416593 100644
--- a/docs/tutorials/unsupervised_learning/gan.md
+++ b/docs/tutorials/unsupervised_learning/gan.md
@@ -240,7 +240,7 @@ sigma = 0.02
lr = 0.0002
beta1 = 0.5
# Define the compute context, use GPU if available
-ctx = mx.gpu() if mx.test_utils.list_gpus() else mx.cpu()
+ctx = mx.gpu() if mx.context.num_gpus() else mx.cpu()
#=============Generator Module=============
generator = mx.mod.Module(symbol=generatorSymbol, data_names=('rand',),
label_names=None, context=ctx)
diff --git a/example/adversary/adversary_generation.ipynb
b/example/adversary/adversary_generation.ipynb
index 0b45366..76c5f4c 100644
--- a/example/adversary/adversary_generation.ipynb
+++ b/example/adversary/adversary_generation.ipynb
@@ -45,7 +45,7 @@
},
"outputs": [],
"source": [
- "ctx = mx.gpu() if len(mx.test_utils.list_gpus()) else mx.cpu()\n",
+ "ctx = mx.gpu() if mx.context.num_gpus() else mx.cpu()\n",
"batch_size = 128"
]
},
diff --git a/example/autoencoder/convolutional_autoencoder.ipynb
b/example/autoencoder/convolutional_autoencoder.ipynb
index c42ad90..a49eba0 100644
--- a/example/autoencoder/convolutional_autoencoder.ipynb
+++ b/example/autoencoder/convolutional_autoencoder.ipynb
@@ -50,7 +50,7 @@
"outputs": [],
"source": [
"batch_size = 512\n",
- "ctx = mx.gpu() if len(mx.test_utils.list_gpus()) > 0 else mx.cpu()"
+ "ctx = mx.gpu() if mx.context.num_gpus() else mx.cpu()"
]
},
{
diff --git a/example/bi-lstm-sort/bi-lstm-sort.ipynb
b/example/bi-lstm-sort/bi-lstm-sort.ipynb
index 0851176..5d18be3 100644
--- a/example/bi-lstm-sort/bi-lstm-sort.ipynb
+++ b/example/bi-lstm-sort/bi-lstm-sort.ipynb
@@ -39,7 +39,7 @@
"seq_len = 5\n",
"split = 0.8\n",
"batch_size = 512\n",
- "ctx = mx.gpu() if len(mx.test_utils.list_gpus()) > 0 else mx.cpu()"
+ "ctx = mx.gpu() if mx.context.num_gpus() > 0 else mx.cpu()"
]
},
{
diff --git a/example/distributed_training-horovod/gluon_mnist.py
b/example/distributed_training-horovod/gluon_mnist.py
index 753758b..7b39f57 100644
--- a/example/distributed_training-horovod/gluon_mnist.py
+++ b/example/distributed_training-horovod/gluon_mnist.py
@@ -45,7 +45,7 @@ args = parser.parse_args()
if not args.no_cuda:
# Disable CUDA if there are no GPUs.
- if not mx.test_utils.list_gpus():
+ if mx.context.num_gpus() == 0:
args.no_cuda = True
logging.basicConfig(level=logging.INFO)
diff --git a/example/distributed_training-horovod/module_mnist.py
b/example/distributed_training-horovod/module_mnist.py
index 86fbb0f..4fcb02a 100644
--- a/example/distributed_training-horovod/module_mnist.py
+++ b/example/distributed_training-horovod/module_mnist.py
@@ -42,7 +42,7 @@ args = parser.parse_args()
if not args.no_cuda:
# Disable CUDA if there are no GPUs.
- if not mx.test_utils.list_gpus():
+ if mx.context.num_gpus() == 0:
args.no_cuda = True
logging.basicConfig(level=logging.INFO)
diff --git a/example/image-classification/test_score.py
b/example/image-classification/test_score.py
index 0789c92..e41d4e6 100644
--- a/example/image-classification/test_score.py
+++ b/example/image-classification/test_score.py
@@ -51,11 +51,10 @@ def test_imagenet1k_inception_bn(**kwargs):
assert r > g and r < g + .1
if __name__ == '__main__':
- gpus = mx.test_utils.list_gpus()
- assert len(gpus) > 0
- batch_size = 16 * len(gpus)
- gpus = ','.join([str(i) for i in gpus])
-
+ num_gpus = mx.context.num_gpus()
+ assert num_gpus > 0
+ batch_size = 16 * num_gpus
+ gpus = ','.join(map(str, range(num_gpus)))
kwargs = {'gpus':gpus, 'batch_size':batch_size, 'max_num_examples':500}
download_data()
test_imagenet1k_resnet(**kwargs)
diff --git a/example/multi-task/multi-task-learning.ipynb
b/example/multi-task/multi-task-learning.ipynb
index 6e03e2b..048d6d9 100644
--- a/example/multi-task/multi-task-learning.ipynb
+++ b/example/multi-task/multi-task-learning.ipynb
@@ -58,7 +58,7 @@
"source": [
"batch_size = 128\n",
"epochs = 5\n",
- "ctx = mx.gpu() if len(mx.test_utils.list_gpus()) > 0 else mx.cpu()\n",
+ "ctx = mx.gpu() if mx.context.num_gpus() > 0 else mx.cpu()\n",
"lr = 0.01"
]
},
diff --git a/example/recommenders/demo2-dssm.ipynb
b/example/recommenders/demo2-dssm.ipynb
index 49450c5..d0cd3ed 100644
--- a/example/recommenders/demo2-dssm.ipynb
+++ b/example/recommenders/demo2-dssm.ipynb
@@ -41,7 +41,7 @@
"hidden_units = 128\n",
"epsilon_proj = 0.25\n",
"\n",
- "ctx = mx.gpu() if len(mx.test_utils.list_gpus()) > 0 else mx.cpu()"
+ "ctx = mx.gpu() if mx.context.num_gpus() > 0 else mx.cpu()"
]
},
{
diff --git a/example/svm_mnist/svm_mnist.py b/example/svm_mnist/svm_mnist.py
index 3fc0362..e166cb6 100644
--- a/example/svm_mnist/svm_mnist.py
+++ b/example/svm_mnist/svm_mnist.py
@@ -82,7 +82,7 @@ print("Data prepared.")
# Article's suggestion on batch size
batch_size = 200
-ctx = mx.gpu() if len(mx.test_utils.list_gpus()) > 0 else mx.cpu()
+ctx = mx.gpu() if mx.context.num_gpus() > 0 else mx.cpu()
results = {}
for output in [mlp_svm_l2, mlp_svm_l1, mlp_softmax]:
@@ -121,4 +121,4 @@ for key, value in results.items():
#svm_l2 97.85 %s
#svm_l1 98.15 %s
-#softmax 97.69 %s
\ No newline at end of file
+#softmax 97.69 %s
diff --git a/python/mxnet/gluon/contrib/nn/basic_layers.py
b/python/mxnet/gluon/contrib/nn/basic_layers.py
index 6cbf988..706e5e4 100644
--- a/python/mxnet/gluon/contrib/nn/basic_layers.py
+++ b/python/mxnet/gluon/contrib/nn/basic_layers.py
@@ -24,7 +24,7 @@ __all__ = ['Concurrent', 'HybridConcurrent', 'Identity',
'SparseEmbedding',
'PixelShuffle3D']
import warnings
-from .... import nd, test_utils
+from .... import nd, context
from ...block import HybridBlock, Block
from ...nn import Sequential, HybridSequential, BatchNorm
@@ -233,7 +233,7 @@ class SyncBatchNorm(BatchNorm):
warnings.warn("Caution using SyncBatchNorm: "
"if not using all the GPUs, please mannually set
num_devices",
UserWarning)
- num_devices = len(test_utils.list_gpus())
+ num_devices = context.num_gpus()
num_devices = num_devices if num_devices > 0 else 1
return num_devices
diff --git a/tests/python/gpu/test_nccl.py b/tests/python/gpu/test_nccl.py
index 40ef6fd..275dae0 100644
--- a/tests/python/gpu/test_nccl.py
+++ b/tests/python/gpu/test_nccl.py
@@ -22,7 +22,7 @@ import os
shapes = [(10), (100), (1000), (10000), (100000), (2,2), (2,3,4,5,6,7,8)]
keys = [1,2,3,4,5,6,7]
-num_gpus = len(mx.test_utils.list_gpus())
+num_gpus = mx.context.num_gpus()
if num_gpus > 8 :
diff --git a/tests/python/profiling/test_nvtx.py
b/tests/python/profiling/test_nvtx.py
index 35b209e..507b438 100644
--- a/tests/python/profiling/test_nvtx.py
+++ b/tests/python/profiling/test_nvtx.py
@@ -25,7 +25,7 @@ from subprocess import Popen, PIPE
def test_nvtx_ranges_present_in_profile():
- if not mx.test_utils.list_gpus():
+ if not mx.context.num_gpus():
unittest.skip('Test only applicable to machines with GPUs')
# Build a system independent wrapper to execute simple_forward with nvprof
diff --git a/tools/caffe_converter/test_converter.py
b/tools/caffe_converter/test_converter.py
index 3c325d6..a4f7445 100644
--- a/tools/caffe_converter/test_converter.py
+++ b/tools/caffe_converter/test_converter.py
@@ -90,9 +90,9 @@ def main():
gpus = [-1]
default_batch_size = 32
else:
- gpus = mx.test_utils.list_gpus()
- assert gpus, 'At least one GPU is needed to run test_converter in GPU
mode'
- default_batch_size = 32 * len(gpus)
+ num_gpus = mx.context.num_gpus()
+ assert num_gpus, 'At least one GPU is needed to run test_converter in
GPU mode'
+ default_batch_size = 32 * num_gpus
models = ['bvlc_googlenet', 'vgg-16', 'resnet-50']