This is an automated email from the ASF dual-hosted git repository.

haibin pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/master by this push:
     new 6c00a5a  update LICENSE (#15128)
6c00a5a is described below

commit 6c00a5adaf3d8935c79a401348159a458fcee310
Author: Lai Wei <[email protected]>
AuthorDate: Thu Jun 6 19:55:43 2019 -0700

    update LICENSE (#15128)
    
    * update license
    
    * update license
    
    * fix typo
    
    * update license
    
    * add comment
    
    * Update example/gluon/style_transfer/dataset/download_dataset.py
    
    Co-Authored-By: Zach Kimberg <[email protected]>
    
    * Update example/gluon/embedding_learning/get_cub200_data.sh
    
    Co-Authored-By: Zach Kimberg <[email protected]>
    
    * update license
    
    * add license
    
    * trigger ci
    
    * fix large tensor
    
    * update copy right
    
    * fix wrong commit
    
    * fix
    
    * trigger
---
 cpp-package/example/get_data.sh                          |  9 +++++----
 example/cnn_text_classification/README.md                |  4 ++++
 example/cnn_text_classification/data_helpers.py          |  3 +++
 example/gluon/embedding_learning/README.md               |  4 ++++
 example/gluon/embedding_learning/get_cub200_data.sh      |  6 +++++-
 example/gluon/style_transfer/README.md                   |  4 ++++
 example/gluon/style_transfer/dataset/download_dataset.py |  4 ++++
 example/gluon/super_resolution/README.md                 | 11 +++++++++++
 example/gluon/super_resolution/super_resolution.py       |  4 +++-
 example/model-parallel/matrix_factorization/get_data.py  |  4 ++++
 example/recommenders/README.md                           |  3 +++
 example/recommenders/movielens_data.py                   |  4 ++++
 example/sparse/matrix_factorization/data.py              |  4 ++++
 tests/nightly/estimator/test_sentiment_rnn.py            | 15 ++++++++++++++-
 14 files changed, 72 insertions(+), 7 deletions(-)

diff --git a/cpp-package/example/get_data.sh b/cpp-package/example/get_data.sh
index b0913bd..e110772 100755
--- a/cpp-package/example/get_data.sh
+++ b/cpp-package/example/get_data.sh
@@ -51,11 +51,12 @@ download () {
     (($? != 0)) && exit 1 || return 0
 }
 
+# MNIST dataset from: http://yann.lecun.com/exdb/mnist/
 FILES=(
-    
"https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/gluon/dataset/mnist/train-images-idx3-ubyte.gz";
-    
"https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/gluon/dataset/mnist/train-labels-idx1-ubyte.gz";
-    
"https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/gluon/dataset/mnist/t10k-images-idx3-ubyte.gz";
-    
"https://apache-mxnet.s3-accelerate.dualstack.amazonaws.com/gluon/dataset/mnist/t10k-labels-idx1-ubyte.gz";
+    "http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz";
+    "http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz";
+    "http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz";
+    "http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz";
     "http://data.mxnet.io/data/mnist_train.csv.gz";)
 
 for FILE in ${FILES[@]}; do
diff --git a/example/cnn_text_classification/README.md 
b/example/cnn_text_classification/README.md
index f4ebc43..b9e3721 100644
--- a/example/cnn_text_classification/README.md
+++ b/example/cnn_text_classification/README.md
@@ -29,6 +29,10 @@ Finally, I got a best dev accuracy 80.1%, close to 81% that 
reported in the orig
 ## Data
 Please download the corpus from this repository 
[cnn-text-classification-tf](https://github.com/dennybritz/cnn-text-classification-tf),
 :)
 
+Note: The dataset is from cnn-text-classification-tf 
[repository](https://github.com/dennybritz/cnn-text-classification-tf/tree/master/data/rt-polaritydata).
+The dataset is copyrighted by Denny Britz and licensed under Apache License 2.0.
+For full text of the license, see [repository 
license](https://github.com/dennybritz/cnn-text-classification-tf/blob/master/LICENSE)
+    
 'data/rt.vec', this file was trained on the corpus by word2vec tool. I 
recommend to use GoogleNews word2vec, which could get better performance, since
 this corpus is small (contains about 10K sentences).
 
diff --git a/example/cnn_text_classification/data_helpers.py 
b/example/cnn_text_classification/data_helpers.py
index 78dacb0..8e13567 100644
--- a/example/cnn_text_classification/data_helpers.py
+++ b/example/cnn_text_classification/data_helpers.py
@@ -55,6 +55,9 @@ def load_data_and_labels():
     Returns split sentences and labels.
     """
     # Load data from files
+    # The dataset is from 
https://github.com/dennybritz/cnn-text-classification-tf/tree/master/data/rt-polaritydata
+    # The dataset is copyrighted by Denny Britz and licensed under Apache 
License 2.0.
+    # For full text of the license, see 
https://github.com/dennybritz/cnn-text-classification-tf/blob/master/LICENSE
     pos_path = "./data/rt-polaritydata/rt-polarity.pos"
     neg_path = "./data/rt-polaritydata/rt-polarity.neg"
     if not os.path.exists(pos_path):
diff --git a/example/gluon/embedding_learning/README.md 
b/example/gluon/embedding_learning/README.md
index ce1fb53..ee3a0ea 100644
--- a/example/gluon/embedding_learning/README.md
+++ b/example/gluon/embedding_learning/README.md
@@ -22,6 +22,10 @@ This example implements embedding learning based on a 
Margin-based Loss with dis
 
 ## Usage
 Download the data
+
+Note: the dataset is from [Caltech-UCSD Birds 
200](http://www.vision.caltech.edu/visipedia/CUB-200.html).
+These datasets are copyrighted by the Caltech Computational Vision Group and 
licensed under CC BY 4.0 Attribution.
+See [original dataset source](http://www.vision.caltech.edu/archive.html) for 
details
 ```bash
 ./get_cub200_data.sh
 ```
diff --git a/example/gluon/embedding_learning/get_cub200_data.sh 
b/example/gluon/embedding_learning/get_cub200_data.sh
index c2f2fe4..4cf83e7 100755
--- a/example/gluon/embedding_learning/get_cub200_data.sh
+++ b/example/gluon/embedding_learning/get_cub200_data.sh
@@ -26,5 +26,9 @@ if [[ ! -d "${DATA_DIR}" ]]; then
   mkdir -p ${DATA_DIR}
 fi
 
+# the dataset is from Caltech-UCSD Birds 200
+# http://www.vision.caltech.edu/visipedia/CUB-200.html
+# These datasets are copyrighted by the Caltech Computational Vision Group and 
licensed under CC BY 4.0 Attribution.
+# See http://www.vision.caltech.edu/archive.html for details
 wget -P ${DATA_DIR} 
http://www.vision.caltech.edu/visipedia-data/CUB-200-2011/CUB_200_2011.tgz
-cd ${DATA_DIR}; tar -xf CUB_200_2011.tgz
\ No newline at end of file
+cd ${DATA_DIR}; tar -xf CUB_200_2011.tgz
diff --git a/example/gluon/style_transfer/README.md 
b/example/gluon/style_transfer/README.md
index c182f3f..1d4ef43 100644
--- a/example/gluon/style_transfer/README.md
+++ b/example/gluon/style_transfer/README.md
@@ -112,6 +112,10 @@ python main.py optim --content-image 
images/content/venice-boat.jpg --style-imag
 
 ### Train Your Own MSG-Net Model
 0. Download the style images and COCO dataset
+Note: Dataset from [COCO 2014](http://cocodataset.org/#download).
+The dataset annotations and site are Copyright COCO Consortium and licensed CC 
BY 4.0 Attribution.
+The images within the dataset are available under the Flickr Terms of Use.
+See original [dataset source](http://cocodataset.org/#termsofuse) for details
        ```bash
     python download_images.py 
        python dataset/download_dataset.py
diff --git a/example/gluon/style_transfer/dataset/download_dataset.py 
b/example/gluon/style_transfer/dataset/download_dataset.py
index 5380015..6d32d94 100644
--- a/example/gluon/style_transfer/dataset/download_dataset.py
+++ b/example/gluon/style_transfer/dataset/download_dataset.py
@@ -26,6 +26,10 @@ def unzip_file(filename, outpath):
         z.extract(name, outpath)
     fh.close()
 
+# Dataset from COCO 2014: http://cocodataset.org/#download
+# The dataset annotations and site are Copyright COCO Consortium and licensed 
CC BY 4.0 Attribution.
+# The images within the dataset are available under the Flickr Terms of Use.
+# See http://cocodataset.org/#termsofuse for details
 download('http://msvocds.blob.core.windows.net/coco2014/train2014.zip', 
'dataset/train2014.zip')
 download('http://msvocds.blob.core.windows.net/coco2014/val2014.zip', 
'dataset/val2014.zip')
 
diff --git a/example/gluon/super_resolution/README.md 
b/example/gluon/super_resolution/README.md
index ddcbe8b..2239533 100644
--- a/example/gluon/super_resolution/README.md
+++ b/example/gluon/super_resolution/README.md
@@ -17,6 +17,9 @@
 
 # Superresolution
 
+Note: this example uses the BSDS500 dataset, which is copyrighted by the 
Berkeley Computer Vision Group.
+For more details, see [dataset 
website](https://www2.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/resources.html#bsds500)
+
 This example trains a convolutional neural network to enhance the resolution 
of images (also known as superresolution). 
 The script takes the following commandline arguments:
 
@@ -43,3 +46,11 @@ Once the network is trained you can use the following 
command to increase the re
 ```
 python  super_resolution.py --resolve_img myimage.jpg
 ```
+
+## Citation
+<b>Contour Detection and Hierarchical Image Segmentation
+P. Arbelaez, M. Maire, C. Fowlkes and J. Malik.
+IEEE TPAMI, Vol. 33, No. 5, pp. 898-916, May 2011.
+[PDF](http://web.archive.org/web/20160306133802/http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/papers/amfm_pami2010.pdf)
+[BibTex](http://web.archive.org/web/20160306133802/http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/papers/amfm_pami2011.bib)
+</b>
\ No newline at end of file
diff --git a/example/gluon/super_resolution/super_resolution.py 
b/example/gluon/super_resolution/super_resolution.py
index 198f6fe..4a3e8d9 100644
--- a/example/gluon/super_resolution/super_resolution.py
+++ b/example/gluon/super_resolution/super_resolution.py
@@ -59,7 +59,9 @@ upscale_factor = opt.upscale_factor
 batch_size, test_batch_size = opt.batch_size, opt.test_batch_size
 color_flag = 0
 
-# Get data
+# Get data from https://github.com/BIDS/BSDS500/
+# The BSDS500 Dataset is copyrighted by the Berkeley Computer Vision Group
+# For more details, see 
https://www2.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/resources.html#bsds500
 datasets_dir = path.expanduser(path.join("~", ".mxnet", "datasets"))
 datasets_tmpdir = path.join(datasets_dir, "tmp")
 dataset_url = "https://github.com/BIDS/BSDS500/archive/master.zip";
diff --git a/example/model-parallel/matrix_factorization/get_data.py 
b/example/model-parallel/matrix_factorization/get_data.py
index bb2503a..775c386 100644
--- a/example/model-parallel/matrix_factorization/get_data.py
+++ b/example/model-parallel/matrix_factorization/get_data.py
@@ -21,6 +21,10 @@ import mxnet as mx
 
 def get_movielens_data(prefix):
     if not os.path.exists("%s.zip" % prefix):
+        # MovieLens 10M dataset from https://grouplens.org/datasets/movielens/
+        # This dataset is copyrighted by the GroupLens Research Group at the 
University of Minnesota,
+        # and licensed under their usage license.
+        # For full text of the license, see 
http://files.grouplens.org/datasets/movielens/ml-10m-README.html
         print("Dataset MovieLens 10M not present. Downloading now ...")
         os.system("wget http://files.grouplens.org/datasets/movielens/%s.zip"; 
% prefix)
         os.system("unzip %s.zip" % prefix)
diff --git a/example/recommenders/README.md b/example/recommenders/README.md
index 806e51f..3adb3f0 100644
--- a/example/recommenders/README.md
+++ b/example/recommenders/README.md
@@ -43,3 +43,6 @@ Thanks to [xlvector](https://github.com/xlvector/) for the 
first Matrix Factoriz
 that provided the basis for these examples.
 
 [MovieLens](http://grouplens.org/datasets/movielens/) data from 
[GroupLens](http://grouplens.org/).
+Note: the MovieLens 100K and 10M datasets are copyrighted by the GroupLens 
Research Group at the University of Minnesota,
+and licensed under their usage license. For full text of the usage license, 
see [ml-100k 
license](http://files.grouplens.org/datasets/movielens/ml-100k-README.txt)
+ and [ml-10m 
license](http://files.grouplens.org/datasets/movielens/ml-10m-README.html). 
\ No newline at end of file
diff --git a/example/recommenders/movielens_data.py 
b/example/recommenders/movielens_data.py
index 5f86ad1..e92c73a 100644
--- a/example/recommenders/movielens_data.py
+++ b/example/recommenders/movielens_data.py
@@ -45,6 +45,10 @@ def load_mldataset(filename):
 def ensure_local_data(prefix):
     if not os.path.exists("%s.zip" % prefix):
         print("Downloading MovieLens data: %s" % prefix)
+        # MovieLens 100k dataset from https://grouplens.org/datasets/movielens/
+        # This dataset is copyrighted by the GroupLens Research Group at the 
University of Minnesota,
+        # and licensed under their usage license.
+        # For full text of the usage license, see 
http://files.grouplens.org/datasets/movielens/ml-100k-README.txt
         os.system("wget http://files.grouplens.org/datasets/movielens/%s.zip"; 
% prefix)
         os.system("unzip %s.zip" % prefix)
 
diff --git a/example/sparse/matrix_factorization/data.py 
b/example/sparse/matrix_factorization/data.py
index 049f5c2..8ad3ee8 100644
--- a/example/sparse/matrix_factorization/data.py
+++ b/example/sparse/matrix_factorization/data.py
@@ -19,6 +19,10 @@ import os, logging
 import mxnet as mx
 
 def get_movielens_data(data_dir, prefix):
+    # MovieLens 10M dataset from https://grouplens.org/datasets/movielens/
+    # This dataset is copyrighted by the GroupLens Research Group at the University 
of Minnesota,
+    # and licensed under their usage license.
+    # For full text of the license, see 
http://files.grouplens.org/datasets/movielens/ml-10m-README.html
     if not os.path.exists(os.path.join(data_dir, "ml-10M100K")):
         mx.test_utils.get_zip_data(data_dir,
                                    
"http://files.grouplens.org/datasets/movielens/%s.zip"; % prefix,
diff --git a/tests/nightly/estimator/test_sentiment_rnn.py 
b/tests/nightly/estimator/test_sentiment_rnn.py
index 404bf83..d54af49 100644
--- a/tests/nightly/estimator/test_sentiment_rnn.py
+++ b/tests/nightly/estimator/test_sentiment_rnn.py
@@ -101,7 +101,20 @@ def download_imdb(data_dir='/tmp/data'):
     '''
     Download and extract the IMDB dataset
     '''
-    url = ('http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz')
+    # Large Movie Review Dataset from 
http://ai.stanford.edu/~amaas/data/sentiment/
+    # Note: this dataset is copyrighted by Andrew Maas and the Stanford AI Lab
+    # @InProceedings{maas-EtAl:2011:ACL-HLT2011,
+    #   author    = {Maas, Andrew L.  and  Daly, Raymond E.  and  Pham, Peter 
T.  and  Huang, Dan  and  Ng, Andrew Y.  and  Potts, Christopher},
+    #   title     = {Learning Word Vectors for Sentiment Analysis},
+    #   booktitle = {Proceedings of the 49th Annual Meeting of the Association 
for Computational Linguistics: Human Language Technologies},
+    #   month     = {June},
+    #   year      = {2011},
+    #   address   = {Portland, Oregon, USA},
+    #   publisher = {Association for Computational Linguistics},
+    #   pages     = {142--150},
+    #   url       = {http://www.aclweb.org/anthology/P11-1015}
+    # }
+    url = 
('https://aws-ml-platform-datasets.s3.amazonaws.com/imdb/aclImdb_v1.tar.gz')
     sha1 = '01ada507287d82875905620988597833ad4e0903'
     if not os.path.exists(data_dir):
         os.makedirs(data_dir)

Reply via email to