Repository: incubator-singa
Updated Branches:
  refs/heads/master 6d7d629bf -> 0cf960c96


SINGA-363 Fix some bugs in imagenet examples


Project: http://git-wip-us.apache.org/repos/asf/incubator-singa/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-singa/commit/dd58f49a
Tree: http://git-wip-us.apache.org/repos/asf/incubator-singa/tree/dd58f49a
Diff: http://git-wip-us.apache.org/repos/asf/incubator-singa/diff/dd58f49a

Branch: refs/heads/master
Commit: dd58f49aca8d3ac1b837aa3af159e38255d0e48f
Parents: 6bcd5d0
Author: Wentong-DST <[email protected]>
Authored: Thu May 17 11:11:46 2018 +0800
Committer: Wentong-DST <[email protected]>
Committed: Thu May 17 11:11:46 2018 +0800

----------------------------------------------------------------------
 examples/imagenet/googlenet/serve.py   |  6 ++++--
 examples/imagenet/inception/convert.py |  9 ++++++---
 examples/imagenet/inception/serve.py   | 24 +++++++++++++++++-------
 examples/imagenet/resnet/convert.py    | 23 +++++++++++++++--------
 examples/imagenet/vgg/README.md        |  4 ++--
 examples/imagenet/vgg/convert.py       |  9 +++++++--
 examples/imagenet/vgg/serve.py         |  3 ++-
 7 files changed, 53 insertions(+), 25 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/dd58f49a/examples/imagenet/googlenet/serve.py
----------------------------------------------------------------------
diff --git a/examples/imagenet/googlenet/serve.py b/examples/imagenet/googlenet/serve.py
index 308acd6..35b0c86 100644
--- a/examples/imagenet/googlenet/serve.py
+++ b/examples/imagenet/googlenet/serve.py
@@ -36,6 +36,7 @@ from singa import layer
 from singa import net as ffnet
 from singa import device
 from singa import tensor
+
 from rafiki.agent import Agent, MsgType
 
 
@@ -186,7 +187,8 @@ def serve(agent, use_cpu, parameter_file, topk=5):
                 img[:, :, 2] -= 103.939
                 img[:,:,[0,1,2]] = img[:,:,[2,1,0]]
                 img = img.transpose((2, 0, 1))
-                img = img[:,(height-224)//2:(height+224)//2,(width-224)//2:(width+224)//2]
+                img = img[:, (height-224)//2:(height+224)//2,\
+                          (width-224)//2:(width+224)//2]
                 images = np.expand_dims(img, axis=0)
 
                 x = tensor.from_numpy(images.astype(np.float32))
@@ -216,7 +218,7 @@ def serve(agent, use_cpu, parameter_file, topk=5):
 def main():
     try:
         # Setup argument parser
-        parser = ArgumentParser(description="GooleNet for image 
classification")
+        parser = ArgumentParser(description="GoogleNet for image 
classification")
         parser.add_argument("-p", "--port", default=9999, help="listen port")
         parser.add_argument("-C", "--use_cpu", action="store_true")
         parser.add_argument("--parameter_file", 
default="bvlc_googlenet.pickle",

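A note on the slicing fixed above: the expression takes a center crop of a channel-first image. A minimal standalone sketch of the same arithmetic, assuming a (channels, height, width) numpy array and the 224x224 crop used here (function name and shapes are illustrative, not part of the commit):

    import numpy as np

    def center_crop_chw(img, crop=224):
        # img: numpy array in (channels, height, width) layout, i.e. the
        # result of img.transpose((2, 0, 1)) in the hunk above.
        _, height, width = img.shape
        # (height - crop)//2 : (height + crop)//2 spans exactly `crop` rows
        # centred on the middle of the image; same for the columns.
        return img[:, (height - crop) // 2:(height + crop) // 2,
                      (width - crop) // 2:(width + crop) // 2]
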
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/dd58f49a/examples/imagenet/inception/convert.py
----------------------------------------------------------------------
diff --git a/examples/imagenet/inception/convert.py b/examples/imagenet/inception/convert.py
index c9f92e4..6406b70 100644
--- a/examples/imagenet/inception/convert.py
+++ b/examples/imagenet/inception/convert.py
@@ -19,7 +19,6 @@ from __future__ import print_function
 
 import argparse
 import sys
-import cPickle as pickle
 import os
 
 import numpy as np
@@ -28,6 +27,10 @@ from tensorflow.python.platform import app
 import inception_v4
 import inception_v3
 
+try:
+    import cPickle as pickle
+except ModuleNotFoundError:
+    import pickle
 
 FLAGS = None
 
@@ -96,8 +99,8 @@ def convert(model, file_name):
             (any([e in file_name for e in [".index", ".meta", ".data"]]))):
             proposed_file = ".".join(file_name.split(".")[0:-1])
             v2_file_error_template = """
-    It's likely that this is a V2 checkpoint and you need to provide the filename
-    *prefix*.  Try removing the '.' and extension.  Try:
+    It's likely that this is a V2 checkpoint and you need to provide
+    the filename *prefix*.  Try removing the '.' and extension.  Try:
     inspect checkpoint --file_name = {}"""
         print(v2_file_error_template.format(proposed_file))
 

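The cPickle fallback added above (and repeated in the resnet and vgg converters below) is the standard Python 2/3 compatibility idiom. A minimal sketch of the pattern; as an aside, ModuleNotFoundError only exists from Python 3.6 onwards, so catching its parent class ImportError is the more portable variant:

    try:
        import cPickle as pickle   # Python 2: C-accelerated pickle
    except ImportError:            # Python 3: cPickle was merged into pickle
        import pickle
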
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/dd58f49a/examples/imagenet/inception/serve.py
----------------------------------------------------------------------
diff --git a/examples/imagenet/inception/serve.py b/examples/imagenet/inception/serve.py
index 9ba099a..62b719b 100644
--- a/examples/imagenet/inception/serve.py
+++ b/examples/imagenet/inception/serve.py
@@ -14,8 +14,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 # =============================================================================
-import model
-
 from singa import device
 from singa import tensor
 from singa import image_tool
@@ -28,8 +26,11 @@ import traceback
 from argparse import ArgumentParser
 import numpy as np
 
+import inception_v4
+import inception_v3
+
 
-def serve(agent, use_cpu, parameter_file, topk=5):
+def serve(agent, net, use_cpu, parameter_file, topk=5):
     if use_cpu:
         print('running with cpu')
         dev = device.get_default_device()
@@ -40,6 +41,11 @@ def serve(agent, use_cpu, parameter_file, topk=5):
     agent = agent
 
     print('Start intialization............')
+    # fix the bug when creating net
+    if net == 'v3':
+        model = inception_v3
+    else:
+        model = inception_v4
     net, _ = model.create_net(is_training=False)
     net.load(parameter_file, use_pickle=True)
     net.to_device(dev)
@@ -61,7 +67,9 @@ def serve(agent, use_cpu, parameter_file, topk=5):
                 height, width = img.size[0], img.size[1]
                 print(img.size)
                 crop_h, crop_w = int(height * ratio), int(width * ratio)
-                img = np.array(image_tool.crop(img, (crop_h, crop_w), 'center').resize((299, 299))).astype(np.float32) / float(255)
+                img = np.array(image_tool.crop(img,\
+                      (crop_h, crop_w), 'center').\
+                      resize((299, 299))).astype(np.float32) / float(255)
                 img -= 0.5
                 img *= 2
                 # img[:,:,[0,1,2]] = img[:,:,[2,1,0]]
@@ -94,11 +102,13 @@ def serve(agent, use_cpu, parameter_file, topk=5):
 def main():
     try:
         # Setup argument parser
-        parser = ArgumentParser(description="InceptionV4 for image 
classification")
+        parser = ArgumentParser(description=\
+                                "InceptionV3 and V4 for image classification")
+        parser.add_argument("--model", choices=['v3', 'v4'], default='v4')
         parser.add_argument("-p", "--port", default=9999, help="listen port")
         parser.add_argument("-C", "--use_cpu", action="store_true")
         parser.add_argument("--parameter_file", default="inception_v4.pickle",
-                help="relative path")
+                            help="relative path")
 
         # Process arguments
         args = parser.parse_args()
@@ -106,7 +116,7 @@ def main():
 
         # start to train
         agent = Agent(port)
-        serve(agent, args.use_cpu, args.parameter_file)
+        serve(agent, args.model, args.use_cpu, args.parameter_file)
         agent.stop()
 
     except SystemExit:

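With the new --model flag, one serving script covers both Inception versions. Hypothetical invocations (the v3 parameter filename is illustrative; only inception_v4.pickle appears as the documented default):

    # InceptionV4 (default model and parameter file)
    $ python serve.py --parameter_file inception_v4.pickle &
    # InceptionV3, assuming its weights were converted to inception_v3.pickle
    $ python serve.py --model v3 --parameter_file inception_v3.pickle &
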
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/dd58f49a/examples/imagenet/resnet/convert.py
----------------------------------------------------------------------
diff --git a/examples/imagenet/resnet/convert.py b/examples/imagenet/resnet/convert.py
index 7c98139..a851210 100644
--- a/examples/imagenet/resnet/convert.py
+++ b/examples/imagenet/resnet/convert.py
@@ -20,11 +20,15 @@ using cPickle'''
 import os
 import torchfile
 import numpy as np
-import cPickle as pickle
 from argparse import ArgumentParser
 
 import model
 
+try:
+    import cPickle as pickle
+except ModuleNotFoundError:
+    import pickle
+
 verbose=False
 
 def add_param(idx, name, val, params):
@@ -32,11 +36,13 @@ def add_param(idx, name, val, params):
         assert name not in params, 'duplicated param %s' % name
         params[name] = val
     else:
-        assert params[idx].size() == val.size, 'size mismatch for %s: %s - %s' % (name, (params[idx].shape,), (val.shape,))
+        assert params[idx].size() == val.size,\
+        'size mismatch for %s: %s - %s'\
+        % (name, (params[idx].shape,), (val.shape,))
         params[idx].copy_from_numpy(val)
 
     if verbose:
-        print name, val.shape
+        print(name, val.shape)
 
 
 def conv(m, idx, params, param_names):
@@ -90,10 +96,11 @@ def traverse(m, idx, params, param_names):
 
 if __name__ == '__main__':
     parser = ArgumentParser(description='Convert params from torch to python '
-            'dict. \n resnet could have depth of 18, 34, 101, 152; \n wrn has depth 50; preact has depth 200; addbn has depth 50')
+            'dict. \n resnet could have depth of 18, 34, 101, 152; \n'
+            'wrn has depth 50; preact has depth 200; addbn has depth 50')
     parser.add_argument("infile", help="torch checkpoint file")
-    parser.add_argument("model", choices = ['resnet', 'wrn', 'preact', 
'addbn'])
-    parser.add_argument("depth", type=int, choices = [18, 34, 50, 101, 152, 
200])
+    parser.add_argument("model", choices=['resnet', 'wrn', 'preact', 'addbn'])
+    parser.add_argument("depth", type=int, choices=[18, 34, 50, 101, 152, 200])
     args = parser.parse_args()
 
     net = model.create_net(args.model, args.depth)
@@ -105,8 +112,8 @@ if __name__ == '__main__':
     traverse(m, 0, params, param_names)
     miss = [name for name in param_names if name not in params]
     if len(miss) > 0:
-        print 'The following params are missing from torch file'
-        print miss
+        print('The following params are missing from torch file')
+        print(miss)
 
     outfile = os.path.splitext(args.infile)[0] + '.pickle'
     with open(outfile, 'wb') as fd:

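For reference, the converter above expects the torch checkpoint followed by the model family and depth from the choices shown in the hunk; a hypothetical run (the checkpoint filename is illustrative), which writes a .pickle file next to the input:

    $ python convert.py resnet-101.t7 resnet 101
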
http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/dd58f49a/examples/imagenet/vgg/README.md
----------------------------------------------------------------------
diff --git a/examples/imagenet/vgg/README.md b/examples/imagenet/vgg/README.md
index 2c99902..b6236cf 100644
--- a/examples/imagenet/vgg/README.md
+++ b/examples/imagenet/vgg/README.md
@@ -26,9 +26,9 @@ to SINGA for image classification.
 * Example
 
         # use cpu
-        $ python serve.py --use_cpu --parameter_file vgg11.pickle --depth 11 --use_cpu &
-        # use gpu
         $ python serve.py --use_cpu --parameter_file vgg11.pickle --depth 11 &
+        # use gpu
+        $ python serve.py --parameter_file vgg11.pickle --depth 11 &
 
  The parameter files for the following model and depth configuration pairs are provided:
  * Without batch-normalization, [11](https://s3-ap-southeast-1.amazonaws.com/dlfile/vgg/vgg11.tar.gz), [13](https://s3-ap-southeast-1.amazonaws.com/dlfile/vgg/vgg13.tar.gz), [16](https://s3-ap-southeast-1.amazonaws.com/dlfile/vgg/vgg16.tar.gz), [19](https://s3-ap-southeast-1.amazonaws.com/dlfile/vgg/vgg19.tar.gz)

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/dd58f49a/examples/imagenet/vgg/convert.py
----------------------------------------------------------------------
diff --git a/examples/imagenet/vgg/convert.py b/examples/imagenet/vgg/convert.py
index bd90a87..8786647 100644
--- a/examples/imagenet/vgg/convert.py
+++ b/examples/imagenet/vgg/convert.py
@@ -21,11 +21,15 @@ as python dict using cPickle. Must install pytorch.
 import torch.utils.model_zoo as model_zoo
 
 import numpy as np
-import cPickle as pickle
 from argparse import ArgumentParser
 
 import model
 
+try:
+    import cPickle as pickle
+except ModuleNotFoundError:
+    import pickle
+
 
 model_urls = {
     'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
@@ -84,7 +88,8 @@ if __name__ == '__main__':
                 params[pname] = np.transpose(ary)
         else:
             print('param=%s is missing in the ckpt file' % pname)
-        assert pval.shape == params[pname].shape, 'shape mismatch for %s' % pname
+        assert pval.shape == params[pname].shape,\
+               'shape mismatch for %s' % pname
 
     with open(args.outfile, 'wb') as fd:
         pickle.dump(params, fd)

http://git-wip-us.apache.org/repos/asf/incubator-singa/blob/dd58f49a/examples/imagenet/vgg/serve.py
----------------------------------------------------------------------
diff --git a/examples/imagenet/vgg/serve.py b/examples/imagenet/vgg/serve.py
index 9cdfbd9..b611ae7 100644
--- a/examples/imagenet/vgg/serve.py
+++ b/examples/imagenet/vgg/serve.py
@@ -63,7 +63,8 @@ def serve(net, label_map, dev, agent, topk=5):
                 img -= mean
                 img /= std
                 img = img.transpose((2, 0, 1))
-                img = img[:,(height-224)//2:(height+224)//2,(width-224)//2:(width+224)//2]
+                img = img[:,\
+                (height-224)//2:(height+224)//2,(width-224)//2:(width+224)//2]
                 images.copy_from_numpy(img)
                 print("input: ", images.l1())
                 # do prediction
