leandron commented on a change in pull request #6578: URL: https://github.com/apache/incubator-tvm/pull/6578#discussion_r496771378
########## File path: python/tvm/driver/tvmc/runner.py ########## @@ -0,0 +1,450 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +""" +Provides support to run compiled networks both locally and remotely. +""" +import json +import logging +import os +import tarfile +import tempfile + +import numpy as np +import tvm +from tvm import rpc +from tvm.autotvm.measure import request_remote +from tvm.contrib import graph_runtime as runtime +from tvm.contrib.debugger import debug_runtime + +from . 
import common +from .common import TVMCException +from .main import register_parser + + +# pylint: disable=invalid-name +logger = logging.getLogger("TVMC") + + +@register_parser +def add_run_parser(subparsers): + """ Include parser for 'run' subcommand """ + + parser = subparsers.add_parser("run", help="run a compiled module") + parser.set_defaults(func=drive_run) + + # TODO --device needs to be extended and tested to support other targets, + # like 'cl', 'webgpu', etc (@leandron) + parser.add_argument( + "--device", + choices=["cpu", "gpu"], + default="cpu", + help="target device to run the compiled module", + ) + parser.add_argument( + "--fill-mode", + choices=["zeros", "ones", "random"], + default="zeros", + help="fill all input tensors with values", + ) + parser.add_argument("-i", "--inputs", help="path to the .npz input file") + parser.add_argument("-o", "--outputs", help="path to the .npz output file") + parser.add_argument( + "--print-time", action="store_true", help="record and print the execution time(s)" + ) + parser.add_argument( + "--print-top", + metavar="N", + type=int, + help="print the top n values and indices of the output tensor", + ) + parser.add_argument( + "--profile", action="store_true", help="generate profiling data from the runtime execution" + ) + parser.add_argument("--repeat", metavar="N", type=int, default=1, help="repeat the run n times") + parser.add_argument( + "--rpc-key", + nargs=1, + help="the RPC tracker key of the target device", + ) + parser.add_argument( + "--rpc-tracker", + nargs=1, + help="hostname (required) and port (optional, defaults to 9090) of the RPC tracker, " + "e.g. '192.168.0.100:9999'", + ) + parser.add_argument("FILE", help="path to the compiled module file") + + +def drive_run(args): + """Invoke runner module with command line arguments + + Parameters + ---------- + args: argparse.Namespace + Arguments from command line parser. 
+ """ + inputs = {} + if args.inputs: + inputs = np.load(args.inputs) Review comment: The practical issue with moving the fill logic here is that we need the shapes of all inputs, and at this point we haven't loaded the model yet. Once we have all this information, the logic already exists within `make_inputs_dict`. I suggest we keep it this way because it simplifies testing a bit: having `run_module` take an `input` parameter allows us to inject inputs. If you feel the design is too bad, I can try to find alternatives. ---------------------------------------------------------------- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: users@infra.apache.org
