This is an automated email from the ASF dual-hosted git repository.
junrushao pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git
The following commit(s) were added to refs/heads/main by this push:
new 0961b65 [Tutorial][Executor] Fix the usage of executors in tutorials (#8586)
0961b65 is described below
commit 0961b65cbf0d6e1c5f51e0e88dd17886d6111522
Author: Jiawei Liu <[email protected]>
AuthorDate: Sat Aug 28 04:28:07 2021 -0500
[Tutorial][Executor] Fix the usage of executors in tutorials (#8586)
* fix: executor usage for keras tutorial
* fix: executor usage for onnx tutorial
* [Tutorial][Executor] Fix executors in tutorials
---
tutorials/dev/bring_your_own_datatypes.py | 3 ++-
tutorials/frontend/from_keras.py | 4 ++--
tutorials/frontend/from_onnx.py | 6 ++++--
3 files changed, 8 insertions(+), 5 deletions(-)
diff --git a/tutorials/dev/bring_your_own_datatypes.py b/tutorials/dev/bring_your_own_datatypes.py
index a5e8e28..1cf556d 100644
--- a/tutorials/dev/bring_your_own_datatypes.py
+++ b/tutorials/dev/bring_your_own_datatypes.py
@@ -257,8 +257,9 @@ module, params = get_mobilenet()
######################################################################
# It's easy to execute MobileNet with native TVM:
+ex = tvm.relay.create_executor("graph", mod=module, params=params)
input = get_cat_image()
-result = tvm.relay.create_executor("graph", mod=module).evaluate()(input, **params).numpy()
+result = ex.evaluate()(input).numpy()
# print first 10 elements
print(result.flatten()[:10])
diff --git a/tutorials/frontend/from_keras.py b/tutorials/frontend/from_keras.py
index e62836d..182e769 100644
--- a/tutorials/frontend/from_keras.py
+++ b/tutorials/frontend/from_keras.py
@@ -103,14 +103,14 @@ dev = tvm.cuda(0)
# due to a latent bug. Note that the pass context only has an effect within
# evaluate() and is not captured by create_executor().
with tvm.transform.PassContext(opt_level=0):
- model = relay.build_module.create_executor("graph", mod, dev, target).evaluate()
+ model = relay.build_module.create_executor("graph", mod, dev, target, params).evaluate()
######################################################################
# Execute on TVM
# ---------------
dtype = "float32"
-tvm_out = model(tvm.nd.array(data.astype(dtype)), **params)
+tvm_out = model(tvm.nd.array(data.astype(dtype)))
top1_tvm = np.argmax(tvm_out.numpy()[0])
#####################################################################
diff --git a/tutorials/frontend/from_onnx.py b/tutorials/frontend/from_onnx.py
index 890bfba..fd51d7a 100644
--- a/tutorials/frontend/from_onnx.py
+++ b/tutorials/frontend/from_onnx.py
@@ -92,13 +92,15 @@ shape_dict = {input_name: x.shape}
mod, params = relay.frontend.from_onnx(onnx_model, shape_dict)
with tvm.transform.PassContext(opt_level=1):
- compiled = relay.build_module.create_executor("graph", mod, tvm.cpu(0), target).evaluate()
+ executor = relay.build_module.create_executor(
+ "graph", mod, tvm.cpu(0), target, params
+ ).evaluate()
######################################################################
# Execute on TVM
# ---------------------------------------------
dtype = "float32"
-tvm_output = compiled(tvm.nd.array(x.astype(dtype)), **params).numpy()
+tvm_output = executor(tvm.nd.array(x.astype(dtype))).numpy()
######################################################################
# Display results