This is an automated email from the ASF dual-hosted git repository.
ruihangl pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git
The following commit(s) were added to refs/heads/main by this push:
new befee5827e [Docs] Fix outdated code examples, typos, and missing API reference in documentation(2) (#19344)
befee5827e is described below
commit befee5827eb6816c37c7bce1fe70c37182f7cc14
Author: Shushi Hong <[email protected]>
AuthorDate: Sat Apr 4 22:55:19 2026 -0400
[Docs] Fix outdated code examples, typos, and missing API reference in documentation(2) (#19344)
This PR is a follow-up of #18965
- Fix incorrect variable names in Relax dataflow code example (`lv0` →
`lv`, `b` → `n`) in
`docs/deep_dive/relax/learning.rst`
- Fix `func.time_evaluator(func.entry_name, ...)` to
`func.time_evaluator("add_one", ...)`
in `docs/how_to/tutorials/cross_compilation_and_rpc.py`, since
`entry_name` is a class
constant `"main"` but the compiled function is named `"add_one"`
- Fix typo `tvfm.testing` → `tvm.testing` in
`docs/how_to/dev/pytest_target_parametrization.rst`
- Add missing `tvm.relax.frontend.tflite` automodule entry to
`docs/reference/api/python/relax/frontend.rst`
---
docs/conf.py | 6 +++---
docs/deep_dive/relax/learning.rst | 4 ++--
docs/how_to/dev/pytest_target_parametrization.rst | 2 +-
docs/how_to/tutorials/cross_compilation_and_rpc.py | 2 +-
docs/how_to/tutorials/export_and_load_executable.py | 10 +++++-----
docs/how_to/tutorials/optimize_llm.py | 4 ++--
docs/reference/api/python/relax/frontend.rst | 6 ++++++
docs/reference/api/python/topi.rst | 2 +-
8 files changed, 21 insertions(+), 15 deletions(-)
diff --git a/docs/conf.py b/docs/conf.py
index 1502c72a2e..182e799ef5 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -58,7 +58,7 @@ sys.path.insert(0, str(tvm_path.resolve() / "docs"))
# General information about the project.
project = "tvm"
author = "Apache Software Foundation"
-copyright = f"2020 - 2023, {author}"
+copyright = f"2020 - 2026, {author}"
github_doc_root = "https://github.com/apache/tvm/tree/main/docs/"
os.environ["TVM_BUILD_DOC"] = "1"
@@ -486,10 +486,10 @@ tvm_alias_check_map = {
## Setup header and other configs
import tlcpack_sphinx_addon
-footer_copyright = "© 2024 Apache Software Foundation | All rights reserved"
+footer_copyright = "© 2026 Apache Software Foundation | All rights reserved"
footer_note = " ".join(
"""
-Copyright © 2024 The Apache Software Foundation. Apache TVM, Apache, the Apache feather,
+Copyright © 2026 The Apache Software Foundation. Apache TVM, Apache, the Apache feather,
and the Apache TVM project logo are either trademarks or registered trademarks of
the Apache Software Foundation.""".split("\n")
).strip()
diff --git a/docs/deep_dive/relax/learning.rst b/docs/deep_dive/relax/learning.rst
index b24ed26752..5590d62e2c 100644
--- a/docs/deep_dive/relax/learning.rst
+++ b/docs/deep_dive/relax/learning.rst
@@ -239,8 +239,8 @@ Another important element in a relax function is the R.dataflow() scope annotati
with R.dataflow():
lv = R.call_tir(cls.linear, (x, w0, b0), out_sinfo=R.Tensor((n, 256), dtype="float32"))
- lv1 = R.call_tir(cls.relu, (lv0,), out_sinfo=R.Tensor((n, 256), dtype="float32"))
- lv2 = R.call_tir(cls.linear, (lv1, w1, b1), out_sinfo=R.Tensor((b, 10), dtype="float32"))
+ lv1 = R.call_tir(cls.relu, (lv,), out_sinfo=R.Tensor((n, 256), dtype="float32"))
+ lv2 = R.call_tir(cls.linear, (lv1, w1, b1), out_sinfo=R.Tensor((n, 10), dtype="float32"))
R.output(lv2)
Before we talk about the dataflow block, let us first introduce the concept of
**pure** and
diff --git a/docs/how_to/dev/pytest_target_parametrization.rst b/docs/how_to/dev/pytest_target_parametrization.rst
index 3fbb69401d..b29419a105 100644
--- a/docs/how_to/dev/pytest_target_parametrization.rst
+++ b/docs/how_to/dev/pytest_target_parametrization.rst
@@ -144,7 +144,7 @@ marks are as follows.
and additionally marks that the test should be skipped
(``@pytest.mark.skipif``) entirely if no GPU is present.
-- ``@tvfm.testing.requires_RUNTIME`` - Several decorators
+- ``@tvm.testing.requires_RUNTIME`` - Several decorators
(e.g. ``@tvm.testing.requires_cuda``), each of which skips a test if
the specified runtime cannot be used. A runtime cannot be used if it
is disabled in the ``config.cmake``, or if a compatible device is
diff --git a/docs/how_to/tutorials/cross_compilation_and_rpc.py b/docs/how_to/tutorials/cross_compilation_and_rpc.py
index e4383278e8..f573dfc7ce 100644
--- a/docs/how_to/tutorials/cross_compilation_and_rpc.py
+++ b/docs/how_to/tutorials/cross_compilation_and_rpc.py
@@ -196,7 +196,7 @@ np.testing.assert_equal(b.numpy(), a.numpy() + 1)
# function over number times, measures the cost per run on the remote
# device and returns the measured cost. Network overhead is excluded.
-time_f = func.time_evaluator(func.entry_name, dev, number=10)
+time_f = func.time_evaluator("add_one", dev, number=10)
cost = time_f(a, b).mean
print(f"{cost:g} secs/op")
diff --git a/docs/how_to/tutorials/export_and_load_executable.py b/docs/how_to/tutorials/export_and_load_executable.py
index 1b4092125b..7378b3c71c 100644
--- a/docs/how_to/tutorials/export_and_load_executable.py
+++ b/docs/how_to/tutorials/export_and_load_executable.py
@@ -263,13 +263,13 @@ if RUN_EXAMPLE:
#
# # Step 6: Extract result (output may be tuple or single Tensor)
# # PyTorch models typically return tuples, ONNX models may return a single Tensor
-# if isinstance(tvm_output, tvm.ir.Array) and len(tvm_output) > 0:
-# result_tensor = tvm_output[0]
+# if isinstance(output, tvm.ir.Array) and len(output) > 0:
+# result_tensor = output[0]
# else:
-# result_tensor = tvm_output
+# result_tensor = output
#
-# print("Prediction shape:", result.shape)
-# print("Predicted class:", np.argmax(result.numpy()))
+# print("Prediction shape:", result_tensor.shape)
+# print("Predicted class:", np.argmax(result_tensor.numpy()))
#
# **Running on GPU:**
# To run on GPU instead of CPU, make the following changes:
diff --git a/docs/how_to/tutorials/optimize_llm.py b/docs/how_to/tutorials/optimize_llm.py
index c5341266be..58727923a5 100644
--- a/docs/how_to/tutorials/optimize_llm.py
+++ b/docs/how_to/tutorials/optimize_llm.py
@@ -237,7 +237,7 @@ class LlamaModel(nn.Module):
return hidden_states
-class LlamaForCasualLM(nn.Module):
+class LlamaForCausalLM(nn.Module):
def __init__(self, config: LlamaConfig):
self.model = LlamaModel(config)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
@@ -358,7 +358,7 @@ class LlamaForCasualLM(nn.Module):
# For demonstration, we only show the part of the model architecture. and parameters.
model_config = LlamaConfig()
-model = LlamaForCasualLM(model_config)
+model = LlamaForCausalLM(model_config)
model.to("float16")
mod, named_params = model.export_tvm(spec=model.get_default_spec())
prefill_str = mod["prefill"].script()
diff --git a/docs/reference/api/python/relax/frontend.rst b/docs/reference/api/python/relax/frontend.rst
index c037f323ed..7f3a2658b9 100644
--- a/docs/reference/api/python/relax/frontend.rst
+++ b/docs/reference/api/python/relax/frontend.rst
@@ -41,6 +41,12 @@ tvm.relax.frontend.stablehlo
:members:
:imported-members:
+tvm.relax.frontend.tflite
+*************************
+.. automodule:: tvm.relax.frontend.tflite
+ :members:
+ :imported-members:
+
tvm.relax.frontend.torch
************************
.. automodule:: tvm.relax.frontend.torch
diff --git a/docs/reference/api/python/topi.rst b/docs/reference/api/python/topi.rst
index ce44e07ded..e8ba9edd5f 100644
--- a/docs/reference/api/python/topi.rst
+++ b/docs/reference/api/python/topi.rst
@@ -20,7 +20,7 @@ tvm.topi
.. automodule:: tvm.topi
:members:
:imported-members:
- :noindex: AssertStmt
+ :noindex:
:autosummary:
tvm.topi.nn