This is an automated email from the ASF dual-hosted git repository.

echuraev pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git


The following commit(s) were added to refs/heads/main by this push:
     new 958c27123a [Fix] Remove duplicated words from comments, NFC (#15873)
958c27123a is described below

commit 958c27123a45a9629e57cee20dbca28263c836bd
Author: Krzysztof Parzyszek <[email protected]>
AuthorDate: Thu Oct 5 00:37:35 2023 -0500

    [Fix] Remove duplicated words from comments, NFC (#15873)
    
    Removed instances of accidentally repeated words from comments. There
    are cases where duplicated words appear legitimately; those cases remain
    unmodified.
---
 docs/Doxyfile                                                    | 2 +-
 docs/conf.py                                                     | 4 ++--
 include/tvm/runtime/logging.h                                    | 2 +-
 include/tvm/runtime/ndarray.h                                    | 2 +-
 include/tvm/runtime/packed_func.h                                | 2 +-
 include/tvm/tir/expr.h                                           | 2 +-
 include/tvm/tir/stmt.h                                           | 2 +-
 python/tvm/relay/op/contrib/clml.py                              | 2 +-
 python/tvm/relay/transform/memory_plan.py                        | 2 +-
 python/tvm/runtime/ndarray.py                                    | 2 +-
 python/tvm/te/hybrid/__init__.py                                 | 2 +-
 python/tvm/te/schedule.py                                        | 2 +-
 python/tvm/topi/arm_cpu/qnn.py                                   | 2 +-
 src/arith/const_fold.h                                           | 4 ++--
 src/arith/product_normal_form.h                                  | 4 ++--
 src/relay/collage/partition_rule.h                               | 2 +-
 src/relay/transforms/combine_parallel_op_batch.h                 | 2 +-
 src/runtime/c_runtime_api.cc                                     | 2 +-
 src/runtime/crt/aot_executor/aot_executor.c                      | 2 +-
 src/target/source/ptx.h                                          | 2 +-
 src/tir/analysis/control_flow_graph.h                            | 2 +-
 src/tir/schedule/error.h                                         | 2 +-
 src/tir/transforms/unroll_loop.cc                                | 2 +-
 tests/python/contrib/test_arm_compute_lib/infrastructure.py      | 2 +-
 tests/python/contrib/test_hexagon/conv2d/test_conv2d_conv2d.md   | 2 +-
 tests/python/contrib/test_hexagon/test_benchmark_elemwise_add.py | 2 +-
 tests/python/relay/test_pass_plan_devices.py                     | 2 +-
 tests/python/unittest/test_tir_transform_remove_no_op.py         | 2 +-
 web/src/environment.ts                                           | 2 +-
 29 files changed, 32 insertions(+), 32 deletions(-)

diff --git a/docs/Doxyfile b/docs/Doxyfile
index 8615f5b2ae..d1ea82c842 100644
--- a/docs/Doxyfile
+++ b/docs/Doxyfile
@@ -306,7 +306,7 @@ EXTENSION_MAPPING      =
 
 # When enabled doxygen tries to link words that correspond to documented
 # classes, or namespaces to their corresponding documentation. Such a link can
-# be prevented in individual cases by by putting a % sign in front of the word
+# be prevented in individual cases by putting a % sign in front of the word
 # or globally by setting AUTOLINK_SUPPORT to NO.
 # The default value is: YES.
 
diff --git a/docs/conf.py b/docs/conf.py
index 330ae919a6..b1ecb37c97 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -244,7 +244,7 @@ INSTALL_TVM_CUDA_DEV = f"""\
 # Installs the latest dev build of TVM from PyPI, with CUDA enabled. To use this,
 # you must request a Google Colab instance with a GPU by going to Runtime ->
 # Change runtime type -> Hardware accelerator -> GPU. If you wish to build from
-# source, see see https://tvm.apache.org/docs/install/from_source.html
+# source, see https://tvm.apache.org/docs/install/from_source.html
 pip install tlcpack-nightly-cu113 --pre -f https://tlcpack.ai/wheels""";
 
 INSTALL_TVM_CUDA_FIXED = f"""\
@@ -252,7 +252,7 @@ INSTALL_TVM_CUDA_FIXED = f"""\
 # Installs TVM version {version} from PyPI, with CUDA enabled. To use this,
 # you must request a Google Colab instance with a GPU by going to Runtime ->
 # Change runtime type -> Hardware accelerator -> GPU. If you wish to build from
-# source, see see https://tvm.apache.org/docs/install/from_source.html
+# source, see https://tvm.apache.org/docs/install/from_source.html
 pip install apache-tvm-cu113=={version} -f https://tlcpack.ai/wheels""";
 
 
diff --git a/include/tvm/runtime/logging.h b/include/tvm/runtime/logging.h
index 16a176a801..45c390df1d 100644
--- a/include/tvm/runtime/logging.h
+++ b/include/tvm/runtime/logging.h
@@ -113,7 +113,7 @@
  * in a function, or 'continue' or 'break' in a loop)
 * The default behavior when quit_on_assertion is false, is to 'return false'. If this is not
 * desirable, the macro caller can pass one more last parameter to COND_X to tell COND_X what
- * to do when when quit_on_assertion is false and the assertion fails.
+ * to do when quit_on_assertion is false and the assertion fails.
  *
 * Rationale: These macros were designed to implement functions that have two behaviors
 * in a concise way. Those behaviors are quitting on assertion failures, or trying to
diff --git a/include/tvm/runtime/ndarray.h b/include/tvm/runtime/ndarray.h
index 2a06856fea..8400344bf5 100644
--- a/include/tvm/runtime/ndarray.h
+++ b/include/tvm/runtime/ndarray.h
@@ -275,7 +275,7 @@ class NDArray::ContainerBase {
  protected:
   /*!
    * \brief The shape container,
-   *  can be used used for shape data.
+   *  can be used for shape data.
    */
   ShapeTuple shape_;
 };
diff --git a/include/tvm/runtime/packed_func.h b/include/tvm/runtime/packed_func.h
index caaaec3640..4159c4b2e7 100644
--- a/include/tvm/runtime/packed_func.h
+++ b/include/tvm/runtime/packed_func.h
@@ -133,7 +133,7 @@ class PackedFuncSubObj : public PackedFuncObj {
  *  The arguments are passed by packed format.
  *
  *  This is an useful unified interface to call generated functions,
- *  It is the unified function function type of TVM.
+ *  It is the unified function type of TVM.
  *  It corresponds to TVMFunctionHandle in C runtime API.
  */
 class PackedFunc : public ObjectRef {
diff --git a/include/tvm/tir/expr.h b/include/tvm/tir/expr.h
index 3286be7b79..4e29eddadd 100644
--- a/include/tvm/tir/expr.h
+++ b/include/tvm/tir/expr.h
@@ -1030,7 +1030,7 @@ class CommReducer : public ObjectRef {
   TVM_DEFINE_OBJECT_REF_METHODS(CommReducer, ObjectRef, CommReducerNode);
 };
 
-/*! \brief Reduction operator operator */
+/*! \brief Reduction operator */
 class ReduceNode : public PrimExprNode {
  public:
   /*! \brief The commutative combiner */
diff --git a/include/tvm/tir/stmt.h b/include/tvm/tir/stmt.h
index 7e40b329d2..bddf87101f 100644
--- a/include/tvm/tir/stmt.h
+++ b/include/tvm/tir/stmt.h
@@ -778,7 +778,7 @@ class SeqStmt : public Stmt {
     }
 
     // If the argument is a single SeqStmt argument with no
-    // flattening or unwrapping required required, then we may
+    // flattening or unwrapping required, then we may
     // return the SeqStmt as-is.
     if constexpr (sizeof...(seq_args) == 1) {
       if (auto opt = Flattener::AsSeqStmt(std::forward<Args>(seq_args)...)) {
diff --git a/python/tvm/relay/op/contrib/clml.py b/python/tvm/relay/op/contrib/clml.py
index 82926bb31d..f194dd114b 100644
--- a/python/tvm/relay/op/contrib/clml.py
+++ b/python/tvm/relay/op/contrib/clml.py
@@ -35,7 +35,7 @@ from ..strategy.generic import is_depthwise_conv2d
 
 
 def clml_sdk_version():
-    """Utility function to get clml version version"""
+    """Utility function to get clml version"""
 
     return int(tvm.support.libinfo().get("TVM_CLML_VERSION", 2))
 
diff --git a/python/tvm/relay/transform/memory_plan.py b/python/tvm/relay/transform/memory_plan.py
index ca67014730..948a79079e 100644
--- a/python/tvm/relay/transform/memory_plan.py
+++ b/python/tvm/relay/transform/memory_plan.py
@@ -287,7 +287,7 @@ class StorageCoalesce(ExprMutator):
             dynamic_regions.append(lhs)
         else:
             # A new scope is created when entering a new region with different
-            # device device.
+            # device.
             region = self.current_region(dtype)
             if region.device and region.device.device_type != dev.device_type:
                 self.enter_scope()
diff --git a/python/tvm/runtime/ndarray.py b/python/tvm/runtime/ndarray.py
index 6f0d1f440a..d6902bc625 100644
--- a/python/tvm/runtime/ndarray.py
+++ b/python/tvm/runtime/ndarray.py
@@ -617,7 +617,7 @@ def array(arr, device=cpu(0), mem_scope=None):
         The array to be copied from
 
     device : Device, optional
-        The device device to create the array
+        The device to create the array
 
     mem_scope : Optional[str]
         The memory scope of the array
diff --git a/python/tvm/te/hybrid/__init__.py b/python/tvm/te/hybrid/__init__.py
index 9530f0d0ae..cd320c6b20 100644
--- a/python/tvm/te/hybrid/__init__.py
+++ b/python/tvm/te/hybrid/__init__.py
@@ -39,7 +39,7 @@ from .utils import _pruned_source
 
 
 def script(pyfunc):
-    """Decorate a python function function as hybrid script.
+    """Decorate a python function as hybrid script.
 
     The hybrid function support emulation mode and parsing to
     the internal language IR.
diff --git a/python/tvm/te/schedule.py b/python/tvm/te/schedule.py
index 0b6abc2566..936ead654d 100644
--- a/python/tvm/te/schedule.py
+++ b/python/tvm/te/schedule.py
@@ -648,7 +648,7 @@ class SpecializedCondition(Object):
 
 
 # Sentinel value used to indicate which groups of pre-flattening axes
-# should be used to post-flattening axes axes.  Moved from
+# should be used to post-flattening axes.  Moved from
 # te.AXIS_SEPARATOR to tir.IndexMap.AXIS_SEPARATOR for general use,
 # maintained here for backwards compatibility.
 AXIS_SEPARATOR = IndexMap.AXIS_SEPARATOR
diff --git a/python/tvm/topi/arm_cpu/qnn.py b/python/tvm/topi/arm_cpu/qnn.py
index bfd37847f3..b90ee99447 100644
--- a/python/tvm/topi/arm_cpu/qnn.py
+++ b/python/tvm/topi/arm_cpu/qnn.py
@@ -309,7 +309,7 @@ def qnn_conv2d(attrs, inputs, out_type):
     # the output width, but autotuning this value would improve performance a lot.
     num_outputs = _pick_num_outputs(out_width)
 
-    # Next, decide whether whether we need "parity alternation". For example, if we have an
+    # Next, decide whether we need "parity alternation". For example, if we have an
     # 8x3x3x3 kernel (8 output channels, height 3, width 3, input channels 3) in the OHWI layout,
     # then every output channel kernel slice will be 27 halfwords. This means every other output
     # channel will not be word aligned, which will cause slowness/crashes!
diff --git a/src/arith/const_fold.h b/src/arith/const_fold.h
index 22a91b91b9..e66991d70e 100644
--- a/src/arith/const_fold.h
+++ b/src/arith/const_fold.h
@@ -441,7 +441,7 @@ struct SymbolicLimits {
 /*!
  * \brief Opaque expression representing positive infinity.
  *
- *  It can can only be used as parameter of by min/max
+ *  It can only be used as parameter of by min/max
  *  for integer analysis and cannot be used in normal expressions.
  *
  * \return positive infinity.
@@ -459,7 +459,7 @@ inline bool is_pos_inf(const PrimExpr& value) { return value.same_as(SymbolicLim
 /*!
  * \brief Opaque expression representing negative infinity.
  *
- *  It can can only be used as parameter of by min/max
+ *  It can only be used as parameter of by min/max
  *  for integer analysis and cannot be used in normal expressions.
  *
  * \return negative infinity.
diff --git a/src/arith/product_normal_form.h b/src/arith/product_normal_form.h
index d27ca76650..ed258f207d 100644
--- a/src/arith/product_normal_form.h
+++ b/src/arith/product_normal_form.h
@@ -66,11 +66,11 @@ inline void UnpackSum(const PrimExpr& value, FLeaf fleaf, int sign = 1) {
 }
 
 /*!
- * \brief Helper function to multiply extent and and re-normalize.
+ * \brief Helper function to multiply extent and re-normalize.
  *
  * Multiply extent scale and re-normalize to form (x * y) * z
  *
- * NOTE on multiplication order: when have have shape (s[0], s[1], s[2]),
+ * NOTE on multiplication order: when have shape (s[0], s[1], s[2]),
  * we prefer to multiple in order of s[0] * s[1] * s[2]
 
  * \param lhs The lhs iterator
diff --git a/src/relay/collage/partition_rule.h b/src/relay/collage/partition_rule.h
index ca68c9b086..c9b7e93d71 100644
--- a/src/relay/collage/partition_rule.h
+++ b/src/relay/collage/partition_rule.h
@@ -375,7 +375,7 @@ class OpCallByKindPartitionRule : public PartitionRule {
  *
 * Kinds are ordered as above from least- to most-constraining w.r.t. possible partition
 * opportunities. When we write a kind abbreviation below we intend it to mean that kind *or less*.
- * And when when write 'kl -> kr' we mean it to match a sub-expression of kind kr or less who's
+ * And when write 'kl -> kr' we mean it to match a sub-expression of kind kr or less who's
  * dataflow inputs are all of kind kl or less.
  *
 * We can then mimic the classic \p FuseOps TVM Pass with the following more primitive combiner
diff --git a/src/relay/transforms/combine_parallel_op_batch.h b/src/relay/transforms/combine_parallel_op_batch.h
index db4734bffc..b9edafe754 100644
--- a/src/relay/transforms/combine_parallel_op_batch.h
+++ b/src/relay/transforms/combine_parallel_op_batch.h
@@ -133,7 +133,7 @@ class ParallelOpBatchCombiner : public ParallelOpCombiner {
 
  private:
   /* \brief name of op to replace combined ops with. for example,
-   *         for combining parallel dense, this will will be set to
+   *         for combining parallel dense, this will be set to
    *         nn.batch_matmul
    */
   std::string batch_op_name_;
diff --git a/src/runtime/c_runtime_api.cc b/src/runtime/c_runtime_api.cc
index 3471adefeb..9f2ea8e2ff 100644
--- a/src/runtime/c_runtime_api.cc
+++ b/src/runtime/c_runtime_api.cc
@@ -229,7 +229,7 @@ void DeviceAPI::SyncStreamFromTo(Device dev, TVMStreamHandle event_src, TVMStrea
 /*!
  * \brief Normalize error message
  *
- *  Parse them header generated by by LOG(FATAL) and ICHECK
+ *  Parse them header generated by LOG(FATAL) and ICHECK
  *  and reformat the message into the standard format.
  *
  *  This function will also merge all the stack traces into
diff --git a/src/runtime/crt/aot_executor/aot_executor.c b/src/runtime/crt/aot_executor/aot_executor.c
index 9e733c21c3..8ab4160306 100644
--- a/src/runtime/crt/aot_executor/aot_executor.c
+++ b/src/runtime/crt/aot_executor/aot_executor.c
@@ -228,7 +228,7 @@ int TVMAotExecutor_Release(TVMAotExecutor* executor, const DLDevice device) {
   int status;
 
   if (executor->num_args > 0) {
-    // free TVMNDArray data memory for each each argument
+    // free TVMNDArray data memory for each argument
     int i;
     for (i = 0; i < executor->num_args; ++i) {
       status = TVMNDArray_Release(&executor->args[i]);
diff --git a/src/target/source/ptx.h b/src/target/source/ptx.h
index 13d2f3cefc..b82a9c6ad3 100644
--- a/src/target/source/ptx.h
+++ b/src/target/source/ptx.h
@@ -146,7 +146,7 @@ std::string PrintArriveBarrierAsm(const std::string& barrier);
 /*!
 * \brief Print ptx barrier arrival with expect tx operation using mbarrier.arrive.expect_tx
  * \param barrier: The name of the barrier in shared memory.
- * \param byte_count: Increases the the tx count of the mbarrier object to track completion of
+ * \param byte_count: Increases the tx count of the mbarrier object to track completion of
  * addtional async transactions.
  */
 std::string PrintArriveBarrierExpectTxAsm(const std::string& barrier,
diff --git a/src/tir/analysis/control_flow_graph.h b/src/tir/analysis/control_flow_graph.h
index 35934351dc..543feeecfe 100644
--- a/src/tir/analysis/control_flow_graph.h
+++ b/src/tir/analysis/control_flow_graph.h
@@ -129,7 +129,7 @@ struct BufferTouch {
    *  accessed by this touch during this loop iteration or a
    *  subsequent loop iteration.
    *
-   * Used during backward propagation, to track indices that that are
+   * Used during backward propagation, to track indices that are
    * overwritten in the current loop iteration or in a later loop
    * iteration.
    */
diff --git a/src/tir/schedule/error.h b/src/tir/schedule/error.h
index d344f46873..fe4dbdb91a 100644
--- a/src/tir/schedule/error.h
+++ b/src/tir/schedule/error.h
@@ -69,7 +69,7 @@ class LoopPositionError : public ScheduleError {
   String DetailRenderTemplate() const final {
     std::ostringstream os;
     os << "ScheduleError: The input loop {0} of " << primitive_
-       << " is required to be be an ancestor of block {1}.";
+       << " is required to be an ancestor of block {1}.";
     return os.str();
   }
 
diff --git a/src/tir/transforms/unroll_loop.cc b/src/tir/transforms/unroll_loop.cc
index e43d4d7fd6..0c448d8e31 100644
--- a/src/tir/transforms/unroll_loop.cc
+++ b/src/tir/transforms/unroll_loop.cc
@@ -247,7 +247,7 @@ class LoopUnroller : public StmtExprMutator {
   int auto_max_step_;
   int auto_max_depth_;
   // max extent of loop to auto unroll
-  // this not not count the total steps, only count the number of loops
+  // this does not count the total steps, only count the number of loops
   int auto_max_extent_;
   bool explicit_unroll_;
   // Wether to unroll loops to local access.
diff --git a/tests/python/contrib/test_arm_compute_lib/infrastructure.py b/tests/python/contrib/test_arm_compute_lib/infrastructure.py
index 7d802e5b3a..ae3acbb096 100644
--- a/tests/python/contrib/test_arm_compute_lib/infrastructure.py
+++ b/tests/python/contrib/test_arm_compute_lib/infrastructure.py
@@ -124,7 +124,7 @@ class Device:
 
 
 def get_low_high_atol_rtol(dtype):
-    """Returns a tuple with boundary values and and tolerance for ACL tests."""
+    """Returns a tuple with boundary values and tolerance for ACL tests."""
 
     if dtype == "float32":
         low, high, atol, rtol = (-127, 128, 0.001, 0.001)
diff --git a/tests/python/contrib/test_hexagon/conv2d/test_conv2d_conv2d.md b/tests/python/contrib/test_hexagon/conv2d/test_conv2d_conv2d.md
index 61c1241c67..3671d90c24 100644
--- a/tests/python/contrib/test_hexagon/conv2d/test_conv2d_conv2d.md
+++ b/tests/python/contrib/test_hexagon/conv2d/test_conv2d_conv2d.md
@@ -773,7 +773,7 @@ The input cache grows to hold the vertically adjacent slice:
 
 *Filter Cache*
 
-The filter cache grows to hold the 3x3 filter filter:
+The filter cache grows to hold the 3x3 filter:
 
 ```
  allocate(packed_filter.global: Pointer(global float32), float32, [73728]), storage_scope = global;
diff --git a/tests/python/contrib/test_hexagon/test_benchmark_elemwise_add.py b/tests/python/contrib/test_hexagon/test_benchmark_elemwise_add.py
index ee58514569..a927532c8f 100644
--- a/tests/python/contrib/test_hexagon/test_benchmark_elemwise_add.py
+++ b/tests/python/contrib/test_hexagon/test_benchmark_elemwise_add.py
@@ -345,7 +345,7 @@ def _get_elemwise_add_reference_value_tensors(shape: list, dtype: str):
                 next_value += 1
 
     elif np_dtype.kind == "f":
-        # NOTE: For simplicity, we avoid test data that that require
+        # NOTE: For simplicity, we avoid test data that require
         # well-defined behavior on floating-point overflow.
         # But it may be reasonable to test that in the future.
         min_value = np.finfo(np_dtype).min
diff --git a/tests/python/relay/test_pass_plan_devices.py b/tests/python/relay/test_pass_plan_devices.py
index f654b4b453..0376410bd4 100644
--- a/tests/python/relay/test_pass_plan_devices.py
+++ b/tests/python/relay/test_pass_plan_devices.py
@@ -1612,7 +1612,7 @@ def test_free_on_device():
              %1 = @on_scope_b(on_device(%b, virtual_device=meta[VirtualDevice][0], constrain_body=False));
               // %c's memory scope is "scopeB", so no copy required.
              %2 = @on_scope_b(on_device(%c, virtual_device=meta[VirtualDevice][0], constrain_body=False));
-              // result's memory scope is is on "scopeA", so will require a "scopeB"->"scopeA" copy.
+              // result's memory scope is on "scopeA", so will require a "scopeB"->"scopeA" copy.
               %3 = add(add(%0, %1), %2);
              on_device(%3, virtual_device=meta[VirtualDevice][0], constrain_body=False)
             }
diff --git a/tests/python/unittest/test_tir_transform_remove_no_op.py b/tests/python/unittest/test_tir_transform_remove_no_op.py
index 4cf746a7c2..5b184ee6ed 100644
--- a/tests/python/unittest/test_tir_transform_remove_no_op.py
+++ b/tests/python/unittest/test_tir_transform_remove_no_op.py
@@ -414,7 +414,7 @@ class TestRemoveSeparatedOverwriteOfPredicatedLoop(BaseBeforeAfter):
     """Remove repeated writes to the same predicated region.
 
     Similar to TestRemoveSeparatedOverwrites, but the independent loop
-    between the first and second writes writes to a different subset
+    between the first and second writes to a different subset
     of the same buffer.
     """
 
diff --git a/web/src/environment.ts b/web/src/environment.ts
index 24126c0961..42a873f128 100644
--- a/web/src/environment.ts
+++ b/web/src/environment.ts
@@ -109,7 +109,7 @@ export class Environment implements LibraryProvider {
   }
 
   private environment(initEnv: Record<string, any>): Record<string, any> {
-    // default env can be be overriden by libraries.
+    // default env can be overriden by libraries.
     const defaultEnv = {
       "__cxa_thread_atexit": (): void => {},
       // eslint-disable-next-line @typescript-eslint/no-unused-vars