This is an automated email from the ASF dual-hosted git repository.

tqchen pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tvm.git


The following commit(s) were added to refs/heads/main by this push:
     new 27a314eb73 [MISC] Fix compilation warnings of unnecessary `std::move()` calls (#18130)
27a314eb73 is described below

commit 27a314eb73d2805d8ffe0c4d6948eeace4f06fd7
Author: Siyuan Feng <[email protected]>
AuthorDate: Fri Jul 11 20:03:28 2025 +0800

    [MISC] Fix compilation warnings of unnecessary `std::move()` calls (#18130)
    
    Fix compilation warnings of "moving a temporary object prevents copy elision"
---
 src/relax/analysis/graph_partitioner.cc           | 2 +-
 src/relax/ir/expr.cc                              | 2 +-
 src/relax/op/nn/attention.cc                      | 5 ++---
 src/relax/op/nn/convolution.cc                    | 4 ++--
 src/relax/op/nn/nn.cc                             | 4 ++--
 src/relax/op/tensor/grad.cc                       | 7 +++----
 src/relax/transform/convert_layout.cc             | 2 +-
 src/relax/transform/fuse_tir.cc                   | 2 +-
 src/relax/transform/legalize_ops.cc               | 2 +-
 src/relax/transform/remove_purity_checking.cc     | 2 +-
 src/relax/transform/to_mixed_precision.cc         | 4 ++--
 src/script/printer/relax/function.cc              | 6 +++---
 src/script/printer/tir/function.cc                | 4 ++--
 src/tir/ir/data_type_rewriter.cc                  | 2 +-
 src/tir/schedule/primitive/cache_read_write.cc    | 6 +++---
 src/tir/schedule/primitive/compute_inline.cc      | 4 ++--
 src/tir/schedule/primitive/loop_transformation.cc | 2 +-
 src/tir/transforms/profile_instrumentation.cc     | 2 +-
 18 files changed, 30 insertions(+), 32 deletions(-)

diff --git a/src/relax/analysis/graph_partitioner.cc b/src/relax/analysis/graph_partitioner.cc
index 69408afbf4..00f4da4006 100644
--- a/src/relax/analysis/graph_partitioner.cc
+++ b/src/relax/analysis/graph_partitioner.cc
@@ -286,7 +286,7 @@ size_t GraphPartitioner::CountFusedArgs(const IndexedForwardGraph& graph,
 }
 
 void GraphPartitioner::InitGroups(const IndexedForwardGraph& graph) {
-  auto args_counter = [this](const tvm::Object* obj) {
+  auto args_counter = [](const tvm::Object* obj) {
     size_t args_num = 0;
     if (auto call_node = GetRef<ObjectRef>(obj).as<CallNode>()) {
       for (auto& it : call_node->args) {
diff --git a/src/relax/ir/expr.cc b/src/relax/ir/expr.cc
index da4f3cb22e..4585c45ef4 100644
--- a/src/relax/ir/expr.cc
+++ b/src/relax/ir/expr.cc
@@ -621,7 +621,7 @@ Function::Function(Array<Var> params, Expr body, Optional<StructInfo> ret_struct
   ObjectPtr<FunctionNode> n = make_object<FunctionNode>();
   n->params = std::move(params);
   n->body = std::move(body);
-  n->ret_struct_info = std::move(ret_struct_info.value());
+  n->ret_struct_info = ret_struct_info.value();
   n->is_pure = is_pure;
   n->struct_info_ = std::move(func_sinfo);
   n->attrs = std::move(attrs);
diff --git a/src/relax/op/nn/attention.cc b/src/relax/op/nn/attention.cc
index 5dfa39ded5..e6f410424b 100644
--- a/src/relax/op/nn/attention.cc
+++ b/src/relax/op/nn/attention.cc
@@ -20,7 +20,6 @@
 #include "attention.h"
 
 #include <utility>
-#include <vector>
 
 namespace tvm {
 namespace relax {
@@ -37,8 +36,8 @@ Expr attention(Expr query, Expr key, Expr value, Optional<Expr> bias, Optional<F
 
   if (bias) {
     return Call(Op::Get("relax.nn.attention_bias"),
-                {std::move(query), std::move(key), std::move(value), std::move(bias.value())},
-                Attrs(attrs), {});
+                {std::move(query), std::move(key), std::move(value), bias.value()}, Attrs(attrs),
+                {});
   }
   return Call(Op::Get("relax.nn.attention"), {std::move(query), std::move(key), std::move(value)},
               Attrs(attrs), {});
diff --git a/src/relax/op/nn/convolution.cc b/src/relax/op/nn/convolution.cc
index 4f07c78458..d86e6442fe 100644
--- a/src/relax/op/nn/convolution.cc
+++ b/src/relax/op/nn/convolution.cc
@@ -595,7 +595,7 @@ Expr conv1d_transpose(Expr data, Expr weight, Array<IntImm> strides, Array<IntIm
   attrs->groups = groups;
   attrs->data_layout = data_layout;
   attrs->kernel_layout = std::move(kernel_layout);
-  attrs->out_layout = std::move(out_layout.value_or(data_layout));
+  attrs->out_layout = out_layout.value_or(data_layout);
   attrs->out_dtype = std::move(out_dtype.value_or(DataType::Void()));
   const Op& op = Op::Get("relax.nn.conv1d_transpose");
   return Call(op, {data, weight}, Attrs(attrs), {});
@@ -732,7 +732,7 @@ Expr conv2d_transpose(Expr data, Expr weight, Array<IntImm> strides, Array<IntIm
   attrs->groups = groups;
   attrs->data_layout = data_layout;
   attrs->kernel_layout = std::move(kernel_layout);
-  attrs->out_layout = std::move(out_layout.value_or(data_layout));
+  attrs->out_layout = out_layout.value_or(data_layout);
   attrs->out_dtype = std::move(out_dtype.value_or(DataType::Void()));
   const Op& op = Op::Get("relax.nn.conv2d_transpose");
   return Call(op, {data, weight}, Attrs(attrs), {});
diff --git a/src/relax/op/nn/nn.cc b/src/relax/op/nn/nn.cc
index f10e46dfd3..64c849e547 100644
--- a/src/relax/op/nn/nn.cc
+++ b/src/relax/op/nn/nn.cc
@@ -905,8 +905,8 @@ Expr nll_loss(Expr predictions, Expr targets, Optional<Expr> weights, String red
 
   static const Op& op = Op::Get("relax.nn.nll_loss");
   if (weights.defined()) {
-    return Call(op, {std::move(predictions), std::move(targets), std::move(weights.value())},
-                Attrs{attrs}, {});
+    return Call(op, {std::move(predictions), std::move(targets), weights.value()}, Attrs{attrs},
+                {});
   } else {
     return Call(op, {std::move(predictions), std::move(targets)}, Attrs{attrs}, {});
   }
diff --git a/src/relax/op/tensor/grad.cc b/src/relax/op/tensor/grad.cc
index c25d587052..506a50c0e7 100644
--- a/src/relax/op/tensor/grad.cc
+++ b/src/relax/op/tensor/grad.cc
@@ -101,10 +101,9 @@ Expr nll_loss_backward(Expr output_grad, Expr predictions, Expr targets, Optiona
 
   static const Op& op = Op::Get("relax.grad.nll_loss_backward");
   if (weights.defined()) {
-    return Call(op,
-                {std::move(output_grad), std::move(predictions), std::move(targets),
-                 std::move(weights.value())},
-                Attrs{attrs}, {});
+    return Call(
+        op, {std::move(output_grad), std::move(predictions), std::move(targets), weights.value()},
+        Attrs{attrs}, {});
   } else {
     return Call(op, {std::move(output_grad), std::move(predictions), std::move(targets)},
                 Attrs{attrs}, {});
diff --git a/src/relax/transform/convert_layout.cc b/src/relax/transform/convert_layout.cc
index a9d4821391..0cdaa0b192 100644
--- a/src/relax/transform/convert_layout.cc
+++ b/src/relax/transform/convert_layout.cc
@@ -127,7 +127,7 @@ class LayoutConvertMutator : public ExprMutator {
         ObjectPtr<LayoutTransformAttrs> attrs = make_object<LayoutTransformAttrs>();
         Array<IntImm> axis_separator;
         Array<IntImm> input_axis_separator;
-        attrs->index_map = std::move(Downcast<IndexMap>(LoadJSON(SaveJSON(index_map))));
+        attrs->index_map = Downcast<IndexMap>(LoadJSON(SaveJSON(index_map)));
         attrs->axis_separators = std::move(axis_separator);
         attrs->input_axis_separators = std::move(input_axis_separator);
         const Op& layout_transform_op_ = Op::Get("relax.layout_transform");
diff --git a/src/relax/transform/fuse_tir.cc b/src/relax/transform/fuse_tir.cc
index f3b9108fa6..a774d24a63 100644
--- a/src/relax/transform/fuse_tir.cc
+++ b/src/relax/transform/fuse_tir.cc
@@ -846,7 +846,7 @@ class FusedTIRConstructor : public ExprVisitor {
     if (is_inplace) {
       const auto* attrs = call->attrs.as<CallTIRInplaceAttrs>();
       CHECK(attrs) << "Must have CallTIRInplaceAttrs for an in-place call";
-      output_idxs = std::move(GetInplaceOutputIndices(attrs->inplace_indices, num_inputs));
+      output_idxs = GetInplaceOutputIndices(attrs->inplace_indices, num_inputs);
     } else {
       for (size_t i = 0; i < output_size; i++) {
         output_idxs.push_back(num_inputs + i);
diff --git a/src/relax/transform/legalize_ops.cc b/src/relax/transform/legalize_ops.cc
index a0ac6fffb6..c2a1ab4714 100644
--- a/src/relax/transform/legalize_ops.cc
+++ b/src/relax/transform/legalize_ops.cc
@@ -63,7 +63,7 @@ class LegalizeMutator : public ExprMutator {
                            bool enable_warning)
       : ExprMutator(mod), mod_(std::move(mod)), enable_warning_(enable_warning) {
     if (cmap) {
-      cmap_ = std::move(cmap.value());
+      cmap_ = cmap.value();
     }
   }
 
diff --git a/src/relax/transform/remove_purity_checking.cc b/src/relax/transform/remove_purity_checking.cc
index 31e771d2ad..001ed6d339 100644
--- a/src/relax/transform/remove_purity_checking.cc
+++ b/src/relax/transform/remove_purity_checking.cc
@@ -36,7 +36,7 @@ class PurityRemover : public ExprMutator {
     bool purity = func->is_pure;
     auto ret = func;
     if (purity) {
-      ret = std::move(WithAttr<Function>(func, relax::attr::kForcePure, true));
+      ret = WithAttr<Function>(func, relax::attr::kForcePure, true);
     }
     auto new_body = VisitExpr(ret->body);
     if (!new_body.same_as(ret->body)) {
diff --git a/src/relax/transform/to_mixed_precision.cc b/src/relax/transform/to_mixed_precision.cc
index 531ecefd5d..d14b6ab142 100644
--- a/src/relax/transform/to_mixed_precision.cc
+++ b/src/relax/transform/to_mixed_precision.cc
@@ -532,7 +532,7 @@ class ToMixedPrecisionRewriter : public ExprMutator {
       return;
     }
     ObjectPtr<TupleNode> new_tuple = make_object<TupleNode>(*tuple_node);
-    new_tuple->fields = std::move(RemapArgs(tuple_node->fields));
+    new_tuple->fields = RemapArgs(tuple_node->fields);
     new_tuple->struct_info_ = std::nullopt;
     Expr new_value = builder_->Normalize(Tuple(new_tuple));
     if (!binding->var->IsInstance<DataflowVarNode>()) {
@@ -600,7 +600,7 @@ class ToMixedPrecisionRewriter : public ExprMutator {
 
 Expr ToMixedPrecision(const Function& f, const DataType& out_dtype,
                       Optional<Array<String>> fp16_input_names) {
-  VarDTypeMap only_fp16_map = std::move(DTypeDecisionCollector::Collect(f, out_dtype));
+  VarDTypeMap only_fp16_map = DTypeDecisionCollector::Collect(f, out_dtype);
   std::unordered_set<std::string> fp16_input_names_set;
   if (fp16_input_names) {
     fp16_input_names_set.insert(fp16_input_names.value().begin(), fp16_input_names.value().end());
diff --git a/src/script/printer/relax/function.cc b/src/script/printer/relax/function.cc
index cba6f88ff8..fb8cee3cae 100644
--- a/src/script/printer/relax/function.cc
+++ b/src/script/printer/relax/function.cc
@@ -50,9 +50,9 @@ TVM_STATIC_IR_FUNCTOR(IRDocsifier, vtable)
       // if we are binding a local definition, then calling d->Define
       // will result in a repeated definition and an incorrect displayed name
       if (Optional<String> name = GetBindingName(d)) {
-        func_name = std::move(IdDoc(name.value()));
+        func_name = IdDoc(name.value());
       } else {
-        func_name = std::move(IdDoc(FindFunctionName(d, n).value_or("main")));
+        func_name = IdDoc(FindFunctionName(d, n).value_or("main"));
       }
       (*f)->AddDispatchToken(d, "relax");
       (*f)->is_func = true;
@@ -118,7 +118,7 @@ TVM_STATIC_IR_FUNCTOR(IRDocsifier, vtable)
         dec_values.push_back(LiteralDoc::Boolean(true, Optional<ObjectPath>()));
       }
       if (dec_keys.size()) {
-        decorator = std::move(decorator->Call(pos_args, dec_keys, dec_values));
+        decorator = decorator->Call(pos_args, dec_keys, dec_values);
       }
 
       // Step 6. Print body
diff --git a/src/script/printer/tir/function.cc b/src/script/printer/tir/function.cc
index 10f7bd7452..1d035609cc 100644
--- a/src/script/printer/tir/function.cc
+++ b/src/script/printer/tir/function.cc
@@ -190,8 +190,8 @@ TVM_STATIC_IR_FUNCTOR(IRDocsifier, vtable)
       // mark private if there is no global symbol
       if (!func->attrs.defined() || !func->attrs->dict.count(tvm::attr::kGlobalSymbol)) {
         Array<ExprDoc> pos_args;
-        decorator = std::move(decorator->Call(pos_args, {"private"},
-                                              {LiteralDoc::Boolean(true, Optional<ObjectPath>())}));
+        decorator = decorator->Call(pos_args, {"private"},
+                                    {LiteralDoc::Boolean(true, Optional<ObjectPath>())});
       }
 
       return HeaderWrapper(d, FunctionDoc(
diff --git a/src/tir/ir/data_type_rewriter.cc b/src/tir/ir/data_type_rewriter.cc
index 11b29016bf..f65566109f 100644
--- a/src/tir/ir/data_type_rewriter.cc
+++ b/src/tir/ir/data_type_rewriter.cc
@@ -557,7 +557,7 @@ Stmt IndexDataTypeRewriter::VisitStmt_(const ForNode* op) {
       auto old_thread_binding = op->thread_binding.value();
       auto* ptr = old_thread_binding.CopyOnWrite();
       ptr->var = old_thread_binding->var.copy_with_dtype(new_loop_var.dtype());
-      n->thread_binding = std::move(Optional<IterVar>(std::move(old_thread_binding)));
+      n->thread_binding = Optional<IterVar>(std::move(old_thread_binding));
     }
     n->body = new_body;
     return new_for;
diff --git a/src/tir/schedule/primitive/cache_read_write.cc b/src/tir/schedule/primitive/cache_read_write.cc
index 7f4415f85c..38cafbe151 100644
--- a/src/tir/schedule/primitive/cache_read_write.cc
+++ b/src/tir/schedule/primitive/cache_read_write.cc
@@ -986,7 +986,7 @@ class CacheReadRewriter : public StmtExprMutator {
       ObjectPtr<BufferLoadNode> n = make_object<BufferLoadNode>(*load);
       n->buffer = info_->write_buffer;
       if (!cache_full_region_) {
-        n->indices = std::move(RewriteIndices(load->indices));
+        n->indices = RewriteIndices(load->indices);
       }
       return PrimExpr(n);
     }
@@ -1257,7 +1257,7 @@ class CacheWriteRewriter : public StmtExprMutator {
       auto n = CopyOnWrite(stmt.get());
       n->buffer = info_->read_buffer;
       if (!cache_full_region_) {
-        n->indices = std::move(RewriteIndices(n->indices));
+        n->indices = RewriteIndices(n->indices);
       }
       return Stmt(n);
     } else {
@@ -1270,7 +1270,7 @@ class CacheWriteRewriter : public StmtExprMutator {
       ObjectPtr<BufferLoadNode> n = make_object<BufferLoadNode>(*load);
       n->buffer = info_->read_buffer;
       if (!cache_full_region_) {
-        n->indices = std::move(RewriteIndices(n->indices));
+        n->indices = RewriteIndices(n->indices);
       }
       return PrimExpr(n);
     }
diff --git a/src/tir/schedule/primitive/compute_inline.cc b/src/tir/schedule/primitive/compute_inline.cc
index 5a6cddd3cc..4e037158d9 100644
--- a/src/tir/schedule/primitive/compute_inline.cc
+++ b/src/tir/schedule/primitive/compute_inline.cc
@@ -378,8 +378,8 @@ class BaseInliner : public StmtExprMutator {
     if (!is_scope_root && (std::any_of(reads.begin(), reads.end(), f_access_inline_buffer) ||
                            std::any_of(writes.begin(), writes.end(), f_access_inline_buffer))) {
       Array<Array<BufferRegion>> inspected = GetBlockReadWriteRegion(block, buffer_var_map_);
-      reads = std::move(inspected[0]);
-      writes = std::move(inspected[1]);
+      reads = inspected[0];
+      writes = inspected[1];
     }
     // Step 3. Assemble the result
     BlockNode* n = block.CopyOnWrite();
diff --git a/src/tir/schedule/primitive/loop_transformation.cc b/src/tir/schedule/primitive/loop_transformation.cc
index 88e6f61eb3..7baf4e98b7 100644
--- a/src/tir/schedule/primitive/loop_transformation.cc
+++ b/src/tir/schedule/primitive/loop_transformation.cc
@@ -616,7 +616,7 @@ class BlockMutator : public StmtExprMutator {
 
     if (!op->loop_var.same_as(new_var)) {
      // If the partioned loop contains nested for loop, then create new iteration variable instance
-      res.CopyOnWrite()->body = std::move(tir::Substitute(res->body, {{op->loop_var, new_var}}));
+      res.CopyOnWrite()->body = tir::Substitute(res->body, {{op->loop_var, new_var}});
       res.CopyOnWrite()->loop_var = new_var;
     }
     return res;
diff --git a/src/tir/transforms/profile_instrumentation.cc b/src/tir/transforms/profile_instrumentation.cc
index f8548ca59a..4061a2abf5 100644
--- a/src/tir/transforms/profile_instrumentation.cc
+++ b/src/tir/transforms/profile_instrumentation.cc
@@ -160,7 +160,7 @@ class InstrumentIntrin : public StmtMutator {
 
   void GetLoopInfo(PrimFuncNode* op) {
     LoopAnalyzer analzer;
-    loops_ = std::move(analzer.Analyze(op->body));
+    loops_ = analzer.Analyze(op->body);
   }
 
   Stmt VisitStmt_(const SeqStmtNode* op) final {

Reply via email to