manupa-arm commented on code in PR #10959:
URL: https://github.com/apache/tvm/pull/10959#discussion_r861971113
##########
src/tir/contrib/ethosu/passes.cc:
##########
@@ -110,6 +110,98 @@ tvm::transform::Pass HoistAllocates() {
TVM_REGISTER_GLOBAL("tir.contrib.ethos-u.HoistAllocates").set_body_typed(HoistAllocates);
+/*!
+ * \brief Reorders copy and compute nodes in such a way that independent DMA
copies,
+ * and computes happen in parallel.
+ * Copies to buffers with local scope are not reordered, indeed they copy LUT
+ * into the SHRAM which already happens in parallel with copying weights into
+ * the weights encoder.
+ */
+class CopyComputeReorderingMutator : public StmtExprMutator {
+ public:
+ CopyComputeReorderingMutator(int max_copy_movements) :
_max_copy_movements{max_copy_movements} {}
+
+ PrimFunc operator()(PrimFunc main_func) {
+ if (_max_copy_movements > 0) {
+ auto n{main_func.CopyOnWrite()};
+ n->body = this->VisitStmt(main_func->body);
+ return GetRef<PrimFunc>(n);
+ }
+ return main_func;
+ }
+
+ private:
+ Stmt VisitStmt_(const SeqStmtNode* op) override {
+ if (op->size() <= 1) {
+ return StmtExprMutator::VisitStmt_(op);
+ }
+
+ auto seq_stmt{GetRef<SeqStmt>(op)};
+ std::vector<Stmt> new_seq(seq_stmt->size());
+ std::copy(seq_stmt->seq.begin(), seq_stmt->seq.end(), new_seq.begin());
+
+ for (size_t index{}; index < new_seq.size(); ++index) {
+ for (int offset{}; offset < _max_copy_movements; ++offset) {
+ auto i{index - offset};
+ if (i > 0 && !stmt_is_copy(new_seq[i - 1]) &&
stmt_is_global_copy(new_seq[i])) {
+ std::swap(new_seq[i], new_seq[i - 1]);
+ } else {
+ break;
+ }
+ }
+ }
+
+ auto n{CopyOnWrite(op)};
+ n->seq = std::move(new_seq);
+ return Stmt{n};
+ }
+
+ tvm::runtime::Array<tvm::PrimExpr> get_stmt_args(Stmt stmt) {
+ auto eval_node{stmt.as<EvaluateNode>()};
+ ICHECK(eval_node) << "Expected statement to be an evaluate node, but was "
+ << stmt->GetTypeKey();
+ auto call_node{eval_node->value.as<CallNode>()};
+ ICHECK(call_node) << "Expected expression to be a call node, but was "
+ << eval_node->value->GetTypeKey();
+ return call_node->args;
+ }
+
+ bool stmt_is_copy(Stmt stmt) {
+ auto args{get_stmt_args(stmt)};
+ return args[0].as<StringImmNode>()->value == "ethosu_copy";
+ }
+
+ bool stmt_is_global_copy(Stmt stmt) {
+ auto args{get_stmt_args(stmt)};
+ return args[0].as<StringImmNode>()->value == "ethosu_copy" &&
+ args[3].as<BufferLoadNode>()->buffer.scope() == "global";
+ }
+
+ int _max_copy_movements;
+};
+
+/*!
+ * \brief A pass to reorder copy and compute nodes in such a way that
independent DMA copies,
+ * and computes happen in parallel.
+ *
+ * \param max_copy_movements: The maximum number of movements allowed for a
copy.
+ * \return tvm::transform::Pass
+ */
+tvm::transform::Pass CopyComputeReordering(int max_copy_movements) {
+ auto pass_func = [=](PrimFunc f, IRModule mod, tvm::transform::PassContext
ctx) {
+ ICHECK(mod->GetGlobalVars().size() == 1 && mod->ContainGlobalVar("main"))
Review Comment:
Let's add a PassContext option so it can also be passed from above.
I think the `max_copy_movements` argument (if provided) should take priority,
then the PassContext value, and finally the default.
E.g.:
https://github.com/apache/tvm/blob/6b45f8dc4ad0cfecf07dbd031b1e55fe4c9b02c5/src/tir/usmp/unified_static_memory_planner.cc#L96
##########
src/tir/contrib/ethosu/passes.cc:
##########
@@ -110,6 +110,98 @@ tvm::transform::Pass HoistAllocates() {
TVM_REGISTER_GLOBAL("tir.contrib.ethos-u.HoistAllocates").set_body_typed(HoistAllocates);
+/*!
+ * \brief Reorders copy and compute nodes in such a way that independent DMA
copies,
+ * and computes happen in parallel.
+ * Copies to buffers with local scope are not reordered, indeed they copy LUT
+ * into the SHRAM which already happens in parallel with copying weights into
+ * the weights encoder.
+ */
+class CopyComputeReorderingMutator : public StmtExprMutator {
+ public:
+ CopyComputeReorderingMutator(int max_copy_movements) :
_max_copy_movements{max_copy_movements} {}
+
+ PrimFunc operator()(PrimFunc main_func) {
+ if (_max_copy_movements > 0) {
+ auto n{main_func.CopyOnWrite()};
+ n->body = this->VisitStmt(main_func->body);
+ return GetRef<PrimFunc>(n);
+ }
+ return main_func;
+ }
+
+ private:
+ Stmt VisitStmt_(const SeqStmtNode* op) override {
+ if (op->size() <= 1) {
+ return StmtExprMutator::VisitStmt_(op);
+ }
+
+ auto seq_stmt{GetRef<SeqStmt>(op)};
+ std::vector<Stmt> new_seq(seq_stmt->size());
+ std::copy(seq_stmt->seq.begin(), seq_stmt->seq.end(), new_seq.begin());
+
+ for (size_t index{}; index < new_seq.size(); ++index) {
+ for (int offset{}; offset < _max_copy_movements; ++offset) {
+ auto i{index - offset};
Review Comment:
nit: let's use the explicit type here instead of `auto`.
##########
src/tir/contrib/ethosu/passes.cc:
##########
@@ -110,6 +110,98 @@ tvm::transform::Pass HoistAllocates() {
TVM_REGISTER_GLOBAL("tir.contrib.ethos-u.HoistAllocates").set_body_typed(HoistAllocates);
+/*!
+ * \brief Reorders copy and compute nodes in such a way that independent DMA
copies,
+ * and computes happen in parallel.
+ * Copies to buffers with local scope are not reordered, indeed they copy LUT
+ * into the SHRAM which already happens in parallel with copying weights into
+ * the weights encoder.
+ */
+class CopyComputeReorderingMutator : public StmtExprMutator {
+ public:
+ CopyComputeReorderingMutator(int max_copy_movements) :
_max_copy_movements{max_copy_movements} {}
+
+ PrimFunc operator()(PrimFunc main_func) {
+ if (_max_copy_movements > 0) {
+ auto n{main_func.CopyOnWrite()};
+ n->body = this->VisitStmt(main_func->body);
+ return GetRef<PrimFunc>(n);
+ }
+ return main_func;
+ }
+
+ private:
+ Stmt VisitStmt_(const SeqStmtNode* op) override {
+ if (op->size() <= 1) {
+ return StmtExprMutator::VisitStmt_(op);
+ }
+
+ auto seq_stmt{GetRef<SeqStmt>(op)};
+ std::vector<Stmt> new_seq(seq_stmt->size());
+ std::copy(seq_stmt->seq.begin(), seq_stmt->seq.end(), new_seq.begin());
+
+ for (size_t index{}; index < new_seq.size(); ++index) {
+ for (int offset{}; offset < _max_copy_movements; ++offset) {
+ auto i{index - offset};
+ if (i > 0 && !stmt_is_copy(new_seq[i - 1]) &&
stmt_is_global_copy(new_seq[i])) {
+ std::swap(new_seq[i], new_seq[i - 1]);
+ } else {
+ break;
+ }
+ }
+ }
+
+ auto n{CopyOnWrite(op)};
Review Comment:
nit: let's use a better, explicitly typed variable name.
##########
src/tir/contrib/ethosu/passes.cc:
##########
@@ -110,6 +110,98 @@ tvm::transform::Pass HoistAllocates() {
TVM_REGISTER_GLOBAL("tir.contrib.ethos-u.HoistAllocates").set_body_typed(HoistAllocates);
+/*!
+ * \brief Reorders copy and compute nodes in such a way that independent DMA
copies,
+ * and computes happen in parallel.
+ * Copies to buffers with local scope are not reordered, indeed they copy LUT
+ * into the SHRAM which already happens in parallel with copying weights into
+ * the weights encoder.
+ */
+class CopyComputeReorderingMutator : public StmtExprMutator {
+ public:
+ CopyComputeReorderingMutator(int max_copy_movements) :
_max_copy_movements{max_copy_movements} {}
+
+ PrimFunc operator()(PrimFunc main_func) {
+ if (_max_copy_movements > 0) {
+ auto n{main_func.CopyOnWrite()};
+ n->body = this->VisitStmt(main_func->body);
+ return GetRef<PrimFunc>(n);
+ }
+ return main_func;
+ }
+
+ private:
+ Stmt VisitStmt_(const SeqStmtNode* op) override {
+ if (op->size() <= 1) {
+ return StmtExprMutator::VisitStmt_(op);
+ }
+
+ auto seq_stmt{GetRef<SeqStmt>(op)};
+ std::vector<Stmt> new_seq(seq_stmt->size());
+ std::copy(seq_stmt->seq.begin(), seq_stmt->seq.end(), new_seq.begin());
+
+ for (size_t index{}; index < new_seq.size(); ++index) {
+ for (int offset{}; offset < _max_copy_movements; ++offset) {
+ auto i{index - offset};
+ if (i > 0 && !stmt_is_copy(new_seq[i - 1]) &&
stmt_is_global_copy(new_seq[i])) {
+ std::swap(new_seq[i], new_seq[i - 1]);
+ } else {
+ break;
+ }
+ }
+ }
+
+ auto n{CopyOnWrite(op)};
+ n->seq = std::move(new_seq);
+ return Stmt{n};
+ }
+
+ tvm::runtime::Array<tvm::PrimExpr> get_stmt_args(Stmt stmt) {
+ auto eval_node{stmt.as<EvaluateNode>()};
+ ICHECK(eval_node) << "Expected statement to be an evaluate node, but was "
+ << stmt->GetTypeKey();
+ auto call_node{eval_node->value.as<CallNode>()};
+ ICHECK(call_node) << "Expected expression to be a call node, but was "
+ << eval_node->value->GetTypeKey();
+ return call_node->args;
+ }
+
+ bool stmt_is_copy(Stmt stmt) {
Review Comment:
Let's use `const Stmt&` here to pass by const reference.
##########
src/tir/contrib/ethosu/passes.cc:
##########
@@ -110,6 +110,98 @@ tvm::transform::Pass HoistAllocates() {
TVM_REGISTER_GLOBAL("tir.contrib.ethos-u.HoistAllocates").set_body_typed(HoistAllocates);
+/*!
+ * \brief Reorders copy and compute nodes in such a way that independent DMA
copies,
+ * and computes happen in parallel.
+ * Copies to buffers with local scope are not reordered, indeed they copy LUT
+ * into the SHRAM which already happens in parallel with copying weights into
+ * the weights encoder.
+ */
+class CopyComputeReorderingMutator : public StmtExprMutator {
+ public:
+ CopyComputeReorderingMutator(int max_copy_movements) :
_max_copy_movements{max_copy_movements} {}
+
+ PrimFunc operator()(PrimFunc main_func) {
+ if (_max_copy_movements > 0) {
+ auto n{main_func.CopyOnWrite()};
+ n->body = this->VisitStmt(main_func->body);
+ return GetRef<PrimFunc>(n);
+ }
+ return main_func;
+ }
+
+ private:
+ Stmt VisitStmt_(const SeqStmtNode* op) override {
+ if (op->size() <= 1) {
+ return StmtExprMutator::VisitStmt_(op);
+ }
+
+ auto seq_stmt{GetRef<SeqStmt>(op)};
+ std::vector<Stmt> new_seq(seq_stmt->size());
+ std::copy(seq_stmt->seq.begin(), seq_stmt->seq.end(), new_seq.begin());
+
+ for (size_t index{}; index < new_seq.size(); ++index) {
+ for (int offset{}; offset < _max_copy_movements; ++offset) {
+ auto i{index - offset};
+ if (i > 0 && !stmt_is_copy(new_seq[i - 1]) &&
stmt_is_global_copy(new_seq[i])) {
+ std::swap(new_seq[i], new_seq[i - 1]);
+ } else {
+ break;
+ }
+ }
+ }
+
+ auto n{CopyOnWrite(op)};
+ n->seq = std::move(new_seq);
+ return Stmt{n};
+ }
+
+ tvm::runtime::Array<tvm::PrimExpr> get_stmt_args(Stmt stmt) {
+ auto eval_node{stmt.as<EvaluateNode>()};
+ ICHECK(eval_node) << "Expected statement to be an evaluate node, but was "
+ << stmt->GetTypeKey();
+ auto call_node{eval_node->value.as<CallNode>()};
+ ICHECK(call_node) << "Expected expression to be a call node, but was "
+ << eval_node->value->GetTypeKey();
+ return call_node->args;
+ }
+
+ bool stmt_is_copy(Stmt stmt) {
+ auto args{get_stmt_args(stmt)};
+ return args[0].as<StringImmNode>()->value == "ethosu_copy";
+ }
+
+ bool stmt_is_global_copy(Stmt stmt) {
+ auto args{get_stmt_args(stmt)};
+ return args[0].as<StringImmNode>()->value == "ethosu_copy" &&
+ args[3].as<BufferLoadNode>()->buffer.scope() == "global";
+ }
+
+ int _max_copy_movements;
Review Comment:
Please add documentation comments for these member variables.
##########
src/tir/contrib/ethosu/passes.cc:
##########
@@ -110,6 +110,98 @@ tvm::transform::Pass HoistAllocates() {
TVM_REGISTER_GLOBAL("tir.contrib.ethos-u.HoistAllocates").set_body_typed(HoistAllocates);
+/*!
+ * \brief Reorders copy and compute nodes in such a way that independent DMA
copies,
+ * and computes happen in parallel.
+ * Copies to buffers with local scope are not reordered, indeed they copy LUT
+ * into the SHRAM which already happens in parallel with copying weights into
+ * the weights encoder.
+ */
+class CopyComputeReorderingMutator : public StmtExprMutator {
+ public:
+ CopyComputeReorderingMutator(int max_copy_movements) :
_max_copy_movements{max_copy_movements} {}
+
+ PrimFunc operator()(PrimFunc main_func) {
+ if (_max_copy_movements > 0) {
+ auto n{main_func.CopyOnWrite()};
+ n->body = this->VisitStmt(main_func->body);
+ return GetRef<PrimFunc>(n);
+ }
+ return main_func;
+ }
+
+ private:
+ Stmt VisitStmt_(const SeqStmtNode* op) override {
+ if (op->size() <= 1) {
+ return StmtExprMutator::VisitStmt_(op);
+ }
+
+ auto seq_stmt{GetRef<SeqStmt>(op)};
+ std::vector<Stmt> new_seq(seq_stmt->size());
+ std::copy(seq_stmt->seq.begin(), seq_stmt->seq.end(), new_seq.begin());
+
+ for (size_t index{}; index < new_seq.size(); ++index) {
+ for (int offset{}; offset < _max_copy_movements; ++offset) {
+ auto i{index - offset};
+ if (i > 0 && !stmt_is_copy(new_seq[i - 1]) &&
stmt_is_global_copy(new_seq[i])) {
+ std::swap(new_seq[i], new_seq[i - 1]);
+ } else {
+ break;
+ }
+ }
+ }
+
+ auto n{CopyOnWrite(op)};
+ n->seq = std::move(new_seq);
+ return Stmt{n};
+ }
+
+ tvm::runtime::Array<tvm::PrimExpr> get_stmt_args(Stmt stmt) {
Review Comment:
Let's use `const Stmt&` here to pass by const reference.
##########
src/tir/contrib/ethosu/passes.cc:
##########
@@ -110,6 +110,98 @@ tvm::transform::Pass HoistAllocates() {
TVM_REGISTER_GLOBAL("tir.contrib.ethos-u.HoistAllocates").set_body_typed(HoistAllocates);
+/*!
+ * \brief Reorders copy and compute nodes in such a way that independent DMA
copies,
+ * and computes happen in parallel.
+ * Copies to buffers with local scope are not reordered, indeed they copy LUT
+ * into the SHRAM which already happens in parallel with copying weights into
+ * the weights encoder.
+ */
+class CopyComputeReorderingMutator : public StmtExprMutator {
+ public:
+ CopyComputeReorderingMutator(int max_copy_movements) :
_max_copy_movements{max_copy_movements} {}
+
+ PrimFunc operator()(PrimFunc main_func) {
+ if (_max_copy_movements > 0) {
+ auto n{main_func.CopyOnWrite()};
+ n->body = this->VisitStmt(main_func->body);
+ return GetRef<PrimFunc>(n);
+ }
+ return main_func;
+ }
+
+ private:
+ Stmt VisitStmt_(const SeqStmtNode* op) override {
+ if (op->size() <= 1) {
+ return StmtExprMutator::VisitStmt_(op);
+ }
+
+ auto seq_stmt{GetRef<SeqStmt>(op)};
+ std::vector<Stmt> new_seq(seq_stmt->size());
+ std::copy(seq_stmt->seq.begin(), seq_stmt->seq.end(), new_seq.begin());
+
+ for (size_t index{}; index < new_seq.size(); ++index) {
+ for (int offset{}; offset < _max_copy_movements; ++offset) {
Review Comment:
I think it's clearer if we stick to explicit initialization.
##########
src/tir/contrib/ethosu/passes.cc:
##########
@@ -110,6 +110,98 @@ tvm::transform::Pass HoistAllocates() {
TVM_REGISTER_GLOBAL("tir.contrib.ethos-u.HoistAllocates").set_body_typed(HoistAllocates);
+/*!
+ * \brief Reorders copy and compute nodes in such a way that independent DMA
copies,
+ * and computes happen in parallel.
+ * Copies to buffers with local scope are not reordered, indeed they copy LUT
+ * into the SHRAM which already happens in parallel with copying weights into
+ * the weights encoder.
+ */
+class CopyComputeReorderingMutator : public StmtExprMutator {
+ public:
+ CopyComputeReorderingMutator(int max_copy_movements) :
_max_copy_movements{max_copy_movements} {}
+
+ PrimFunc operator()(PrimFunc main_func) {
+ if (_max_copy_movements > 0) {
+ auto n{main_func.CopyOnWrite()};
+ n->body = this->VisitStmt(main_func->body);
+ return GetRef<PrimFunc>(n);
+ }
+ return main_func;
+ }
+
+ private:
+ Stmt VisitStmt_(const SeqStmtNode* op) override {
+ if (op->size() <= 1) {
+ return StmtExprMutator::VisitStmt_(op);
+ }
+
+ auto seq_stmt{GetRef<SeqStmt>(op)};
+ std::vector<Stmt> new_seq(seq_stmt->size());
+ std::copy(seq_stmt->seq.begin(), seq_stmt->seq.end(), new_seq.begin());
+
+ for (size_t index{}; index < new_seq.size(); ++index) {
+ for (int offset{}; offset < _max_copy_movements; ++offset) {
+ auto i{index - offset};
+ if (i > 0 && !stmt_is_copy(new_seq[i - 1]) &&
stmt_is_global_copy(new_seq[i])) {
+ std::swap(new_seq[i], new_seq[i - 1]);
+ } else {
+ break;
+ }
+ }
+ }
+
+ auto n{CopyOnWrite(op)};
+ n->seq = std::move(new_seq);
+ return Stmt{n};
+ }
+
+ tvm::runtime::Array<tvm::PrimExpr> get_stmt_args(Stmt stmt) {
+ auto eval_node{stmt.as<EvaluateNode>()};
+ ICHECK(eval_node) << "Expected statement to be an evaluate node, but was "
+ << stmt->GetTypeKey();
+ auto call_node{eval_node->value.as<CallNode>()};
+ ICHECK(call_node) << "Expected expression to be a call node, but was "
+ << eval_node->value->GetTypeKey();
+ return call_node->args;
+ }
+
+ bool stmt_is_copy(Stmt stmt) {
+ auto args{get_stmt_args(stmt)};
+ return args[0].as<StringImmNode>()->value == "ethosu_copy";
+ }
+
+ bool stmt_is_global_copy(Stmt stmt) {
Review Comment:
Let's use `const Stmt&` here to pass by const reference.
##########
src/tir/contrib/ethosu/passes.cc:
##########
@@ -110,6 +110,98 @@ tvm::transform::Pass HoistAllocates() {
TVM_REGISTER_GLOBAL("tir.contrib.ethos-u.HoistAllocates").set_body_typed(HoistAllocates);
+/*!
+ * \brief Reorders copy and compute nodes in such a way that independent DMA
copies,
+ * and computes happen in parallel.
+ * Copies to buffers with local scope are not reordered, indeed they copy LUT
+ * into the SHRAM which already happens in parallel with copying weights into
+ * the weights encoder.
+ */
+class CopyComputeReorderingMutator : public StmtExprMutator {
+ public:
+ CopyComputeReorderingMutator(int max_copy_movements) :
_max_copy_movements{max_copy_movements} {}
+
+ PrimFunc operator()(PrimFunc main_func) {
+ if (_max_copy_movements > 0) {
+ auto n{main_func.CopyOnWrite()};
+ n->body = this->VisitStmt(main_func->body);
+ return GetRef<PrimFunc>(n);
+ }
+ return main_func;
+ }
+
+ private:
+ Stmt VisitStmt_(const SeqStmtNode* op) override {
+ if (op->size() <= 1) {
+ return StmtExprMutator::VisitStmt_(op);
+ }
+
+ auto seq_stmt{GetRef<SeqStmt>(op)};
+ std::vector<Stmt> new_seq(seq_stmt->size());
+ std::copy(seq_stmt->seq.begin(), seq_stmt->seq.end(), new_seq.begin());
+
+ for (size_t index{}; index < new_seq.size(); ++index) {
+ for (int offset{}; offset < _max_copy_movements; ++offset) {
+ auto i{index - offset};
+ if (i > 0 && !stmt_is_copy(new_seq[i - 1]) &&
stmt_is_global_copy(new_seq[i])) {
Review Comment:
docs: please add a comment explaining what is being done in this loop.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]