[GitHub] [incubator-tvm] minminsun commented on a change in pull request #6297: [Ansor][AutoTVM v2.0] Phase 2: Layout Rewrite in AutoScheduler

2020-08-26 Thread GitBox


minminsun commented on a change in pull request #6297:
URL: https://github.com/apache/incubator-tvm/pull/6297#discussion_r477112491



##
File path: src/auto_scheduler/compute_dag.cc
##
@@ -665,9 +666,349 @@ ComputeDAG::ComputeDAG(Array<te::Tensor> tensors) {
   data_ = std::move(node);
 }
 
+/*!
+ * \brief utility function for kernel_layout_transform
+ */
+inline void parse_kernel_layout(const String& layout, Array<PrimExpr>* shape,
+                                std::vector<std::string>* axes) {
+  int32_t factor = 0;
+  std::string axis = "";
+  for (char c : std::string(layout)) {
+    if (c >= 'A' && c <= 'z') {
+      axis += c;
+      if (factor != 0) {
+        shape->push_back(factor);
+        factor = 0;
+      }
+    } else if (c >= '0' && c <= '9') {
+      factor = factor * 10 + c - '0';
+      if (!axis.empty()) {
+        axes->push_back(axis);
+        axis = "";
+      }
+    } else {
+      LOG(FATAL) << "Invalid layout " << layout;
+    }
+  }
+  if (!axis.empty()) {
+    axes->push_back(axis);
+  }
+}
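+// Example: the (hypothetical) layout string "16i64j" parses to
+// shape = {16, 64} and axes = {"i", "j"}: each run of digits is the extent
+// of the axis name that follows it.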
+
+std::string BaseName(const std::string& str) { return str.substr(0, str.rfind("_")); }
+
+class IndexRewriter : public StmtExprMutator {
+ public:
+  IndexRewriter(const te::Operation& placeholder_op, const std::string& new_layout)
+      : placeholder_op_(placeholder_op), new_layout_(new_layout) {}
+
+  PrimExpr Rewrite(PrimExpr expr) { return this->VisitExpr(expr); }
+
+  PrimExpr VisitExpr_(const ProducerLoadNode* op) final {
+    te::Tensor t = Downcast<te::Tensor>(op->producer);
+    if (t->op == placeholder_op_) {
+      Array<PrimExpr> new_shape;
+      std::vector<std::string> new_names;
+      parse_kernel_layout(new_layout_, &new_shape, &new_names);
+      std::unordered_map<std::string, PrimExpr> name_to_arg;
+      for (const auto& arg : op->indices) {
+        std::string axis_name;
+        if (const auto* pimm = arg.as<IntImmNode>()) {
+          CHECK_EQ(pimm->value, 0);
+          axis_name = "IntImm";
+        } else {
+          axis_name = BaseName(CleanName(Downcast<Var>(arg)->name_hint));
+          CHECK_EQ(name_to_arg.count(axis_name), 0);
+          name_to_arg[axis_name] = arg;
+        }
+      }
+
+      std::unordered_map<std::string, PrimExpr> div_factors;
+      std::vector<PrimExpr> r_new_args;
+      for (int i = new_names.size() - 1; i >= 0; --i) {
+        auto ori_iter_name = new_names[i];
+        auto name_it = name_to_arg.find(ori_iter_name);
+        CHECK(name_it != name_to_arg.end());
+        PrimExpr ori_arg = name_it->second;
+
+        PrimExpr mod_factor = new_shape[i];
+
+        PrimExpr div_factor = 1;
+        if (div_factors.count(ori_iter_name)) {
+          div_factor = div_factors[ori_iter_name];
+        }
+        div_factors[ori_iter_name] = div_factor * new_shape[i];
+
+        PrimExpr new_arg = indexmod(indexdiv(ori_arg, div_factor), mod_factor);
+
+        r_new_args.push_back(new_arg);
+      }
+
+      Array<PrimExpr> new_args(std::make_move_iterator(r_new_args.rbegin()),
+                               std::make_move_iterator(r_new_args.rend()));
+      return ProducerLoad(op->producer, new_args);
+    }
+    return GetRef<PrimExpr>(op);
+  }
+
+ private:
+  const te::Operation& placeholder_op_;
+  const std::string& new_layout_;
+};
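+// Example: given a load placeholder[k][j] and a (hypothetical) new layout
+// "4k16j8k", the rewritten indices are [(k / 8) % 4, j % 16, k % 8]; each
+// repeated axis name consumes one more tile of the original iterator,
+// innermost first.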
+
+std::string get_ori_layout(std::set<std::string>* placeholder_axis_names, const te::Operation& op,
+                           const te::Tensor& placeholder) {
+  ReadAccessExtractor extractor;
+  for (const auto& exp : op.as<te::ComputeOpNode>()->body) {
+    extractor.Extract(exp);
+  }
+
+  std::ostringstream os;
+  uint i = 0;
+  const auto& placeholder_op = placeholder->op;
+  CHECK_GT(extractor.read_access.count(placeholder_op), 0);
+  for (const auto& ev : extractor.read_access[placeholder_op]) {
+    for (const auto& e : ev) {
+      std::string axis_name;
+      if (const auto* pimm = e.as<IntImmNode>()) {
+        CHECK_EQ(pimm->value, 0);
+        axis_name = "IntImm";
+      } else {
+        axis_name = BaseName(CleanName(Downcast<Var>(e)->name_hint));
+      }
+
+      placeholder_axis_names->insert(axis_name);
+      os << placeholder->shape[i++] << axis_name;
+    }
+  }
+
+  CHECK_EQ(placeholder_axis_names->size(), placeholder->shape.size());
+  std::string ori_layout = os.str();
+  os.str("");
+  // TODO(minmin): uncomment this line for relay integration
+  // ::tvm::relay::KernelLayoutTransformer::global_ori_layouts_queue.push_back(ori_layout);
+  return ori_layout;
+}
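+// Example: a placeholder of shape (512, 512) read as weights[k][j] yields
+// the layout string "512k512j", which parse_kernel_layout maps back to
+// shape = {512, 512} and axes = {"k", "j"}.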
+
+std::string get_new_layout(Array<PrimExpr>* new_shape, const State& state, const int stage_id,
+                           const Stage& stage, const te::Operation& op,
+                           const te::Tensor& placeholder,
+                           const std::set<std::string>& placeholder_axis_names) {
+  std::ostringstream os;
+  Array<Iterator> stage_iters;
+
+  auto attach_it = state->attach_map->stage_to_attach_iter.find(stage_id);
+  int attach_pos = -1;
+  size_t iters_before_attach = 0;
+  if (attach_it != state->attach_map->stage_to_attach_iter.end()) {
+    auto attach = attach_it->second;
+    const auto& attach_stage = state->stages[attach.first];
+    attach_pos = attach.second;
+

[GitHub] [incubator-tvm] minminsun commented on a change in pull request #6297: [Ansor][AutoTVM v2.0] Phase 2: Layout Rewrite in AutoScheduler

2020-08-26 Thread GitBox


minminsun commented on a change in pull request #6297:
URL: https://github.com/apache/incubator-tvm/pull/6297#discussion_r477112406



##
File path: src/auto_scheduler/compute_dag.cc
##
(This comment quotes the same compute_dag.cc diff hunk as the message above.)

[GitHub] [incubator-tvm] minminsun commented on a change in pull request #6297: [Ansor][AutoTVM v2.0] Phase 2: Layout Rewrite in AutoScheduler

2020-08-26 Thread GitBox


minminsun commented on a change in pull request #6297:
URL: https://github.com/apache/incubator-tvm/pull/6297#discussion_r477112240



##
File path: src/auto_scheduler/compute_dag.cc
##
(Quotes the same compute_dag.cc diff hunk as above, ending at the BaseName helper.)

Review comment:
   Done.

##
File path: src/auto_scheduler/compute_dag.cc
##
(Quotes the same diff hunk as above, ending at the parse_kernel_layout call inside IndexRewriter::VisitExpr_.)

Review comment:
   Done.









[GitHub] [incubator-tvm] minminsun commented on a change in pull request #6297: [Ansor][AutoTVM v2.0] Phase 2: Layout Rewrite in AutoScheduler

2020-08-26 Thread GitBox


minminsun commented on a change in pull request #6297:
URL: https://github.com/apache/incubator-tvm/pull/6297#discussion_r477111962



##
File path: src/auto_scheduler/compute_dag.cc
##
@@ -40,6 +40,7 @@
 #include <vector>
 
 #include "../arith/pattern_match.h"
+#include "search_policy/utils.h"

Review comment:
   Done.









[GitHub] [incubator-tvm] minminsun commented on a change in pull request #6297: [Ansor][AutoTVM v2.0] Phase 2: Layout Rewrite in AutoScheduler

2020-08-26 Thread GitBox


minminsun commented on a change in pull request #6297:
URL: https://github.com/apache/incubator-tvm/pull/6297#discussion_r477107266



##
File path: python/tvm/auto_scheduler/measure.py
##
@@ -419,7 +419,7 @@ def timed_func():
 
     try:
         sch, args = task.compute_dag.apply_steps_from_state(
-            inp.state)
+            inp.state, layout_rewrite=True)

Review comment:
   Whether layout rewrite is applied to an op is specified by the attr
   layout_free_placeholders in its compute definition, so it is safe to set
   layout_rewrite=True by default here.
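
   As a minimal sketch of that mechanism (the matmul workload and all names
   below are illustrative, not taken from this PR), a compute definition marks
   a placeholder as layout-free through the attrs of te.compute:

       from tvm import te, auto_scheduler

       @auto_scheduler.register_workload
       def matmul(N, M, K):
           A = te.placeholder((N, K), name="A")
           B = te.placeholder((K, M), name="B")
           k = te.reduce_axis((0, K), name="k")
           C = te.compute(
               (N, M),
               lambda i, j: te.sum(A[i, k] * B[k, j], axis=k),
               name="C",
               # B is layout-free, so the auto-scheduler may rewrite its layout.
               attrs={"layout_free_placeholders": [B]},
           )
           return [A, B, C]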









[GitHub] [incubator-tvm] minminsun commented on a change in pull request #6297: [Ansor][AutoTVM v2.0] Phase 2: Layout Rewrite in AutoScheduler

2020-08-26 Thread GitBox


minminsun commented on a change in pull request #6297:
URL: https://github.com/apache/incubator-tvm/pull/6297#discussion_r477105200



##
File path: python/tvm/auto_scheduler/compute_dag.py
##
@@ -72,7 +72,7 @@ def get_init_state(self):
         """
         return State(self.init_state, self)
 
-    def apply_steps_from_state(self, state):
+    def apply_steps_from_state(self, state, layout_rewrite=False):

Review comment:
   Done.
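
   As a usage sketch (task, best_state, and the llvm target are placeholders
   standing in for a tuned auto_scheduler.SearchTask and its best state), the
   new flag is threaded through like this:

       import tvm

       sch, args = task.compute_dag.apply_steps_from_state(
           best_state, layout_rewrite=True)
       func = tvm.build(sch, args, target="llvm")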




