apeforest commented on a change in pull request #14940: Fix warnings
URL: https://github.com/apache/incubator-mxnet/pull/14940#discussion_r314589551
 
 

 ##########
 File path: src/common/exec_utils.cc
 ##########
 @@ -0,0 +1,526 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+#include "exec_utils.h"
+
+namespace mxnet {
+namespace common {
+
+
+bool SetupDefaultBlobsIn(const std::vector<NDArray>& src,
+                                const std::vector<NDArray> *bufs,
+                                std::vector<TBlob> *blobs,
+                                std::vector<NDArray> *temp_src,
+                                std::vector<NDArray> *temp_dst,
+                                std::unordered_map<uint32_t, uint32_t> 
*idx_map) {
+  bool require_cast = false;
+  for (size_t i = 0; i < src.size(); i++) {
+    auto& nd = src[i];
+    bool is_default = nd.storage_type() == kDefaultStorage;
+#if MXNET_USE_MKLDNN == 1
+    // We have to make sure it's default storage and default layout.
+    is_default = nd.IsDefaultData();
+#endif
+    if (!is_default) {
+      (*idx_map)[i] = temp_dst->size();
+      NDArray temp = bufs != nullptr ? bufs->at(i) : NDArray(nd.shape(), 
nd.ctx(),
+                                                             true, nd.dtype());
+#if MXNET_USE_MKLDNN == 1
+      CHECK(temp.IsDefaultData());
+#endif
+      temp_src->emplace_back(nd);
+      temp_dst->emplace_back(temp);
+      blobs->emplace_back(temp.data());
+      require_cast = true;
+    } else {
+      blobs->push_back(nd.data());
+    }
+  }
+  return require_cast;
+}
+
/*!
 * \brief Collect dense (default-storage) TBlobs for a list of output arrays,
 *        creating dense temporaries where the outputs are not default.
 *
 * Note the argument order at the call site: results are written into the
 * temporaries first and cast *back* into the real outputs afterwards, so
 * callers pass their post-execution dst list as \p temp_src and vice versa.
 *
 * \param src      output arrays, possibly with non-default storage/layout
 * \param bufs     optional pre-allocated dense buffers, parallel to \p src;
 *                 may be nullptr
 * \param req      write request per output; may be rewritten in place
 *                 (kWriteInplace -> kWriteTo, see below)
 * \param blobs    output: one dense TBlob appended per element of \p src
 * \param temp_src output: arrays recorded for the cast-back step
 * \param temp_dst output: dense counterparts matching \p temp_src
 * \return true if at least one array requires a cast to default storage
 */
bool SetupDefaultBlobsOut(const std::vector<NDArray>& src,
                          const std::vector<NDArray> *bufs,
                          std::vector<OpReqType> *req,
                          std::vector<TBlob> *blobs,
                          std::vector<NDArray> *temp_src,
                          std::vector<NDArray> *temp_dst) {
  bool require_cast = false;
  for (size_t i = 0; i < src.size(); i++) {
    auto& nd = src[i];
    bool is_default = nd.storage_type() == kDefaultStorage;
#if MXNET_USE_MKLDNN == 1
    if (req->at(i) == kWriteInplace && nd.IsMKLDNNData())
      // If it's write inplace and the output array doesn't use the default
      // layout, we'll generate a temporary output array below, which means
      // the input array and the output array are no longer the same array.
      // we should change the request type.
      req->at(i) = kWriteTo;
    // We have to make sure it's default storage and default layout.
    is_default = nd.IsDefaultData();
#endif
    if (!is_default) {
#if MXNET_USE_MKLDNN == 1
      NDArray temp;
      if (bufs != nullptr) {
        // Caller supplied a pre-allocated dense buffer: reuse it.
        temp = bufs->at(i);
      } else if (kAddTo == req->at(i) && nd.IsMKLDNNData()) {
        // kAddTo must see the existing values, so reorder the MKL-DNN
        // layout to default rather than allocating an empty array.
        temp = nd.Reorder2Default();
      } else if (kAddTo == req->at(i)) {
        // Already default layout: accumulate directly into the output.
        temp = nd;
      } else {
        // Plain write request: a fresh dense array is sufficient.
        temp = NDArray(nd.shape(), nd.ctx(), true, nd.dtype());
      }
      CHECK(temp.IsDefaultData());
#else
      // Without MKL-DNN only the storage type can differ: allocate a dense
      // temporary (or reuse the caller-provided buffer).
      NDArray temp = bufs != nullptr ? bufs->at(i) : NDArray(nd.shape(), nd.ctx(),
          true, nd.dtype());
#endif
      temp_src->emplace_back(nd);
      temp_dst->emplace_back(temp);
      blobs->emplace_back(temp.data());
      require_cast = true;
    } else {
      blobs->push_back(nd.data());
    }
  }
  return require_cast;
}
+
+
+void SetupDefaultBlobsInOut(const std::vector<NDArray> &ndinputs,
+                                   const std::vector<NDArray> &ndoutputs,
+                                   const std::vector<NDArray> *in_bufs,
+                                   const std::vector<NDArray> *out_bufs,
+                                   std::vector<OpReqType> *req,
+                                   std::vector<TBlob> *input_blobs,
+                                   std::vector<TBlob> *output_blobs,
+                                   std::vector<NDArray> *pre_temp_src,
+                                   std::vector<NDArray> *pre_temp_dst,
+                                   std::vector<NDArray> *post_temp_src,
+                                   std::vector<NDArray> *post_temp_dst,
+                                   std::unordered_map<uint32_t, uint32_t> 
*in_temp_idx_map,
+                                   const std::vector<uint32_t> &mutate_idx) {
+  // populate input blobs
+  SetupDefaultBlobsIn(ndinputs, in_bufs, input_blobs, pre_temp_src, 
pre_temp_dst,
+                      in_temp_idx_map);
+  // populate output blobs
+  SetupDefaultBlobsOut(ndoutputs, out_bufs, req, output_blobs, post_temp_dst,
+                       post_temp_src);
+  // add mutable inputs to post temp list
+  for (const auto idx : mutate_idx) {
+    auto map_iter = in_temp_idx_map->find(idx);
+    if (map_iter != in_temp_idx_map->end()) {
+      post_temp_src->push_back(pre_temp_dst->at(map_iter->second));
+      post_temp_dst->push_back(ndinputs[idx]);
+    }
+  }
+}
+
+
+void CastNonDefaultStorage(const std::vector<NDArray>& src,
+                                  const std::vector<NDArray>& dst,
+                                  const OpContext& ctx,
+                                  const bool is_gpu) {
+  CHECK_EQ(dst.size(), src.size());
+  for (size_t i = 0; i < src.size(); i++) {
+    if (is_gpu) {
+#if MXNET_USE_CUDA
+      CastStorageDispatch<gpu>(ctx, src[i], dst[i]);
+#else
+      LOG(FATAL) << MXNET_GPU_NOT_ENABLED_ERROR;
+#endif
+    } else {
+      CastStorageDispatch<cpu>(ctx, src[i], dst[i]);
+    }
+  }
+}
+
+
+bool SameType(const nnvm::NodeAttrs& attrs,
+                     std::vector<int> *iattr,
+                     std::vector<int> *oattr) {
+  int def_v = -1;
+  for (int v : *oattr) {
+    if (v != -1) {
+      def_v = v; break;
+    }
+  }
+  if (def_v == -1) {
+    for (int v : *iattr) {
+      if (v != -1) {
+        def_v = v; break;
+      }
+    }
+  }
+  if (def_v == -1) return false;
+  for (int& v : *oattr) {
+    v = def_v;
+  }
+  for (int& v : *iattr) {
+    v = def_v;
+  }
+  return true;
+}
+
+
+
+bool DefaultStorageType(const nnvm::NodeAttrs& attrs,
+                               const int dev_mask,
+                               DispatchMode* dispatch_mode,
+                               std::vector<int> *iattr,
+                               std::vector<int> *oattr) {
+  bool fallback = false;
+  for (int& v : *oattr) {
+    if (v == -1) v = kDefaultStorage;
+    if (v != kDefaultStorage) fallback = true;
+  }
+  for (int& v : *iattr) {
+    if (v == -1) v = kDefaultStorage;
+    if (v != kDefaultStorage) fallback = true;
+  }
+  if (*dispatch_mode == DispatchMode::kUndefined) {
+    if (fallback) {
+      *dispatch_mode = DispatchMode::kFComputeFallback;
+    } else {
+      *dispatch_mode = DispatchMode::kFCompute;
+    }
+  }
+  return true;
+}
+
/*!
 * \brief Render a memory-planner storage id as a human-readable string.
 *
 * \param storage_id -1 for a variable, -2 for external storage, any other
 *                    value is treated as a storage group id
 * \return the descriptive string used by LogMemoryPlan
 */
std::string storage_str(int storage_id) {
  switch (storage_id) {
    case -1:
      return "var (-1)";
    case -2:
      return "external storage (-2)";
    default:
      return "group " + std::to_string(storage_id);
  }
}
+
+
/*!
 * \brief Log the memory plan of a graph: for each node in the planned range,
 *        print every input/output entry with its shape, size in KB, and the
 *        storage assignment produced by the memory planner.
 *
 * Requires the "shape", "dtype" and "storage_id" graph attributes to be
 * present; honors an optional "node_range" attribute to restrict output to
 * a sub-range of nodes.
 *
 * \param g graph whose memory plan is logged via LOG(INFO)
 */
void LogMemoryPlan(const nnvm::Graph& g) {
  const auto &idx = g.indexed_graph();
  const auto& vshape = g.GetAttr<mxnet::ShapeVector>("shape");
  const auto& vtype = g.GetAttr<nnvm::DTypeVector>("dtype");
  const auto& vstorage = g.GetAttr<nnvm::StorageVector>("storage_id");
  // find node range
  uint32_t node_start = 0, node_end = idx.num_nodes();
  if (g.attrs.count("node_range")) {
    const auto& range = g.GetAttr<std::pair<uint32_t, uint32_t> >("node_range");
    node_start = range.first;
    node_end = range.second;
  }
  for (uint32_t nid = node_start; nid < node_end; ++nid) {
    const auto& inode = idx[nid];
    if (inode.source->is_variable()) {
      // Variables have no op and no planned entries to report.
      LOG(INFO) << "node " << nid << " var";
    } else {
      LOG(INFO) << "node " << nid << " " << inode.source->attrs.op->name;
      // One line per input entry: shape, size and storage assignment.
      for (const auto& e : inode.inputs) {
        auto eid = idx.entry_id(e);
        size_t kilo_bytes = vshape[eid].Size() * mshadow::mshadow_sizeof(vtype[eid]) / 1024;
        LOG(INFO) << "\t\tinput " << eid << ": " << vshape[eid] << " ("
                  << kilo_bytes << " KB) -> " << storage_str(vstorage[eid]);
      }
      // Same for each output entry of the node.
      for (uint32_t index = 0; index < inode.source->num_outputs(); ++index) {
        uint32_t eid = idx.entry_id(nid, index);
        size_t kilo_bytes = vshape[eid].Size() * mshadow::mshadow_sizeof(vtype[eid]) / 1024;
        LOG(INFO) << "\t\toutput " << eid << ": " << vshape[eid] << " ("
                  << kilo_bytes << " KB) -> " << storage_str(vstorage[eid]);
      }
    }
  }
}
+
+
 
 Review comment:
   please fix before resolving.

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[email protected]


With regards,
Apache Git Services

Reply via email to