This is an automated email from the ASF dual-hosted git repository.

jxie pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-mxnet.git


The following commit(s) were added to refs/heads/master by this push:
     new 9bfd9e1  rm duplicated and unused code (#7764)
9bfd9e1 is described below

commit 9bfd9e14dc51188e76a6253838744bb9e7de2c5e
Author: formath <[email protected]>
AuthorDate: Fri Sep 8 01:30:45 2017 +0800

    rm duplicated and unused code (#7764)
    
    * rm not use variables
    
    * rm duplicated and unused code
    
    * bug
---
 src/c_api/c_api_executor.cc    |  3 ---
 src/executor/graph_executor.cc | 11 +++++------
 2 files changed, 5 insertions(+), 9 deletions(-)

diff --git a/src/c_api/c_api_executor.cc b/src/c_api/c_api_executor.cc
index 631c1a7..8be3965 100644
--- a/src/c_api/c_api_executor.cc
+++ b/src/c_api/c_api_executor.cc
@@ -422,9 +422,6 @@ int MXExecutorSimpleBind(SymbolHandle symbol_handle,
 
   // create shared_buffer_map
   std::unordered_map<std::string, NDArray> shared_buffer_map;
-  std::vector<NDArray> shared_exec_in_args;
-  std::vector<NDArray> shared_exec_arg_grads;
-  std::vector<NDArray> shared_exec_aux_states;
   bool use_shared_buffer = (*shared_buffer_len >= 0);
   if (*shared_buffer_len > 0) {
     // create shared_buffer_map
diff --git a/src/executor/graph_executor.cc b/src/executor/graph_executor.cc
index 9c43983..cfc7fe4 100644
--- a/src/executor/graph_executor.cc
+++ b/src/executor/graph_executor.cc
@@ -1335,8 +1335,8 @@ void GraphExecutor::InitOpSegs() {
     num_nodes_threshold = std::numeric_limits<size_t>::max();
   }
 
-  // create forward segments for training
-  if (prefer_bulk_exec > 0) {
+  if (prefer_bulk_exec) {
+    // create forward segments for training
     size_t topo_start = 0;
     for (size_t nid = 0; nid < num_forward_nodes_; nid++) {
       auto &node = graph_.indexed_graph()[nid].source;
@@ -1354,17 +1354,15 @@ void GraphExecutor::InitOpSegs() {
     if (topo_start != num_forward_nodes_) {
      cached_seg_opr_[topo_start] = this->CreateCachedSegOpr(topo_start, num_forward_nodes_);
     }
-  }
 
-  // create backward segments for training
-  if (prefer_bulk_exec) {
+    // create backward segments for training
     // get all gradient variables
     std::unordered_set<engine::VarHandle> grad_vars;
     for (auto &kv : grad_store_) {
       grad_vars.insert(kv.second.var());
     }
     auto &idx = graph_.indexed_graph();
-    size_t topo_start = num_forward_nodes_;
+    topo_start = num_forward_nodes_;
     for (size_t nid = num_forward_nodes_; nid < total_num_nodes; nid++) {
       auto &op_node = op_nodes_[nid];
       if (op_node.skip_exec_node || op_node.exec == nullptr) {
@@ -1393,6 +1391,7 @@ void GraphExecutor::InitOpSegs() {
      cached_seg_opr_[topo_start] = this->CreateCachedSegOpr(topo_start, total_num_nodes);
     }
   }
+
   return;
 }
 

-- 
To stop receiving notification emails like this one, please contact
"[email protected]" <[email protected]>.

Reply via email to