Revision: 24833
Author:   [email protected]
Date:     Thu Oct 23 10:33:49 2014 UTC
Log:      Make sure floating phi nodes are coupled to their control (2).

[email protected]

Review URL: https://codereview.chromium.org/673513004
https://code.google.com/p/v8/source/detail?r=24833

Modified:
 /branches/bleeding_edge/src/compiler/scheduler.cc
 /branches/bleeding_edge/src/compiler/scheduler.h

=======================================
--- /branches/bleeding_edge/src/compiler/scheduler.cc Wed Oct 22 11:31:12 2014 UTC
+++ /branches/bleeding_edge/src/compiler/scheduler.cc Thu Oct 23 10:33:49 2014 UTC
@@ -28,14 +28,13 @@
 }


-Scheduler::Scheduler(ZonePool* zone_pool, Zone* zone, Graph* graph,
-                     Schedule* schedule)
-    : zone_pool_(zone_pool),
-      zone_(zone),
+Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule)
+    : zone_(zone),
       graph_(graph),
       schedule_(schedule),
       scheduled_nodes_(zone),
       schedule_root_nodes_(zone),
+      schedule_queue_(zone),
       node_data_(graph_->NodeCount(), DefaultSchedulerData(), zone),
       has_floating_control_(false) {}

@@ -47,7 +46,7 @@
     ZonePool::Scope zone_scope(zone_pool);
     schedule = new (graph->zone())
         Schedule(graph->zone(), static_cast<size_t>(graph->NodeCount()));
-    Scheduler scheduler(zone_pool, zone_scope.zone(), graph, schedule);
+    Scheduler scheduler(zone_scope.zone(), graph, schedule);

     scheduler.BuildCFG();
     Scheduler::ComputeSpecialRPO(zone_pool, schedule);
@@ -68,6 +67,12 @@
   SchedulerData def = {NULL, 0, false, false, kUnknown};
   return def;
 }
+
+
+Scheduler::SchedulerData* Scheduler::GetData(Node* node) {
+  DCHECK(node->id() < static_cast<int>(node_data_.size()));
+  return &node_data_[node->id()];
+}


 Scheduler::Placement Scheduler::GetPlacement(Node* node) {
@@ -80,8 +85,10 @@
         break;
       case IrOpcode::kPhi:
       case IrOpcode::kEffectPhi: {
-        // Phis and effect phis are fixed if their control inputs are.
-        data->placement_ = GetPlacement(NodeProperties::GetControlInput(node));
+        // Phis and effect phis are fixed if their control inputs are, whereas
+        // otherwise they are coupled to a floating control node.
+        Placement p = GetPlacement(NodeProperties::GetControlInput(node));
+        data->placement_ = (p == kFixed ? kFixed : kCoupled);
         break;
       }
 #define DEFINE_FLOATING_CONTROL_CASE(V) case IrOpcode::k##V:
@@ -107,6 +114,49 @@
 }


+void Scheduler::IncrementUnscheduledUseCount(Node* node, Node* from) {
+  if (GetPlacement(node) == kCoupled) {
+    // Use count for coupled nodes is summed up on their control.
+    Node* control = NodeProperties::GetControlInput(node);
+    return IncrementUnscheduledUseCount(control, from);
+  }
+  ++(GetData(node)->unscheduled_count_);
+  if (FLAG_trace_turbo_scheduler) {
+    Trace("  Use count of #%d:%s (used by #%d:%s)++ = %d\n", node->id(),
+          node->op()->mnemonic(), from->id(), from->op()->mnemonic(),
+          GetData(node)->unscheduled_count_);
+  }
+}
+
+
+void Scheduler::DecrementUnscheduledUseCount(Node* node, Node* from) {
+  if (GetPlacement(node) == kCoupled) {
+    // Use count for coupled nodes is summed up on their control.
+    Node* control = NodeProperties::GetControlInput(node);
+    return DecrementUnscheduledUseCount(control, from);
+  }
+  DCHECK(GetData(node)->unscheduled_count_ > 0);
+  --(GetData(node)->unscheduled_count_);
+  if (FLAG_trace_turbo_scheduler) {
+    Trace("  Use count of #%d:%s (used by #%d:%s)-- = %d\n", node->id(),
+          node->op()->mnemonic(), from->id(), from->op()->mnemonic(),
+          GetData(node)->unscheduled_count_);
+  }
+  if (GetData(node)->unscheduled_count_ == 0) {
+    Trace("  newly eligible #%d:%s\n", node->id(), node->op()->mnemonic());
+    schedule_queue_.push(node);
+  }
+}
+
+
+int Scheduler::GetRPONumber(BasicBlock* block) {
+  DCHECK(block->rpo_number() >= 0 &&
+         block->rpo_number() < static_cast<int>(schedule_->rpo_order_.size()));
+  DCHECK(schedule_->rpo_order_[block->rpo_number()] == block);
+  return block->rpo_number();
+}
+
+
 BasicBlock* Scheduler::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) {
   while (b1 != b2) {
     int b1_rpo = GetRPONumber(b1);
@@ -409,34 +459,19 @@

   void PostEdge(Node* from, int index, Node* to) {
     // If the edge is from an unscheduled node, then tally it in the use count
-    // for all of its inputs. The same criterion will be used in ScheduleLate
+    // for all of its inputs. Also make sure that control edges from coupled
+    // nodes are not counted. The same criterion will be used in ScheduleLate
     // for decrementing use counts.
-    if (!schedule_->IsScheduled(from)) {
+    if (!schedule_->IsScheduled(from) && !IsCoupledControlEdge(from, index)) {
       DCHECK_NE(Scheduler::kFixed, scheduler_->GetPlacement(from));
-      ++(scheduler_->GetData(to)->unscheduled_count_);
-      Trace("  Use count of #%d:%s (used by #%d:%s)++ = %d\n", to->id(),
-            to->op()->mnemonic(), from->id(), from->op()->mnemonic(),
-            scheduler_->GetData(to)->unscheduled_count_);
-      if (OperatorProperties::IsBasicBlockBegin(to->op()) &&
-          (from->opcode() == IrOpcode::kEffectPhi ||
-           from->opcode() == IrOpcode::kPhi) &&
-          scheduler_->GetData(to)->is_floating_control_ &&
-          !scheduler_->GetData(to)->is_connected_control_) {
-        for (InputIter i = from->inputs().begin(); i != from->inputs().end();
-             ++i) {
-          if (!NodeProperties::IsControlEdge(i.edge())) {
-            ++(scheduler_->GetData(*i)->unscheduled_count_);
-            Trace(
-                "  Use count of #%d:%s (additional dependency of #%d:%s)++ = "
-                "%d\n",
-                (*i)->id(), (*i)->op()->mnemonic(), to->id(),
-                to->op()->mnemonic(),
-                scheduler_->GetData(*i)->unscheduled_count_);
-          }
-        }
-      }
+      scheduler_->IncrementUnscheduledUseCount(to, from);
     }
   }
+
+  bool IsCoupledControlEdge(Node* node, int index) {
+    return scheduler_->GetPlacement(node) == Scheduler::kCoupled &&
+           NodeProperties::FirstControlIndex(node) == index;
+  }

  private:
   Scheduler* scheduler_;
@@ -538,7 +573,7 @@
 class ScheduleLateNodeVisitor {
  public:
   ScheduleLateNodeVisitor(Zone* zone, Scheduler* scheduler)
-      : scheduler_(scheduler), schedule_(scheduler_->schedule_), queue_(zone) {}
+      : scheduler_(scheduler), schedule_(scheduler_->schedule_) {}

   // Run the schedule late algorithm on a set of fixed root nodes.
   void Run(NodeVector* roots) {
@@ -549,12 +584,22 @@

  private:
   void ProcessQueue(Node* root) {
+    ZoneQueue<Node*>* queue = &(scheduler_->schedule_queue_);
     for (InputIter i = root->inputs().begin(); i != root->inputs().end(); ++i) {
-      if (scheduler_->GetData(*i)->unscheduled_count_ != 0) continue;
-      queue_.push(*i);
-      while (!queue_.empty()) {
-        VisitNode(queue_.front());
-        queue_.pop();
+      Node* node = *i;
+
+      // Don't schedule coupled nodes on their own.
+      if (scheduler_->GetPlacement(node) == Scheduler::kCoupled) {
+        node = NodeProperties::GetControlInput(node);
+      }
+
+      // Test schedulability condition by looking at unscheduled use count.
+      if (scheduler_->GetData(node)->unscheduled_count_ != 0) continue;
+
+      queue->push(node);
+      while (!queue->empty()) {
+        VisitNode(queue->front());
+        queue->pop();
       }
     }
   }
@@ -563,33 +608,22 @@
   // schedule late position. Also hoists nodes out of loops to find a more
   // optimal scheduling position.
   void VisitNode(Node* node) {
-    DCHECK(scheduler_->GetData(node)->unscheduled_count_ == 0);
+    DCHECK_EQ(0, scheduler_->GetData(node)->unscheduled_count_);

     // Don't schedule nodes that are already scheduled.
     if (schedule_->IsScheduled(node)) return;
-
-    Scheduler::SchedulerData* data = scheduler_->GetData(node);
-    DCHECK_EQ(Scheduler::kSchedulable, data->placement_);
+    DCHECK_EQ(Scheduler::kSchedulable, scheduler_->GetPlacement(node));

// Determine the dominating block for all of the uses of this node. It is
     // the latest block that this node can be scheduled in.
-    BasicBlock* block = NULL;
-    for (Node::Uses::iterator i = node->uses().begin();
-         i != node->uses().end(); ++i) {
-      BasicBlock* use_block = GetBlockForUse(i.edge());
-      block = block == NULL ? use_block : use_block == NULL
-                                              ? block
-                                              : scheduler_->GetCommonDominator(
-                                                    block, use_block);
-    }
-    DCHECK(block != NULL);
+    BasicBlock* block = GetCommonDominatorOfUses(node);
+    DCHECK_NOT_NULL(block);

-    int min_rpo = data->minimum_block_->rpo_number();
-    Trace(
-        "Schedule late conservative for #%d:%s is B%d at loop depth %d, "
-        "minimum_rpo = %d\n",
-        node->id(), node->op()->mnemonic(), block->id().ToInt(),
-        block->loop_depth(), min_rpo);
+    int min_rpo = scheduler_->GetData(node)->minimum_block_->rpo_number();
+    Trace("Schedule late of #%d:%s is B%d at loop depth %d, minimum_rpo = %d\n",
+          node->id(), node->op()->mnemonic(), block->id().ToInt(),
+          block->loop_depth(), min_rpo);
+
// Hoist nodes out of loops if possible. Nodes can be hoisted iteratively
     // into enclosing loop pre-headers until they would preceed their
     // ScheduleEarly position.
@@ -616,21 +650,42 @@

     ScheduleNode(block, node);
   }
+
+ private:
+  BasicBlock* GetCommonDominatorOfUses(Node* node) {
+    BasicBlock* block = NULL;
+    Node::Uses uses = node->uses();
+    for (Node::Uses::iterator i = uses.begin(); i != uses.end(); ++i) {
+      BasicBlock* use_block = GetBlockForUse(i.edge());
+      block = block == NULL ? use_block : use_block == NULL
+                                              ? block
+                                              : scheduler_->GetCommonDominator(
+                                                    block, use_block);
+    }
+    return block;
+  }

   BasicBlock* GetBlockForUse(Node::Edge edge) {
     Node* use = edge.from();
     IrOpcode::Value opcode = use->opcode();
     if (opcode == IrOpcode::kPhi || opcode == IrOpcode::kEffectPhi) {
+      // If the use is from a coupled (i.e. floating) phi, compute the common
+      // dominator of its uses. This will not recurse more than one level.
+      if (scheduler_->GetPlacement(use) == Scheduler::kCoupled) {
+        Trace("  inspecting uses of coupled phi #%d:%s\n", use->id(),
+              use->op()->mnemonic());
+        DCHECK_EQ(edge.to(), NodeProperties::GetControlInput(use));
+        return GetCommonDominatorOfUses(use);
+      }
       // If the use is from a fixed (i.e. non-floating) phi, use the block
       // of the corresponding control input to the merge.
-      int index = edge.index();
       if (scheduler_->GetPlacement(use) == Scheduler::kFixed) {
-        Trace("  input@%d into a fixed phi #%d:%s\n", index, use->id(),
+        Trace("  input@%d into a fixed phi #%d:%s\n", edge.index(), use->id(),
               use->op()->mnemonic());
         Node* merge = NodeProperties::GetControlInput(use, 0);
         opcode = merge->opcode();
         DCHECK(opcode == IrOpcode::kMerge || opcode == IrOpcode::kLoop);
-        use = NodeProperties::GetControlInput(merge, index);
+        use = NodeProperties::GetControlInput(merge, edge.index());
       }
     }
     BasicBlock* result = schedule_->block(use);
@@ -648,48 +703,12 @@
     // schedulable. If all the uses of a node have been scheduled, then the node
     // itself can be scheduled.
     for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) {
-      Scheduler::SchedulerData* data = scheduler_->GetData(*i);
-      DCHECK(data->unscheduled_count_ > 0);
-      --data->unscheduled_count_;
-      if (FLAG_trace_turbo_scheduler) {
-        Trace("  Use count for #%d:%s (used by #%d:%s)-- = %d\n", (*i)->id(),
-              (*i)->op()->mnemonic(), i.edge().from()->id(),
-              i.edge().from()->op()->mnemonic(), data->unscheduled_count_);
-      }
-      if (data->unscheduled_count_ == 0) {
-        Trace("  newly eligible #%d:%s\n", (*i)->id(), (*i)->op()->mnemonic());
-        queue_.push(*i);
-      }
-    }
-
-    for (UseIter i = node->uses().begin(); i != node->uses().end(); ++i) {
-      Node* use = *i;
-      if (use->opcode() == IrOpcode::kPhi ||
-          use->opcode() == IrOpcode::kEffectPhi) {
-        Node* control = NodeProperties::GetControlInput(use);
-        Scheduler::SchedulerData* data = scheduler_->GetData(control);
-        if (data->is_floating_control_ && !data->is_connected_control_) {
-          --data->unscheduled_count_;
-          if (FLAG_trace_turbo_scheduler) {
-            Trace(
-                "  Use count for #%d:%s (additional dependency of #%d:%s)-- = "
-                "%d\n",
-                (*i)->id(), (*i)->op()->mnemonic(), node->id(),
-                node->op()->mnemonic(), data->unscheduled_count_);
-          }
-          if (data->unscheduled_count_ == 0) {
-            Trace("  newly eligible #%d:%s\n", (*i)->id(),
-                  (*i)->op()->mnemonic());
-            queue_.push(*i);
-          }
-        }
-      }
+      scheduler_->DecrementUnscheduledUseCount(*i, i.edge().from());
     }
   }

   Scheduler* scheduler_;
   Schedule* schedule_;
-  ZoneQueue<Node*> queue_;
 };


@@ -705,11 +724,8 @@
   }

   // Schedule: Places nodes in dominator block of all their uses.
-  {
-    ZonePool::Scope zone_scope(zone_pool_);
-    ScheduleLateNodeVisitor schedule_late_visitor(zone_scope.zone(), this);
-    schedule_late_visitor.Run(&schedule_root_nodes_);
-  }
+  ScheduleLateNodeVisitor schedule_late_visitor(zone_, this);
+  schedule_late_visitor.Run(&schedule_root_nodes_);

   // Add collected nodes for basic blocks to their blocks in the right order.
   int block_num = 0;
=======================================
--- /branches/bleeding_edge/src/compiler/scheduler.h Tue Oct 21 16:31:51 2014 UTC
+++ /branches/bleeding_edge/src/compiler/scheduler.h Thu Oct 23 10:33:49 2014 UTC
@@ -29,7 +29,7 @@
                                              Schedule* schedule);

  private:
-  enum Placement { kUnknown, kSchedulable, kFixed };
+  enum Placement { kUnknown, kSchedulable, kFixed, kCoupled };

   // Per-node data tracked during scheduling.
   struct SchedulerData {
@@ -38,38 +38,30 @@
     bool is_connected_control_;  // {true} if control-connected to the end node.
     bool is_floating_control_;   // {true} if control, but not control-connected
                                  // to the end node.
-    Placement placement_ : 3;    // Whether the node is fixed, schedulable,
-                                 // or not yet known.
+    Placement placement_;        // Whether the node is fixed, schedulable,
+                                 // coupled to another node, or not yet known.
   };

-  ZonePool* zone_pool_;
   Zone* zone_;
   Graph* graph_;
   Schedule* schedule_;
   NodeVectorVector scheduled_nodes_;
   NodeVector schedule_root_nodes_;
+  ZoneQueue<Node*> schedule_queue_;
   ZoneVector<SchedulerData> node_data_;
   bool has_floating_control_;

-  Scheduler(ZonePool* zone_pool, Zone* zone, Graph* graph, Schedule* schedule);
+  Scheduler(Zone* zone, Graph* graph, Schedule* schedule);

-  SchedulerData DefaultSchedulerData();
-
-  SchedulerData* GetData(Node* node) {
-    DCHECK(node->id() < static_cast<int>(node_data_.size()));
-    return &node_data_[node->id()];
-  }
+  inline SchedulerData DefaultSchedulerData();
+  inline SchedulerData* GetData(Node* node);

   Placement GetPlacement(Node* node);

-  int GetRPONumber(BasicBlock* block) {
-    DCHECK(block->rpo_number() >= 0 &&
-           block->rpo_number() <
-               static_cast<int>(schedule_->rpo_order_.size()));
-    DCHECK(schedule_->rpo_order_[block->rpo_number()] == block);
-    return block->rpo_number();
-  }
+  void IncrementUnscheduledUseCount(Node* node, Node* from);
+  void DecrementUnscheduledUseCount(Node* node, Node* from);

+  inline int GetRPONumber(BasicBlock* block);
   BasicBlock* GetCommonDominator(BasicBlock* b1, BasicBlock* b2);

   // Phase 1: Build control-flow graph and dominator tree.

--
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev
--- You received this message because you are subscribed to the Google Groups "v8-dev" group.
To unsubscribe from this group and stop receiving emails from it, send an email 
to [email protected].
For more options, visit https://groups.google.com/d/optout.

Reply via email to