Revision: 25138
Author: [email protected]
Date: Wed Nov 5 10:10:28 2014 UTC
Log: Make special RPO computation iterative during scheduling.
This contains the following changes squashed together:
- Switch BasicBlock::loop_end to be a basic block instead of an RPO.
- Switch ScheduleLate to use dominator depth instead of RPO.
- Switch ScheduleEarly to use dominator depth instead of RPO.
- Push out absolute RPO ordering everywhere else in the scheduler.
- Keep linked list of blocks in RPO order while scheduling.
- Switch from RPO number to depth for dominator calculation.
[email protected]
Review URL: https://codereview.chromium.org/696363002
https://code.google.com/p/v8/source/detail?r=25138
Modified:
/branches/bleeding_edge/src/compiler/instruction.cc
/branches/bleeding_edge/src/compiler/schedule.cc
/branches/bleeding_edge/src/compiler/schedule.h
/branches/bleeding_edge/src/compiler/scheduler.cc
/branches/bleeding_edge/src/compiler/scheduler.h
/branches/bleeding_edge/test/cctest/compiler/test-instruction.cc
/branches/bleeding_edge/test/cctest/compiler/test-scheduler.cc
/branches/bleeding_edge/test/unittests/compiler/register-allocator-unittest.cc
=======================================
--- /branches/bleeding_edge/src/compiler/instruction.cc Tue Nov 4 09:21:12
2014 UTC
+++ /branches/bleeding_edge/src/compiler/instruction.cc Wed Nov 5 10:10:28
2014 UTC
@@ -346,7 +346,7 @@
static BasicBlock::RpoNumber GetLoopEndRpo(const BasicBlock* block) {
if (!block->IsLoopHeader()) return BasicBlock::RpoNumber::Invalid();
- return BasicBlock::RpoNumber::FromInt(block->loop_end());
+ return block->loop_end()->GetRpoNumber();
}
=======================================
--- /branches/bleeding_edge/src/compiler/schedule.cc Fri Oct 24 13:48:02
2014 UTC
+++ /branches/bleeding_edge/src/compiler/schedule.cc Wed Nov 5 10:10:28
2014 UTC
@@ -16,10 +16,11 @@
: ao_number_(-1),
rpo_number_(-1),
deferred_(false),
+ dominator_depth_(-1),
dominator_(NULL),
loop_header_(NULL),
+ loop_end_(NULL),
loop_depth_(0),
- loop_end_(-1),
control_(kNone),
control_input_(NULL),
nodes_(zone),
@@ -32,8 +33,9 @@
// RPO numbers must be initialized.
DCHECK(rpo_number_ >= 0);
DCHECK(block->rpo_number_ >= 0);
- if (loop_end_ < 0) return false; // This is not a loop.
- return block->rpo_number_ >= rpo_number_ && block->rpo_number_ <
loop_end_;
+ if (loop_end_ == NULL) return false; // This is not a loop.
+ return block->rpo_number_ >= rpo_number_ &&
+ block->rpo_number_ < loop_end_->rpo_number_;
}
@@ -58,6 +60,11 @@
void BasicBlock::set_control_input(Node* control_input) {
control_input_ = control_input;
}
+
+
+void BasicBlock::set_dominator_depth(int32_t dominator_depth) {
+ dominator_depth_ = dominator_depth;
+}
void BasicBlock::set_dominator(BasicBlock* dominator) {
@@ -75,7 +82,7 @@
}
-void BasicBlock::set_loop_end(int32_t loop_end) { loop_end_ = loop_end; }
+void BasicBlock::set_loop_end(BasicBlock* loop_end) { loop_end_ =
loop_end; }
void BasicBlock::set_loop_header(BasicBlock* loop_header) {
=======================================
--- /branches/bleeding_edge/src/compiler/schedule.h Mon Oct 27 09:36:51
2014 UTC
+++ /branches/bleeding_edge/src/compiler/schedule.h Wed Nov 5 10:10:28
2014 UTC
@@ -144,6 +144,9 @@
bool deferred() const { return deferred_; }
void set_deferred(bool deferred) { deferred_ = deferred; }
+
+ int32_t dominator_depth() const { return dominator_depth_; }
+ void set_dominator_depth(int32_t dominator_depth);
BasicBlock* dominator() const { return dominator_; }
void set_dominator(BasicBlock* dominator);
@@ -151,12 +154,12 @@
BasicBlock* loop_header() const { return loop_header_; }
void set_loop_header(BasicBlock* loop_header);
+ BasicBlock* loop_end() const { return loop_end_; }
+ void set_loop_end(BasicBlock* loop_end);
+
int32_t loop_depth() const { return loop_depth_; }
void set_loop_depth(int32_t loop_depth);
- int32_t loop_end() const { return loop_end_; }
- void set_loop_end(int32_t loop_end);
-
RpoNumber GetAoNumber() const { return RpoNumber::FromInt(ao_number_); }
int32_t ao_number() const { return ao_number_; }
void set_ao_number(int32_t ao_number) { ao_number_ = ao_number; }
@@ -166,19 +169,20 @@
void set_rpo_number(int32_t rpo_number);
// Loop membership helpers.
- inline bool IsLoopHeader() const { return loop_end_ >= 0; }
+ inline bool IsLoopHeader() const { return loop_end_ != NULL; }
bool LoopContains(BasicBlock* block) const;
private:
int32_t ao_number_; // assembly order number of the block.
int32_t rpo_number_; // special RPO number of the block.
bool deferred_; // true if the block contains deferred code.
+ int32_t dominator_depth_; // Depth within the dominator tree.
BasicBlock* dominator_; // Immediate dominator of the block.
BasicBlock* loop_header_; // Pointer to dominating loop header basic
block,
// NULL if none. For loop headers, this
points to
// enclosing loop header.
+ BasicBlock* loop_end_; // end of the loop, if this block is a loop
header.
int32_t loop_depth_; // loop nesting, 0 is top-level
- int32_t loop_end_; // end of the loop, if this block is a loop
header.
Control control_; // Control at the end of the block.
Node* control_input_; // Input value for control.
=======================================
--- /branches/bleeding_edge/src/compiler/scheduler.cc Mon Nov 3 10:30:34
2014 UTC
+++ /branches/bleeding_edge/src/compiler/scheduler.cc Wed Nov 5 10:10:28
2014 UTC
@@ -52,6 +52,8 @@
scheduler.ScheduleEarly();
scheduler.ScheduleLate();
+ scheduler.SealFinalSchedule();
+
return schedule;
}
@@ -209,22 +211,13 @@
schedule_queue_.push(node);
}
}
-
-
-int Scheduler::GetRPONumber(BasicBlock* block) {
- DCHECK(block->rpo_number() >= 0 &&
- block->rpo_number() <
static_cast<int>(schedule_->rpo_order_.size()));
- DCHECK(schedule_->rpo_order_[block->rpo_number()] == block);
- return block->rpo_number();
-}
BasicBlock* Scheduler::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) {
while (b1 != b2) {
- int b1_rpo = GetRPONumber(b1);
- int b2_rpo = GetRPONumber(b2);
- DCHECK(b1_rpo != b2_rpo);
- if (b1_rpo < b2_rpo) {
+ int32_t b1_depth = b1->dominator_depth();
+ int32_t b2_depth = b2->dominator_depth();
+ if (b1_depth < b2_depth) {
b2 = b2->dominator();
} else {
b1 = b1->dominator();
@@ -529,23 +522,164 @@
// 2. All loops are contiguous in the order (i.e. no intervening blocks
that
// do not belong to the loop.)
// Note a simple RPO traversal satisfies (1) but not (2).
-class SpecialRPONumberer {
+class SpecialRPONumberer : public ZoneObject {
public:
SpecialRPONumberer(Zone* zone, Schedule* schedule)
- : zone_(zone), schedule_(schedule) {}
+ : zone_(zone),
+ schedule_(schedule),
+ order_(NULL),
+ loops_(zone),
+ beyond_end_(NULL) {}
+ // Computes the special reverse-post-order for the main control flow
graph,
+ // that is for the graph spanned between the schedule's start and end
blocks.
void ComputeSpecialRPO() {
- // RPO should not have been computed for this schedule yet.
+ DCHECK_EQ(NULL, order_); // Main order does not exist yet.
+ // TODO(mstarzinger): Should use Schedule::end() after tests are fixed.
+ ComputeAndInsertSpecialRPO(schedule_->start(), NULL);
+ }
+
+ // Computes the special reverse-post-order for a partial control flow
graph,
+ // that is for the graph spanned between the given {entry} and {end}
blocks,
+ // then updates the existing ordering with this new information.
+ void UpdateSpecialRPO(BasicBlock* entry, BasicBlock* end) {
+ DCHECK_NE(NULL, order_); // Main order to be updated is present.
+ ComputeAndInsertSpecialRPO(entry, end);
+ }
+
+ // Serialize the previously computed order as an assembly order
(non-deferred
+ // code first, deferred code afterwards) into the final schedule.
+ void SerializeAOIntoSchedule() {
+ int32_t number = 0;
+ for (BlockList* l = order_; l != NULL; l = l->next) {
+ if (l->block->deferred()) continue;
+ l->block->set_ao_number(number++);
+ }
+ for (BlockList* l = order_; l != NULL; l = l->next) {
+ if (!l->block->deferred()) continue;
+ l->block->set_ao_number(number++);
+ }
+ }
+
+ // Serialize the previously computed order as a special
reverse-post-order
+ // numbering for basic blocks into the final schedule.
+ void SerializeRPOIntoSchedule() {
+ int32_t number = 0;
+ for (BlockList* l = order_; l != NULL; l = l->next) {
+ l->block->set_rpo_number(number++);
+ schedule_->rpo_order()->push_back(l->block);
+ }
+ BeyondEndSentinel()->set_rpo_number(number);
+ }
+
+ // Print and verify the special reverse-post-order.
+ void PrintAndVerifySpecialRPO() {
+#if DEBUG
+ if (FLAG_trace_turbo_scheduler) PrintRPO();
+ VerifySpecialRPO();
+#endif
+ }
+
+ private:
+ // TODO(mstarzinger): Only for Scheduler::GenerateImmediateDominatorTree.
+ friend class Scheduler;
+
+ // Numbering for BasicBlockData.rpo_number_ for this block traversal:
+ static const int kBlockOnStack = -2;
+ static const int kBlockVisited1 = -3;
+ static const int kBlockVisited2 = -4;
+ static const int kBlockUnvisited1 = -1;
+ static const int kBlockUnvisited2 = kBlockVisited1;
+
+ struct SpecialRPOStackFrame {
+ BasicBlock* block;
+ size_t index;
+ };
+
+ struct BlockList {
+ BasicBlock* block;
+ BlockList* next;
+
+ BlockList* Add(Zone* zone, BasicBlock* b) {
+ BlockList* list =
static_cast<BlockList*>(zone->New(sizeof(BlockList)));
+ list->block = b;
+ list->next = this;
+ return list;
+ }
+
+ BlockList* FindForBlock(BasicBlock* b) {
+ for (BlockList* l = this; l != NULL; l = l->next) {
+ if (l->block == b) return l;
+ }
+ return NULL;
+ }
+ };
+
+ struct LoopInfo {
+ BasicBlock* header;
+ ZoneList<BasicBlock*>* outgoing;
+ BitVector* members;
+ LoopInfo* prev;
+ BlockList* end;
+ BlockList* start;
+
+ void AddOutgoing(Zone* zone, BasicBlock* block) {
+ if (outgoing == NULL) {
+ outgoing = new (zone) ZoneList<BasicBlock*>(2, zone);
+ }
+ outgoing->Add(block, zone);
+ }
+ };
+
+ int Push(SpecialRPOStackFrame* stack, int depth, BasicBlock* child,
+ int unvisited) {
+ if (child->rpo_number() == unvisited) {
+ stack[depth].block = child;
+ stack[depth].index = 0;
+ child->set_rpo_number(kBlockOnStack);
+ return depth + 1;
+ }
+ return depth;
+ }
+
+ // We are hijacking the {ao_number} to enumerate loops temporarily. Note
that
+ // these numbers are only valid within this class.
+ static int GetLoopNumber(BasicBlock* block) { return block->ao_number();
}
+ static void SetLoopNumber(BasicBlock* block, int loop_number) {
+ return block->set_ao_number(loop_number);
+ }
+ static bool HasLoopNumber(BasicBlock* block) {
+ return block->ao_number() >= 0;
+ }
+
+ // TODO(mstarzinger): We only need this special sentinel because some
tests
+ // use the schedule's end block in actual control flow (e.g. with end
having
+ // successors). Once this has been cleaned up we can use the end block
here.
+ BasicBlock* BeyondEndSentinel() {
+ if (beyond_end_ == NULL) {
+ BasicBlock::Id id = BasicBlock::Id::FromInt(-1);
+ beyond_end_ = new (schedule_->zone()) BasicBlock(schedule_->zone(),
id);
+ }
+ return beyond_end_;
+ }
+
+ // Compute special RPO for the control flow graph between {entry} and
{end},
+ // mutating any existing order so that the result is still valid.
+ void ComputeAndInsertSpecialRPO(BasicBlock* entry, BasicBlock* end) {
+ // RPO should not have been serialized for this schedule yet.
+ CHECK_EQ(kBlockUnvisited1, schedule_->start()->ao_number());
CHECK_EQ(kBlockUnvisited1, schedule_->start()->rpo_number());
CHECK_EQ(0, static_cast<int>(schedule_->rpo_order()->size()));
+ // Find correct insertion point within existing order.
+ BlockList* insert_before = order_->FindForBlock(entry);
+ BlockList* insert_after = insert_before ? insert_before->next : NULL;
+
// Perform an iterative RPO traversal using an explicit stack,
// recording backedges that form cycles. O(|B|).
ZoneList<std::pair<BasicBlock*, size_t> > backedges(1, zone_);
SpecialRPOStackFrame* stack = zone_->NewArray<SpecialRPOStackFrame>(
static_cast<int>(schedule_->BasicBlockCount()));
- BasicBlock* entry = schedule_->start();
- BlockList* order = NULL;
int stack_depth = Push(stack, 0, entry, kBlockUnvisited1);
int num_loops = 0;
@@ -553,7 +687,8 @@
int current = stack_depth - 1;
SpecialRPOStackFrame* frame = stack + current;
- if (frame->index < frame->block->SuccessorCount()) {
+ if (frame->block != end &&
+ frame->index < frame->block->SuccessorCount()) {
// Process the next successor.
BasicBlock* succ = frame->block->SuccessorAt(frame->index++);
if (succ->rpo_number() == kBlockVisited1) continue;
@@ -562,9 +697,9 @@
backedges.Add(
std::pair<BasicBlock*, size_t>(frame->block, frame->index -
1),
zone_);
- if (succ->loop_end() < 0) {
+ if (!HasLoopNumber(succ)) {
// Assign a new loop number to the header if it doesn't have
one.
- succ->set_loop_end(num_loops++);
+ SetLoopNumber(succ, num_loops++);
}
} else {
// Push the successor onto the stack.
@@ -573,23 +708,32 @@
}
} else {
// Finished with all successors; pop the stack and add the block.
- order = order->Add(zone_, frame->block);
+ insert_after = insert_after->Add(zone_, frame->block);
frame->block->set_rpo_number(kBlockVisited1);
stack_depth--;
}
}
+
+ // Insert the result into any existing order.
+ if (insert_before == NULL) {
+ order_ = insert_after;
+ } else {
+ // There already is a list element for the entry block in the list,
hence
+    // we skip the first element of the sub-list to compensate for the
duplication.
+ DCHECK_EQ(insert_before->block, insert_after->block);
+ insert_before->next = insert_after->next;
+ }
// If no loops were encountered, then the order we computed was
correct.
- LoopInfo* loops = NULL;
if (num_loops != 0) {
// Otherwise, compute the loop information from the backedges in
order
// to perform a traversal that groups loop bodies together.
- loops = ComputeLoopInfo(stack, num_loops,
schedule_->BasicBlockCount(),
- &backedges);
+ ComputeLoopInfo(stack, num_loops, &backedges);
// Initialize the "loop stack". Note the entry could be a loop
header.
- LoopInfo* loop = entry->IsLoopHeader() ? &loops[entry->loop_end()] :
NULL;
- order = NULL;
+ LoopInfo* loop =
+ HasLoopNumber(entry) ? &loops_[GetLoopNumber(entry)] : NULL;
+ order_ = NULL;
// Perform an iterative post-order traversal, visiting loop bodies
before
// edges that lead out of loops. Visits each block once, but linking
loop
@@ -604,14 +748,14 @@
if (frame->index < block->SuccessorCount()) {
// Process the next normal successor.
succ = block->SuccessorAt(frame->index++);
- } else if (block->IsLoopHeader()) {
+ } else if (HasLoopNumber(block)) {
// Process additional outgoing edges from the loop header.
if (block->rpo_number() == kBlockOnStack) {
// Finish the loop body the first time the header is left on
the
// stack.
DCHECK(loop != NULL && loop->header == block);
- loop->start = order->Add(zone_, block);
- order = loop->end;
+ loop->start = order_->Add(zone_, block);
+ order_ = loop->end;
block->set_rpo_number(kBlockVisited2);
// Pop the loop stack and continue visiting outgoing edges
within
// the context of the outer loop, if any.
@@ -623,7 +767,7 @@
// Use the next outgoing edge if there are any.
int outgoing_index =
static_cast<int>(frame->index - block->SuccessorCount());
- LoopInfo* info = &loops[block->loop_end()];
+ LoopInfo* info = &loops_[GetLoopNumber(block)];
DCHECK(loop != info);
if (info->outgoing != NULL &&
outgoing_index < info->outgoing->length()) {
@@ -644,53 +788,47 @@
} else {
// Push the successor onto the stack.
stack_depth = Push(stack, stack_depth, succ, kBlockUnvisited2);
- if (succ->IsLoopHeader()) {
+ if (HasLoopNumber(succ)) {
// Push the inner loop onto the loop stack.
- DCHECK(succ->loop_end() >= 0 && succ->loop_end() <
num_loops);
- LoopInfo* next = &loops[succ->loop_end()];
- next->end = order;
+ DCHECK(GetLoopNumber(succ) < num_loops);
+ LoopInfo* next = &loops_[GetLoopNumber(succ)];
+ next->end = order_;
next->prev = loop;
loop = next;
}
}
} else {
// Finished with all successors of the current block.
- if (block->IsLoopHeader()) {
+ if (HasLoopNumber(block)) {
// If we are going to pop a loop header, then add its entire
body.
- LoopInfo* info = &loops[block->loop_end()];
+ LoopInfo* info = &loops_[GetLoopNumber(block)];
for (BlockList* l = info->start; true; l = l->next) {
if (l->next == info->end) {
- l->next = order;
- info->end = order;
+ l->next = order_;
+ info->end = order_;
break;
}
}
- order = info->start;
+ order_ = info->start;
} else {
// Pop a single node off the stack and add it to the order.
- order = order->Add(zone_, block);
+ order_ = order_->Add(zone_, block);
block->set_rpo_number(kBlockVisited2);
}
stack_depth--;
}
}
}
-
- // Construct the final order from the list.
- BasicBlockVector* final_order = schedule_->rpo_order();
- order->Serialize(final_order);
// Compute the correct loop headers and set the correct loop ends.
LoopInfo* current_loop = NULL;
BasicBlock* current_header = NULL;
int loop_depth = 0;
- for (BasicBlockVectorIter i = final_order->begin(); i !=
final_order->end();
- ++i) {
- BasicBlock* current = *i;
+ for (BlockList* l = order_; l != NULL; l = l->next) {
+ BasicBlock* current = l->block;
// Finish the previous loop(s) if we just exited them.
- while (current_header != NULL &&
- current->rpo_number() >= current_header->loop_end()) {
+ while (current_header != NULL && current ==
current_header->loop_end()) {
DCHECK(current_header->IsLoopHeader());
DCHECK(current_loop != NULL);
current_loop = current_loop->prev;
@@ -700,13 +838,11 @@
current->set_loop_header(current_header);
// Push a new loop onto the stack if this loop is a loop header.
- if (current->IsLoopHeader()) {
+ if (HasLoopNumber(current)) {
loop_depth++;
- current_loop = &loops[current->loop_end()];
+ current_loop = &loops_[GetLoopNumber(current)];
BlockList* end = current_loop->end;
- current->set_loop_end(end == NULL
- ? static_cast<int>(final_order->size())
- : end->block->rpo_number());
+ current->set_loop_end(end == NULL ? BeyondEndSentinel() :
end->block);
current_header = current_loop->header;
Trace("B%d is a loop header, increment loop depth to %d\n",
current->id().ToInt(), loop_depth);
@@ -722,109 +858,31 @@
current->loop_header()->id().ToInt(), current->loop_depth());
}
}
-
- // Compute the assembly order (non-deferred code first, deferred code
- // afterwards).
- int32_t number = 0;
- for (auto block : *final_order) {
- if (block->deferred()) continue;
- block->set_ao_number(number++);
- }
- for (auto block : *final_order) {
- if (!block->deferred()) continue;
- block->set_ao_number(number++);
- }
-
-#if DEBUG
- if (FLAG_trace_turbo_scheduler) PrintRPO(num_loops, loops,
final_order);
- VerifySpecialRPO(num_loops, loops, final_order);
-#endif
- }
-
- private:
- // Numbering for BasicBlockData.rpo_number_ for this block traversal:
- static const int kBlockOnStack = -2;
- static const int kBlockVisited1 = -3;
- static const int kBlockVisited2 = -4;
- static const int kBlockUnvisited1 = -1;
- static const int kBlockUnvisited2 = kBlockVisited1;
-
- struct SpecialRPOStackFrame {
- BasicBlock* block;
- size_t index;
- };
-
- struct BlockList {
- BasicBlock* block;
- BlockList* next;
-
- BlockList* Add(Zone* zone, BasicBlock* b) {
- BlockList* list =
static_cast<BlockList*>(zone->New(sizeof(BlockList)));
- list->block = b;
- list->next = this;
- return list;
- }
-
- void Serialize(BasicBlockVector* final_order) {
- for (BlockList* l = this; l != NULL; l = l->next) {
- l->block->set_rpo_number(static_cast<int>(final_order->size()));
- final_order->push_back(l->block);
- }
- }
- };
-
- struct LoopInfo {
- BasicBlock* header;
- ZoneList<BasicBlock*>* outgoing;
- BitVector* members;
- LoopInfo* prev;
- BlockList* end;
- BlockList* start;
-
- void AddOutgoing(Zone* zone, BasicBlock* block) {
- if (outgoing == NULL) {
- outgoing = new (zone) ZoneList<BasicBlock*>(2, zone);
- }
- outgoing->Add(block, zone);
- }
- };
-
- int Push(SpecialRPOStackFrame* stack, int depth, BasicBlock* child,
- int unvisited) {
- if (child->rpo_number() == unvisited) {
- stack[depth].block = child;
- stack[depth].index = 0;
- child->set_rpo_number(kBlockOnStack);
- return depth + 1;
- }
- return depth;
}
// Computes loop membership from the backedges of the control flow graph.
- LoopInfo* ComputeLoopInfo(
- SpecialRPOStackFrame* queue, int num_loops, size_t num_blocks,
- ZoneList<std::pair<BasicBlock*, size_t> >* backedges) {
- LoopInfo* loops = zone_->NewArray<LoopInfo>(num_loops);
- memset(loops, 0, num_loops * sizeof(LoopInfo));
+ void ComputeLoopInfo(SpecialRPOStackFrame* queue, size_t num_loops,
+ ZoneList<std::pair<BasicBlock*, size_t> >*
backedges) {
+ loops_.resize(num_loops, LoopInfo());
// Compute loop membership starting from backedges.
// O(max(loop_depth) * max(|loop|)
for (int i = 0; i < backedges->length(); i++) {
BasicBlock* member = backedges->at(i).first;
BasicBlock* header = member->SuccessorAt(backedges->at(i).second);
- int loop_num = header->loop_end();
- if (loops[loop_num].header == NULL) {
- loops[loop_num].header = header;
- loops[loop_num].members =
- new (zone_) BitVector(static_cast<int>(num_blocks), zone_);
+ size_t loop_num = GetLoopNumber(header);
+ if (loops_[loop_num].header == NULL) {
+ loops_[loop_num].header = header;
+ loops_[loop_num].members = new (zone_)
+ BitVector(static_cast<int>(schedule_->BasicBlockCount()),
zone_);
}
int queue_length = 0;
if (member != header) {
// As long as the header doesn't have a backedge to itself,
// Push the member onto the queue and process its predecessors.
- if (!loops[loop_num].members->Contains(member->id().ToInt())) {
- loops[loop_num].members->Add(member->id().ToInt());
+ if (!loops_[loop_num].members->Contains(member->id().ToInt())) {
+ loops_[loop_num].members->Add(member->id().ToInt());
}
queue[queue_length++].block = member;
}
@@ -836,47 +894,46 @@
for (size_t i = 0; i < block->PredecessorCount(); i++) {
BasicBlock* pred = block->PredecessorAt(i);
if (pred != header) {
- if (!loops[loop_num].members->Contains(pred->id().ToInt())) {
- loops[loop_num].members->Add(pred->id().ToInt());
+ if (!loops_[loop_num].members->Contains(pred->id().ToInt())) {
+ loops_[loop_num].members->Add(pred->id().ToInt());
queue[queue_length++].block = pred;
}
}
}
}
}
- return loops;
}
#if DEBUG
- void PrintRPO(int num_loops, LoopInfo* loops, BasicBlockVector* order) {
+ void PrintRPO() {
OFStream os(stdout);
- os << "-- RPO with " << num_loops << " loops ";
- if (num_loops > 0) {
- os << "(";
- for (int i = 0; i < num_loops; i++) {
+ os << "RPO with " << loops_.size() << " loops";
+ if (loops_.size() > 0) {
+ os << " (";
+ for (size_t i = 0; i < loops_.size(); i++) {
if (i > 0) os << " ";
- os << "B" << loops[i].header->id();
+ os << "B" << loops_[i].header->id();
}
- os << ") ";
+ os << ")";
}
- os << "-- \n";
+ os << ":\n";
- for (size_t i = 0; i < order->size(); i++) {
- BasicBlock* block = (*order)[i];
+ for (BlockList* l = order_; l != NULL; l = l->next) {
+ BasicBlock* block = l->block;
BasicBlock::Id bid = block->id();
// TODO(jarin,svenpanne): Add formatting here once we have support
for
- // that in streams (we want an equivalent of PrintF("%5d:", i) here).
- os << i << ":";
- for (int j = 0; j < num_loops; j++) {
- bool membership = loops[j].members->Contains(bid.ToInt());
- bool range = loops[j].header->LoopContains(block);
+ // that in streams (we want an equivalent of PrintF("%5d:", x) here).
+ os << " " << block->rpo_number() << ":";
+ for (size_t j = 0; j < loops_.size(); j++) {
+ bool membership = loops_[j].members->Contains(bid.ToInt());
+ bool range = loops_[j].header->LoopContains(block);
os << (membership ? " |" : " ");
os << (range ? "x" : " ");
}
os << " B" << bid << ": ";
- if (block->loop_end() >= 0) {
- os << " range: [" << block->rpo_number() << ", " <<
block->loop_end()
- << ")";
+ if (block->loop_end() != NULL) {
+ os << " range: [" << block->rpo_number() << ", "
+ << block->loop_end()->rpo_number() << ")";
}
if (block->loop_header() != NULL) {
os << " header: B" << block->loop_header()->id();
@@ -888,21 +945,22 @@
}
}
- void VerifySpecialRPO(int num_loops, LoopInfo* loops,
- BasicBlockVector* order) {
+ void VerifySpecialRPO() {
+ BasicBlockVector* order = schedule_->rpo_order();
DCHECK(order->size() > 0);
DCHECK((*order)[0]->id().ToInt() == 0); // entry should be first.
- for (int i = 0; i < num_loops; i++) {
- LoopInfo* loop = &loops[i];
+ for (size_t i = 0; i < loops_.size(); i++) {
+ LoopInfo* loop = &loops_[i];
BasicBlock* header = loop->header;
+ BasicBlock* end = header->loop_end();
DCHECK(header != NULL);
DCHECK(header->rpo_number() >= 0);
DCHECK(header->rpo_number() < static_cast<int>(order->size()));
- DCHECK(header->loop_end() >= 0);
- DCHECK(header->loop_end() <= static_cast<int>(order->size()));
- DCHECK(header->loop_end() > header->rpo_number());
+ DCHECK(end != NULL);
+ DCHECK(end->rpo_number() <= static_cast<int>(order->size()));
+ DCHECK(end->rpo_number() > header->rpo_number());
DCHECK(header->loop_header() != header);
// Verify the start ... end list relationship.
@@ -922,15 +980,16 @@
DCHECK(links < static_cast<int>(2 * order->size())); // cycle?
}
DCHECK(links > 0);
- DCHECK(links == (header->loop_end() - header->rpo_number()));
+ DCHECK(links == end->rpo_number() - header->rpo_number());
DCHECK(end_found);
// Check the contiguousness of loops.
- int count = 0;
+ // TODO(mstarzinger): Loop membership bit-vector becomes stale.
+ /*int count = 0;
for (int j = 0; j < static_cast<int>(order->size()); j++) {
BasicBlock* block = order->at(j);
DCHECK(block->rpo_number() == j);
- if (j < header->rpo_number() || j >= header->loop_end()) {
+ if (j < header->rpo_number() || j >= end->rpo_number()) {
DCHECK(!loop->members->Contains(block->id().ToInt()));
} else {
if (block == header) {
@@ -941,13 +1000,16 @@
count++;
}
}
- DCHECK(links == count);
+ DCHECK(links == count);*/
}
}
#endif // DEBUG
Zone* zone_;
Schedule* schedule_;
+ BlockList* order_;
+ ZoneVector<LoopInfo> loops_;
+ BasicBlock* beyond_end_;
};
@@ -958,6 +1020,9 @@
SpecialRPONumberer numberer(zone, schedule);
numberer.ComputeSpecialRPO();
+ numberer.SerializeAOIntoSchedule();
+ numberer.SerializeRPOIntoSchedule();
+ numberer.PrintAndVerifySpecialRPO();
return schedule->rpo_order();
}
@@ -965,40 +1030,39 @@
void Scheduler::ComputeSpecialRPONumbering() {
Trace("--- COMPUTING SPECIAL RPO ----------------------------------\n");
- SpecialRPONumberer numberer(zone_, schedule_);
- numberer.ComputeSpecialRPO();
+ // Compute the special reverse-post-order for basic blocks.
+ special_rpo_ = new (zone_) SpecialRPONumberer(zone_, schedule_);
+ special_rpo_->ComputeSpecialRPO();
}
void Scheduler::GenerateImmediateDominatorTree() {
Trace("--- IMMEDIATE BLOCK DOMINATORS -----------------------------\n");
- // Build the dominator graph.
- // TODO(danno): consider using Lengauer & Tarjan's if this becomes too
slow.
- for (size_t i = 0; i < schedule_->rpo_order_.size(); i++) {
- BasicBlock* current_rpo = schedule_->rpo_order_[i];
- if (current_rpo != schedule_->start()) {
- BasicBlock::Predecessors::iterator current_pred =
- current_rpo->predecessors_begin();
- BasicBlock::Predecessors::iterator end =
current_rpo->predecessors_end();
- DCHECK(current_pred != end);
- BasicBlock* dominator = *current_pred;
- ++current_pred;
- // For multiple predecessors, walk up the RPO ordering until a common
- // dominator is found.
- int current_rpo_pos = GetRPONumber(current_rpo);
- while (current_pred != end) {
- // Don't examine backwards edges
- BasicBlock* pred = *current_pred;
- if (GetRPONumber(pred) < current_rpo_pos) {
- dominator = GetCommonDominator(dominator, *current_pred);
- }
- ++current_pred;
- }
- current_rpo->set_dominator(dominator);
- Trace("Block %d's idom is %d\n", current_rpo->id().ToInt(),
- dominator->id().ToInt());
+ // TODO(danno): Consider using Lengauer & Tarjan's if this becomes too
slow.
+
+ // Build the block dominator tree.
+ schedule_->start()->set_dominator_depth(0);
+ typedef SpecialRPONumberer::BlockList BlockList;
+ for (BlockList* l = special_rpo_->order_; l != NULL; l = l->next) {
+ BasicBlock* current = l->block;
+ if (current == schedule_->start()) continue;
+ BasicBlock::Predecessors::iterator pred =
current->predecessors_begin();
+ BasicBlock::Predecessors::iterator end = current->predecessors_end();
+ DCHECK(pred != end); // All blocks except start have predecessors.
+ BasicBlock* dominator = *pred;
+ // For multiple predecessors, walk up the dominator tree until a common
+ // dominator is found. Visitation order guarantees that all
predecessors
+ // except for backwards edges have been visited.
+ for (++pred; pred != end; ++pred) {
+ // Don't examine backwards edges.
+ if ((*pred)->dominator_depth() < 0) continue;
+ dominator = GetCommonDominator(dominator, *pred);
}
+ current->set_dominator(dominator);
+ current->set_dominator_depth(dominator->dominator_depth() + 1);
+ Trace("Block B%d's idom is B%d, depth = %d\n", current->id().ToInt(),
+ dominator->id().ToInt(), current->dominator_depth());
}
}
@@ -1087,8 +1151,10 @@
if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
DCHECK_EQ(schedule_->start(), data->minimum_block_);
data->minimum_block_ = schedule_->block(node);
- Trace("Fixing #%d:%s minimum_rpo = %d\n", node->id(),
- node->op()->mnemonic(), data->minimum_block_->rpo_number());
+ Trace("Fixing #%d:%s minimum_block = B%d, dominator_depth = %d\n",
+ node->id(), node->op()->mnemonic(),
+ data->minimum_block_->id().ToInt(),
+ data->minimum_block_->dominator_depth());
}
// No need to propagate unconstrained schedule early positions.
@@ -1098,14 +1164,14 @@
DCHECK(data->minimum_block_ != NULL);
Node::Uses uses = node->uses();
for (Node::Uses::iterator i = uses.begin(); i != uses.end(); ++i) {
- PropagateMinimumRPOToNode(data->minimum_block_, *i);
+ PropagateMinimumPositionToNode(data->minimum_block_, *i);
}
}
- // Propagates {block} as another minimum RPO placement into the given
{node}.
- // This has the net effect of computing the maximum of the minimum RPOs
for
- // all inputs to {node} when the queue has been fully processed.
- void PropagateMinimumRPOToNode(BasicBlock* block, Node* node) {
+ // Propagates {block} as another minimum position into the given {node}.
This
+ // has the net effect of computing the minimum dominator block of {node}
that
+ // still post-dominates all inputs to {node} when the queue is processed.
+ void PropagateMinimumPositionToNode(BasicBlock* block, Node* node) {
Scheduler::SchedulerData* data = scheduler_->GetData(node);
// No need to propagate to fixed node, it's guaranteed to be a root.
@@ -1114,17 +1180,29 @@
// Coupled nodes influence schedule early position of their control.
if (scheduler_->GetPlacement(node) == Scheduler::kCoupled) {
Node* control = NodeProperties::GetControlInput(node);
- PropagateMinimumRPOToNode(block, control);
+ PropagateMinimumPositionToNode(block, control);
}
- // Propagate new position if it is larger than the current.
- if (block->rpo_number() > data->minimum_block_->rpo_number()) {
+ // Propagate new position if it is deeper down the dominator tree than
the
+ // current. Note that all inputs need to have minimum block position
inside
+ // the dominator chain of {node}'s minimum block position.
+ DCHECK(InsideSameDominatorChain(block, data->minimum_block_));
+ if (block->dominator_depth() >
data->minimum_block_->dominator_depth()) {
data->minimum_block_ = block;
queue_.push(node);
- Trace("Propagating #%d:%s minimum_rpo = %d\n", node->id(),
- node->op()->mnemonic(), data->minimum_block_->rpo_number());
+ Trace("Propagating #%d:%s minimum_block = B%d, dominator_depth
= %d\n",
+ node->id(), node->op()->mnemonic(),
+ data->minimum_block_->id().ToInt(),
+ data->minimum_block_->dominator_depth());
}
}
+
+#if DEBUG
+ bool InsideSameDominatorChain(BasicBlock* b1, BasicBlock* b2) {
+ BasicBlock* dominator = scheduler_->GetCommonDominator(b1, b2);
+ return dominator == b1 || dominator == b2;
+ }
+#endif
Scheduler* scheduler_;
Schedule* schedule_;
@@ -1136,14 +1214,13 @@
Trace("--- SCHEDULE EARLY -----------------------------------------\n");
if (FLAG_trace_turbo_scheduler) {
Trace("roots: ");
- for (NodeVectorIter i = schedule_root_nodes_.begin();
- i != schedule_root_nodes_.end(); ++i) {
- Trace("#%d:%s ", (*i)->id(), (*i)->op()->mnemonic());
+ for (Node* node : schedule_root_nodes_) {
+ Trace("#%d:%s ", node->id(), node->op()->mnemonic());
}
Trace("\n");
}
- // Compute the minimum RPO for each node thereby determining the earliest
+ // Compute the minimum block for each node thereby determining the earliest
// position each node could be placed within a valid schedule.
ScheduleEarlyNodeVisitor schedule_early_visitor(zone_, this);
schedule_early_visitor.Run(&schedule_root_nodes_);
@@ -1204,17 +1281,20 @@
BasicBlock* block = GetCommonDominatorOfUses(node);
DCHECK_NOT_NULL(block);
- int min_rpo = scheduler_->GetData(node)->minimum_block_->rpo_number();
- Trace("Schedule late of #%d:%s is B%d at loop depth %d, minimum_rpo = %d\n",
+ // The schedule early block dominates the schedule late block.
+ BasicBlock* min_block = scheduler_->GetData(node)->minimum_block_;
+ DCHECK_EQ(min_block, scheduler_->GetCommonDominator(block, min_block));
+ Trace("Schedule late of #%d:%s is B%d at loop depth %d, minimum = B%d\n",
node->id(), node->op()->mnemonic(), block->id().ToInt(),
- block->loop_depth(), min_rpo);
+ block->loop_depth(), min_block->id().ToInt());
// Hoist nodes out of loops if possible. Nodes can be hoisted iteratively
- // into enclosing loop pre-headers until they would preceed their
- // ScheduleEarly position.
+ // into enclosing loop pre-headers until they would preceed their schedule
+ // early position.
BasicBlock* hoist_block = GetPreHeader(block);
- while (hoist_block != NULL && hoist_block->rpo_number() >= min_rpo) {
- Trace(" hoisting #%d:%s to block %d\n", node->id(),
+ while (hoist_block != NULL &&
+ hoist_block->dominator_depth() >= min_block->dominator_depth()) {
+ Trace(" hoisting #%d:%s to block B%d\n", node->id(),
node->op()->mnemonic(), hoist_block->id().ToInt());
DCHECK_LT(hoist_block->loop_depth(), block->loop_depth());
block = hoist_block;
@@ -1302,9 +1382,8 @@
Trace("--- SCHEDULE LATE ------------------------------------------\n");
if (FLAG_trace_turbo_scheduler) {
Trace("roots: ");
- for (NodeVectorIter i = schedule_root_nodes_.begin();
- i != schedule_root_nodes_.end(); ++i) {
- Trace("#%d:%s ", (*i)->id(), (*i)->op()->mnemonic());
+ for (Node* node : schedule_root_nodes_) {
+ Trace("#%d:%s ", node->id(), node->op()->mnemonic());
}
Trace("\n");
}
@@ -1312,15 +1391,29 @@
// Schedule: Places nodes in dominator block of all their uses.
ScheduleLateNodeVisitor schedule_late_visitor(zone_, this);
schedule_late_visitor.Run(&schedule_root_nodes_);
+}
+
+
+// -----------------------------------------------------------------------------
+// Phase 6: Seal the final schedule.
+
+
+void Scheduler::SealFinalSchedule() {
+ Trace("--- SEAL FINAL SCHEDULE ------------------------------------\n");
+
+ // Serialize the assembly order and reverse-post-order numbering.
+ special_rpo_->SerializeAOIntoSchedule();
+ special_rpo_->SerializeRPOIntoSchedule();
+ special_rpo_->PrintAndVerifySpecialRPO();
// Add collected nodes for basic blocks to their blocks in the right order.
int block_num = 0;
- for (NodeVectorVectorIter i = scheduled_nodes_.begin();
- i != scheduled_nodes_.end(); ++i) {
- for (NodeVectorRIter j = i->rbegin(); j != i->rend(); ++j) {
- schedule_->AddNode(schedule_->all_blocks_.at(block_num), *j);
+ for (NodeVector& nodes : scheduled_nodes_) {
+ BasicBlock::Id id = BasicBlock::Id::FromInt(block_num++);
+ BasicBlock* block = schedule_->GetBlockById(id);
+ for (NodeVectorRIter i = nodes.rbegin(); i != nodes.rend(); ++i) {
+ schedule_->AddNode(block, *i);
}
- block_num++;
}
}
@@ -1341,27 +1434,22 @@
// Iterate on phase 2: Compute special RPO and dominator tree.
// TODO(mstarzinger): Currently "iterate on" means "re-run". Fix that.
- BasicBlockVector* rpo = schedule_->rpo_order();
- for (BasicBlockVectorIter i = rpo->begin(); i != rpo->end(); ++i) {
- BasicBlock* block = *i;
+ for (BasicBlock* block : schedule_->all_blocks_) {
block->set_rpo_number(-1);
- block->set_loop_header(NULL);
- block->set_loop_depth(0);
- block->set_loop_end(-1);
+ block->set_dominator_depth(-1);
+ block->set_dominator(NULL);
}
- schedule_->rpo_order()->clear();
- SpecialRPONumberer numberer(zone_, schedule_);
- numberer.ComputeSpecialRPO();
+ special_rpo_->UpdateSpecialRPO(block, schedule_->block(node));
GenerateImmediateDominatorTree();
- scheduled_nodes_.resize(schedule_->BasicBlockCount(), NodeVector(zone_));
// Move previously planned nodes.
// TODO(mstarzinger): Improve that by supporting bulk moves.
+ scheduled_nodes_.resize(schedule_->BasicBlockCount(), NodeVector(zone_));
MovePlannedNodes(block, schedule_->block(node));
if (FLAG_trace_turbo_scheduler) {
OFStream os(stdout);
- os << "Schedule after control flow fusion:" << *schedule_;
+ os << "Schedule after control flow fusion:\n" << *schedule_;
}
}
=======================================
--- /branches/bleeding_edge/src/compiler/scheduler.h Wed Oct 29 17:37:10
2014 UTC
+++ /branches/bleeding_edge/src/compiler/scheduler.h Wed Nov 5 10:10:28
2014 UTC
@@ -16,6 +16,8 @@
namespace internal {
namespace compiler {
+class SpecialRPONumberer;
+
// Computes a schedule from a graph, placing nodes into basic blocks and
// ordering the basic blocks in the special RPO order.
class Scheduler {
@@ -60,6 +62,7 @@
NodeVector schedule_root_nodes_; // Fixed root nodes seed the worklist.
ZoneQueue<Node*> schedule_queue_; // Worklist of schedulable nodes.
ZoneVector<SchedulerData> node_data_; // Per-node data for all nodes.
+ SpecialRPONumberer* special_rpo_; // Special RPO numbering of blocks.
Scheduler(Zone* zone, Graph* graph, Schedule* schedule);
@@ -73,7 +76,6 @@
void IncrementUnscheduledUseCount(Node* node, int index, Node* from);
void DecrementUnscheduledUseCount(Node* node, int index, Node* from);
- inline int GetRPONumber(BasicBlock* block);
BasicBlock* GetCommonDominator(BasicBlock* b1, BasicBlock* b2);
// Phase 1: Build control-flow graph.
@@ -97,6 +99,9 @@
friend class ScheduleLateNodeVisitor;
void ScheduleLate();
+ // Phase 6: Seal the final schedule.
+ void SealFinalSchedule();
+
void FuseFloatingControl(BasicBlock* block, Node* node);
void MovePlannedNodes(BasicBlock* from, BasicBlock* to);
};
=======================================
--- /branches/bleeding_edge/test/cctest/compiler/test-instruction.cc Mon
Nov 3 13:41:56 2014 UTC
+++ /branches/bleeding_edge/test/cctest/compiler/test-instruction.cc Wed
Nov 5 10:10:28 2014 UTC
@@ -139,7 +139,7 @@
BasicBlock* block = *i;
CHECK_EQ(block->rpo_number(), R.BlockAt(block)->rpo_number().ToInt());
CHECK_EQ(block->id().ToInt(), R.BlockAt(block)->id().ToInt());
- CHECK_EQ(-1, block->loop_end());
+ CHECK_EQ(NULL, block->loop_end());
}
}
=======================================
--- /branches/bleeding_edge/test/cctest/compiler/test-scheduler.cc Wed Oct
29 15:27:27 2014 UTC
+++ /branches/bleeding_edge/test/cctest/compiler/test-scheduler.cc Wed Nov
5 10:10:28 2014 UTC
@@ -30,7 +30,7 @@
for (int i = 0; i < static_cast<int>(order->size()); i++) {
CHECK(order->at(i)->rpo_number() == i);
if (!loops_allowed) {
- CHECK_LT(order->at(i)->loop_end(), 0);
+ CHECK_EQ(NULL, order->at(i)->loop_end());
CHECK_EQ(NULL, order->at(i)->loop_header());
}
}
@@ -40,19 +40,21 @@
static void CheckLoop(BasicBlockVector* order, BasicBlock** blocks,
int body_size) {
BasicBlock* header = blocks[0];
- CHECK_GT(header->loop_end(), 0);
- CHECK_EQ(body_size, (header->loop_end() - header->rpo_number()));
+ BasicBlock* end = header->loop_end();
+ CHECK_NE(NULL, end);
+ CHECK_GT(end->rpo_number(), 0);
+ CHECK_EQ(body_size, end->rpo_number() - header->rpo_number());
for (int i = 0; i < body_size; i++) {
- int num = blocks[i]->rpo_number();
- CHECK(num >= header->rpo_number() && num < header->loop_end());
+ CHECK_GE(blocks[i]->rpo_number(), header->rpo_number());
+ CHECK_LT(blocks[i]->rpo_number(), end->rpo_number());
CHECK(header->LoopContains(blocks[i]));
CHECK(header->IsLoopHeader() || blocks[i]->loop_header() == header);
}
if (header->rpo_number() > 0) {
CHECK_NE(order->at(header->rpo_number() - 1)->loop_header(), header);
}
- if (header->loop_end() < static_cast<int>(order->size())) {
- CHECK_NE(order->at(header->loop_end())->loop_header(), header);
+ if (end->rpo_number() < static_cast<int>(order->size())) {
+ CHECK_NE(order->at(end->rpo_number())->loop_header(), header);
}
}
=======================================
---
/branches/bleeding_edge/test/unittests/compiler/register-allocator-unittest.cc
Tue Nov 4 09:21:12 2014 UTC
+++
/branches/bleeding_edge/test/unittests/compiler/register-allocator-unittest.cc
Wed Nov 5 10:10:28 2014 UTC
@@ -97,7 +97,7 @@
if (loop_header.IsValid()) {
basic_block->set_loop_depth(1);
basic_block->set_loop_header(basic_blocks_[loop_header.ToSize()]);
- basic_block->set_loop_end(loop_end.ToInt());
+ basic_block->set_loop_end(basic_blocks_[loop_end.ToSize()]);
}
InstructionBlock* instruction_block =
new (zone()) InstructionBlock(zone(), basic_block);
--
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev
---
You received this message because you are subscribed to the Google Groups "v8-dev" group.
To unsubscribe from this group and stop receiving emails from it, send an email
to [email protected].
For more options, visit https://groups.google.com/d/optout.