Revision: 24717
Author: [email protected]
Date: Mon Oct 20 07:32:01 2014 UTC
Log: [turbofan] decouple register allocation from schedule and graph
[email protected], [email protected]
BUG=
Review URL: https://codereview.chromium.org/664683002
https://code.google.com/p/v8/source/detail?r=24717
Modified:
/branches/bleeding_edge/src/compiler/instruction-selector.cc
/branches/bleeding_edge/src/compiler/instruction.cc
/branches/bleeding_edge/src/compiler/instruction.h
/branches/bleeding_edge/src/compiler/register-allocator.cc
/branches/bleeding_edge/src/compiler/register-allocator.h
/branches/bleeding_edge/src/compiler/schedule.cc
/branches/bleeding_edge/src/compiler/schedule.h
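
Note on the overall shape of the change: the register allocator now runs off
data owned by the InstructionSequence (InstructionBlock and PhiInstruction,
keyed by RPO number) instead of reaching back into the Schedule and the Node
graph. A minimal standalone sketch of that shape, using simplified stand-in
types rather than the actual V8 classes:

  #include <vector>

  // Simplified stand-ins; the real classes live in src/compiler/instruction.h.
  struct SketchPhi {
    int virtual_register;
    std::vector<int> operands;  // one input vreg per predecessor, in order
  };

  struct SketchInstructionBlock {
    std::vector<int> predecessors;  // RPO numbers of predecessor blocks
    std::vector<int> successors;    // RPO numbers of successor blocks
    std::vector<SketchPhi> phis;
    int code_start = -1;  // first instruction index
    int code_end = -1;    // one past the last instruction index
  };

  // The allocator walks a dense, RPO-indexed vector of these blocks instead
  // of Schedule/BasicBlock/Node.
  typedef std::vector<SketchInstructionBlock> SketchInstructionBlocks;
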
=======================================
--- /branches/bleeding_edge/src/compiler/instruction-selector.cc Tue Oct 14 11:57:06 2014 UTC
+++ /branches/bleeding_edge/src/compiler/instruction-selector.cc Mon Oct 20 07:32:01 2014 UTC
@@ -853,8 +853,17 @@
void InstructionSelector::VisitPhi(Node* node) {
// TODO(bmeurer): Emit a PhiInstruction here.
- for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) {
- MarkAsUsed(*i);
+ PhiInstruction* phi = new (instruction_zone())
+     PhiInstruction(instruction_zone(), sequence()->GetVirtualRegister(node));
+ sequence()->InstructionBlockAt(current_block_->GetRpoNumber())->AddPhi(phi);
+ Node::Inputs inputs = node->inputs();
+ size_t j = 0;
+ for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end();
+ ++iter, ++j) {
+ MarkAsUsed(*iter);
+ // TODO(mstarzinger): Use a ValueInputIterator instead.
+ if (j >= current_block_->PredecessorCount()) continue;
+ phi->operands().push_back(sequence()->GetVirtualRegister(*iter));
}
}
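
The loop above still calls MarkAsUsed for every input, but now also records one
operand per control-flow predecessor; inputs at positions past
PredecessorCount() (e.g. the control input) are skipped. A standalone sketch of
just that operand collection, with plain ints standing in for Nodes and virtual
registers:

  #include <cstddef>
  #include <vector>

  // Keeps only the first predecessor_count inputs as phi operands; any
  // trailing inputs are ignored.
  std::vector<int> CollectPhiOperands(const std::vector<int>& input_vregs,
                                      size_t predecessor_count) {
    std::vector<int> operands;
    for (size_t j = 0; j < input_vregs.size(); ++j) {
      if (j >= predecessor_count) continue;
      operands.push_back(input_vregs[j]);
    }
    return operands;
  }
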
=======================================
--- /branches/bleeding_edge/src/compiler/instruction.cc Tue Oct 14 09:22:21 2014 UTC
+++ /branches/bleeding_edge/src/compiler/instruction.cc Mon Oct 20 07:32:01 2014 UTC
@@ -315,14 +315,77 @@
UNREACHABLE();
return os;
}
+
+
+static BasicBlock::RpoNumber GetRpo(BasicBlock* block) {
+ if (block == NULL) return BasicBlock::RpoNumber::Invalid();
+ return block->GetRpoNumber();
+}
+
+
+static BasicBlock::RpoNumber GetLoopEndRpo(const BasicBlock* block) {
+ if (!block->IsLoopHeader()) return BasicBlock::RpoNumber::Invalid();
+ return BasicBlock::RpoNumber::FromInt(block->loop_end());
+}
+
+
+InstructionBlock::InstructionBlock(Zone* zone, const BasicBlock* block)
+     : successors_(block->SuccessorCount(), BasicBlock::RpoNumber::Invalid(),
+                   zone),
+       predecessors_(block->PredecessorCount(), BasicBlock::RpoNumber::Invalid(),
+                     zone),
+ phis_(zone),
+ rpo_number_(block->GetRpoNumber()),
+ loop_header_(GetRpo(block->loop_header())),
+ loop_end_(GetLoopEndRpo(block)),
+ code_start_(-1),
+ code_end_(-1) {
+ // Map successors and predecessors
+ size_t index = 0;
+ for (BasicBlock::Successors::const_iterator it = block->successors_begin();
+ it != block->successors_end(); ++it, ++index) {
+ successors_[index] = (*it)->GetRpoNumber();
+ }
+ index = 0;
+ for (BasicBlock::Predecessors::const_iterator
+ it = block->predecessors_begin();
+ it != block->predecessors_end(); ++it, ++index) {
+ predecessors_[index] = (*it)->GetRpoNumber();
+ }
+}
+
+
+size_t InstructionBlock::PredecessorIndexOf(
+ BasicBlock::RpoNumber rpo_number) const {
+ size_t j = 0;
+ for (InstructionBlock::Predecessors::const_iterator i = predecessors_.begin();
+ i != predecessors_.end(); ++i, ++j) {
+ if (*i == rpo_number) break;
+ }
+ return j;
+}
+
+
+static void InitializeInstructionBlocks(Zone* zone, const Schedule* schedule,
+ InstructionBlocks* blocks) {
+ DCHECK(blocks->size() == schedule->rpo_order()->size());
+ size_t rpo_number = 0;
+ for (BasicBlockVector::const_iterator it = schedule->rpo_order()->begin();
+ it != schedule->rpo_order()->end(); ++it, ++rpo_number) {
+ DCHECK_EQ(NULL, (*blocks)[rpo_number]);
+ DCHECK((*it)->GetRpoNumber().ToSize() == rpo_number);
+ (*blocks)[rpo_number] = new (zone) InstructionBlock(zone, *it);
+ }
+}
InstructionSequence::InstructionSequence(Linkage* linkage, Graph* graph,
Schedule* schedule)
- : zone_(schedule->zone()),
+ : zone_(schedule->zone()), // TODO(dcarney): new zone.
node_count_(graph->NodeCount()),
node_map_(zone()->NewArray<int>(node_count_)),
- block_data_(static_cast<int>(schedule->BasicBlockCount()), zone()),
+ instruction_blocks_(static_cast<int>(schedule->rpo_order()->size()), NULL,
+                     zone()),
linkage_(linkage),
schedule_(schedule),
constants_(ConstantMap::key_compare(),
@@ -337,6 +400,7 @@
for (int i = 0; i < node_count_; ++i) {
node_map_[i] = -1;
}
+ InitializeInstructionBlocks(zone(), schedule, &instruction_blocks_);
}
@@ -406,6 +470,20 @@
}
}
}
+
+
+const InstructionBlock* InstructionSequence::GetInstructionBlock(
+ int instruction_index) const {
+ // TODO(turbofan): Optimize this.
+ for (;;) {
+ DCHECK_LE(0, instruction_index);
+ Instruction* instruction = InstructionAt(instruction_index--);
+ if (instruction->IsBlockStart()) {
+ return instruction_blocks_
+     [BlockStartInstruction::cast(instruction)->rpo_number().ToSize()];
+ }
+ }
+}
bool InstructionSequence::IsReference(int virtual_register) const {
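
GetInstructionBlock above walks backwards from an instruction index to the
nearest block-start instruction and then indexes instruction_blocks_ by that
block's RPO number; the TODO notes the linear scan is a placeholder. The same
idea as a standalone sketch over stand-in data (not the V8 API):

  #include <cassert>
  #include <vector>

  // block_start_rpo[i] holds the block's RPO number when instruction i starts
  // a block, and -1 otherwise.
  int RpoOfBlockContaining(int instruction_index,
                           const std::vector<int>& block_start_rpo) {
    for (;;) {
      assert(instruction_index >= 0);
      int rpo = block_start_rpo[instruction_index--];
      if (rpo >= 0) return rpo;
    }
  }
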
=======================================
--- /branches/bleeding_edge/src/compiler/instruction.h Wed Oct 15 08:23:24 2014 UTC
+++ /branches/bleeding_edge/src/compiler/instruction.h Mon Oct 20 07:32:01 2014 UTC
@@ -753,6 +753,81 @@
std::ostream& operator<<(std::ostream& os, const Constant& constant);
+
+// TODO(dcarney): this is a temporary hack. turn into an actual instruction.
+class PhiInstruction : public ZoneObject {
+ public:
+ PhiInstruction(Zone* zone, int virtual_register)
+ : virtual_register_(virtual_register), operands_(zone) {}
+
+ int virtual_register() const { return virtual_register_; }
+ const IntVector& operands() const { return operands_; }
+ IntVector& operands() { return operands_; }
+
+ private:
+ const int virtual_register_;
+ IntVector operands_;
+};
+
+
+// Analogue of BasicBlock for Instructions instead of Nodes.
+class InstructionBlock : public ZoneObject {
+ public:
+ explicit InstructionBlock(Zone* zone, const BasicBlock* block);
+
+ // Instruction indexes (used by the register allocator).
+ int first_instruction_index() const {
+ DCHECK(code_start_ >= 0);
+ DCHECK(code_end_ > 0);
+ DCHECK(code_end_ >= code_start_);
+ return code_start_;
+ }
+ int last_instruction_index() const {
+ DCHECK(code_start_ >= 0);
+ DCHECK(code_end_ > 0);
+ DCHECK(code_end_ >= code_start_);
+ return code_end_ - 1;
+ }
+
+ int32_t code_start() const { return code_start_; }
+ void set_code_start(int32_t start) { code_start_ = start; }
+
+ int32_t code_end() const { return code_end_; }
+ void set_code_end(int32_t end) { code_end_ = end; }
+
+ BasicBlock::RpoNumber rpo_number() const { return rpo_number_; }
+ BasicBlock::RpoNumber loop_header() const { return loop_header_; }
+ BasicBlock::RpoNumber loop_end() const {
+ DCHECK(IsLoopHeader());
+ return loop_end_;
+ }
+ inline bool IsLoopHeader() const { return loop_end_.IsValid(); }
+
+ typedef ZoneVector<BasicBlock::RpoNumber> Predecessors;
+ const Predecessors& predecessors() const { return predecessors_; }
+ size_t PredecessorCount() const { return predecessors_.size(); }
+ size_t PredecessorIndexOf(BasicBlock::RpoNumber rpo_number) const;
+
+ typedef ZoneVector<BasicBlock::RpoNumber> Successors;
+ const Successors& successors() const { return successors_; }
+ size_t SuccessorCount() const { return successors_.size(); }
+
+ typedef ZoneVector<PhiInstruction*> PhiInstructions;
+ const PhiInstructions& phis() const { return phis_; }
+ void AddPhi(PhiInstruction* phi) { phis_.push_back(phi); }
+
+ private:
+ Successors successors_;
+ Predecessors predecessors_;
+ PhiInstructions phis_;
+ // TODO(dcarney): probably don't need this.
+ BasicBlock::RpoNumber rpo_number_;
+ BasicBlock::RpoNumber loop_header_;
+ BasicBlock::RpoNumber loop_end_;
+ int32_t code_start_; // start index of arch-specific code.
+ int32_t code_end_; // end index of arch-specific code.
+};
+
typedef ZoneDeque<Constant> ConstantDeque;
typedef std::map<int, Constant, std::less<int>,
zone_allocator<std::pair<int, Constant> > > ConstantMap;
@@ -760,6 +835,7 @@
typedef ZoneDeque<Instruction*> InstructionDeque;
typedef ZoneDeque<PointerMap*> PointerMapDeque;
typedef ZoneVector<FrameStateDescriptor*> DeoptimizationVector;
+typedef ZoneVector<InstructionBlock*> InstructionBlocks;
// Represents architecture-specific generated code before, during, and after
// register allocation.
@@ -774,18 +850,32 @@
int node_count() const { return node_count_; }
int BasicBlockCount() const {
- return static_cast<int>(schedule_->rpo_order()->size());
+ return static_cast<int>(instruction_blocks_.size());
}
BasicBlock* BlockAt(int rpo_number) const {
return (*schedule_->rpo_order())[rpo_number];
}
- BasicBlock* GetContainingLoop(BasicBlock* block) {
- return block->loop_header();
+ InstructionBlock* InstructionBlockAt(BasicBlock::RpoNumber rpo_number) {
+ return GetBlock(rpo_number);
+ }
+
+ const InstructionBlock* InstructionBlockAt(
+ BasicBlock::RpoNumber rpo_number) const {
+ return GetBlock(rpo_number);
+ }
+
+ // TODO(dcarney): move to register allocator.
+ const InstructionBlock* GetContainingLoop(
+ const InstructionBlock* block) const {
+ BasicBlock::RpoNumber index = block->loop_header();
+ if (!index.IsValid()) return NULL;
+ return instruction_blocks_[index.ToInt()];
}
BasicBlock* GetBasicBlock(int instruction_index);
+ const InstructionBlock* GetInstructionBlock(int instruction_index) const;
int GetVirtualRegister(const Node* node);
// TODO(dcarney): find a way to remove this.
@@ -828,26 +918,32 @@
void StartBlock(BasicBlock* block);
void EndBlock(BasicBlock* block);
void set_code_start(BasicBlock* block, int start) {
- return GetBlockData(block->GetRpoNumber()).set_code_start(start);
+ return GetBlock(block->GetRpoNumber())->set_code_start(start);
}
void set_code_end(BasicBlock* block, int end) {
- return GetBlockData(block->GetRpoNumber()).set_code_end(end);
+ return GetBlock(block->GetRpoNumber())->set_code_end(end);
}
// TODO(dcarney): use RpoNumber for all of the below.
int code_start(BasicBlock::RpoNumber rpo_number) const {
- return GetBlockData(rpo_number).code_start();
+ return GetBlock(rpo_number)->code_start();
}
int code_start(BasicBlock* block) const {
- return GetBlockData(block->GetRpoNumber()).code_start();
+ return GetBlock(block->GetRpoNumber())->code_start();
}
int code_end(BasicBlock* block) const {
- return GetBlockData(block->GetRpoNumber()).code_end();
+ return GetBlock(block->GetRpoNumber())->code_end();
}
int first_instruction_index(BasicBlock* block) const {
- return GetBlockData(block->GetRpoNumber()).first_instruction_index();
+ return GetBlock(block->GetRpoNumber())->first_instruction_index();
}
int last_instruction_index(BasicBlock* block) const {
- return GetBlockData(block->GetRpoNumber()).last_instruction_index();
+ return GetBlock(block->GetRpoNumber())->last_instruction_index();
+ }
+ int first_instruction_index(InstructionBlock* block) const {
+ return GetBlock(block->rpo_number())->first_instruction_index();
+ }
+ int last_instruction_index(InstructionBlock* block) const {
+ return GetBlock(block->rpo_number())->last_instruction_index();
}
int AddConstant(Node* node, Constant constant) {
@@ -892,51 +988,19 @@
int GetFrameStateDescriptorCount();
private:
- class BlockData {
- public:
- BlockData() : code_start_(-1), code_end_(-1) {}
- // Instruction indexes (used by the register allocator).
- int first_instruction_index() const {
- DCHECK(code_start_ >= 0);
- DCHECK(code_end_ > 0);
- DCHECK(code_end_ >= code_start_);
- return code_start_;
- }
- int last_instruction_index() const {
- DCHECK(code_start_ >= 0);
- DCHECK(code_end_ > 0);
- DCHECK(code_end_ >= code_start_);
- return code_end_ - 1;
- }
-
- int32_t code_start() const { return code_start_; }
- void set_code_start(int32_t start) { code_start_ = start; }
-
- int32_t code_end() const { return code_end_; }
- void set_code_end(int32_t end) { code_end_ = end; }
-
- private:
- int32_t code_start_; // start index of arch-specific code.
- int32_t code_end_; // end index of arch-specific code.
- };
-
- const BlockData& GetBlockData(BasicBlock::RpoNumber rpo_number) const {
- return block_data_[rpo_number.ToSize()];
- }
- BlockData& GetBlockData(BasicBlock::RpoNumber rpo_number) {
- return block_data_[rpo_number.ToSize()];
+ InstructionBlock* GetBlock(BasicBlock::RpoNumber rpo_number) const {
+ return instruction_blocks_[rpo_number.ToSize()];
}
friend std::ostream& operator<<(std::ostream& os,
const InstructionSequence& code);
typedef std::set<int, std::less<int>, ZoneIntAllocator> VirtualRegisterSet;
- typedef ZoneVector<BlockData> BlockDataVector;
Zone* zone_;
int node_count_;
int* node_map_;
- BlockDataVector block_data_;
+ InstructionBlocks instruction_blocks_;
Linkage* linkage_;
Schedule* schedule_;
ConstantMap constants_;
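
The per-block instruction range kept from BlockData uses a half-open
[code_start, code_end) convention, so last_instruction_index() is
code_end - 1. A tiny standalone sketch of that convention (names are
illustrative, not the V8 types):

  #include <cassert>

  struct CodeRange {
    int code_start;  // index of the first instruction in the block
    int code_end;    // one past the index of the last instruction

    int first_instruction_index() const {
      assert(code_start >= 0 && code_end >= code_start);
      return code_start;
    }
    int last_instruction_index() const {
      assert(code_start >= 0 && code_end > 0 && code_end >= code_start);
      return code_end - 1;
    }
  };
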
=======================================
--- /branches/bleeding_edge/src/compiler/register-allocator.cc Tue Oct 14 08:51:22 2014 UTC
+++ /branches/bleeding_edge/src/compiler/register-allocator.cc Mon Oct 20 07:32:01 2014 UTC
@@ -4,7 +4,6 @@
#include "src/compiler/register-allocator.h"
-#include "src/compiler/generic-node-inl.h"
#include "src/compiler/linkage.h"
#include "src/hydrogen.h"
#include "src/string-stream.h"
@@ -524,46 +523,40 @@
}
-BitVector* RegisterAllocator::ComputeLiveOut(BasicBlock* block) {
+BitVector* RegisterAllocator::ComputeLiveOut(const InstructionBlock* block) {
// Compute live out for the given block, except not including backward
// successor edges.
BitVector* live_out =
new (zone()) BitVector(code()->VirtualRegisterCount(), zone());
// Process all successor blocks.
- for (BasicBlock::Successors::iterator i = block->successors_begin();
- i != block->successors_end(); ++i) {
+ for (auto succ : block->successors()) {
// Add values live on entry to the successor. Note the successor's
// live_in will not be computed yet for backwards edges.
- BasicBlock* successor = *i;
- BitVector* live_in = live_in_sets_[successor->rpo_number()];
+ BitVector* live_in = live_in_sets_[succ.ToSize()];
if (live_in != NULL) live_out->Union(*live_in);
// All phi input operands corresponding to this successor edge are live
// out from this block.
- size_t index = successor->PredecessorIndexOf(block);
+ const InstructionBlock* successor = code()->InstructionBlockAt(succ);
+ size_t index = successor->PredecessorIndexOf(block->rpo_number());
DCHECK(index < successor->PredecessorCount());
- for (BasicBlock::const_iterator j = successor->begin();
- j != successor->end(); ++j) {
- Node* phi = *j;
- if (phi->opcode() != IrOpcode::kPhi) continue;
- Node* input = phi->InputAt(static_cast<int>(index));
- live_out->Add(code()->GetVirtualRegister(input));
+ for (auto phi : successor->phis()) {
+ live_out->Add(phi->operands()[index]);
}
}
return live_out;
}
-void RegisterAllocator::AddInitialIntervals(BasicBlock* block,
+void RegisterAllocator::AddInitialIntervals(const InstructionBlock* block,
BitVector* live_out) {
// Add an interval that includes the entire block to the live range for
// each live_out value.
- LifetimePosition start = LifetimePosition::FromInstructionIndex(
- code()->first_instruction_index(block));
- LifetimePosition end =
- LifetimePosition::FromInstructionIndex(
- code()->last_instruction_index(block)).NextInstruction();
+ LifetimePosition start =
+     LifetimePosition::FromInstructionIndex(block->first_instruction_index());
+ LifetimePosition end = LifetimePosition::FromInstructionIndex(
+     block->last_instruction_index()).NextInstruction();
BitVector::Iterator iterator(live_out);
while (!iterator.Done()) {
int operand_index = iterator.Current();
@@ -651,8 +644,8 @@
}
-GapInstruction* RegisterAllocator::GetLastGap(BasicBlock* block) {
- int last_instruction = code()->last_instruction_index(block);
+GapInstruction* RegisterAllocator::GetLastGap(const InstructionBlock* block) {
+ int last_instruction = block->last_instruction_index();
return code()->GapAt(last_instruction - 1);
}
@@ -729,9 +722,9 @@
}
-void RegisterAllocator::MeetRegisterConstraints(BasicBlock* block) {
- int start = code()->first_instruction_index(block);
- int end = code()->last_instruction_index(block);
+void RegisterAllocator::MeetRegisterConstraints(const InstructionBlock* block) {
+ int start = block->first_instruction_index();
+ int end = block->last_instruction_index();
DCHECK_NE(-1, start);
for (int i = start; i <= end; ++i) {
if (code()->IsGapAt(i)) {
@@ -752,8 +745,8 @@
void RegisterAllocator::MeetRegisterConstraintsForLastInstructionInBlock(
- BasicBlock* block) {
- int end = code()->last_instruction_index(block);
+ const InstructionBlock* block) {
+ int end = block->last_instruction_index();
Instruction* last_instruction = InstructionAt(end);
for (size_t i = 0; i < last_instruction->OutputCount(); i++) {
InstructionOperand* output_operand = last_instruction->OutputAt(i);
@@ -771,10 +764,10 @@
assigned = true;
}
- for (BasicBlock::Successors::iterator succ = block->successors_begin();
- succ != block->successors_end(); ++succ) {
- DCHECK((*succ)->PredecessorCount() == 1);
- int gap_index = code()->first_instruction_index(*succ) + 1;
+ for (auto succ : block->successors()) {
+ const InstructionBlock* successor = code()->InstructionBlockAt(succ);
+ DCHECK(successor->PredecessorCount() == 1);
+ int gap_index = successor->first_instruction_index() + 1;
DCHECK(code()->IsGapAt(gap_index));
// Create an unconstrained operand for the same virtual register
@@ -788,10 +781,10 @@
}
if (!assigned) {
- for (BasicBlock::Successors::iterator succ = block->successors_begin();
- succ != block->successors_end(); ++succ) {
- DCHECK((*succ)->PredecessorCount() == 1);
- int gap_index = code()->first_instruction_index(*succ) + 1;
+ for (auto succ : block->successors()) {
+ const InstructionBlock* successor = code()->InstructionBlockAt(succ);
+ DCHECK(successor->PredecessorCount() == 1);
+ int gap_index = successor->first_instruction_index() + 1;
range->SetSpillStartIndex(gap_index);
// This move to spill operand is not a real use. Liveness analysis
@@ -936,14 +929,14 @@
}
-void RegisterAllocator::ProcessInstructions(BasicBlock* block,
+void RegisterAllocator::ProcessInstructions(const InstructionBlock* block,
BitVector* live) {
- int block_start = code()->first_instruction_index(block);
+ int block_start = block->first_instruction_index();
LifetimePosition block_start_position =
LifetimePosition::FromInstructionIndex(block_start);
- for (int index = code()->last_instruction_index(block); index >= block_start;
+ for (int index = block->last_instruction_index(); index >= block_start;
index--) {
LifetimePosition curr_position =
LifetimePosition::FromInstructionIndex(index);
@@ -1059,44 +1052,35 @@
}
-void RegisterAllocator::ResolvePhis(BasicBlock* block) {
- for (BasicBlock::const_iterator i = block->begin(); i != block->end(); ++i) {
- Node* phi = *i;
- if (phi->opcode() != IrOpcode::kPhi) continue;
-
+void RegisterAllocator::ResolvePhis(const InstructionBlock* block) {
+ for (auto phi : block->phis()) {
UnallocatedOperand* phi_operand =
new (code_zone()) UnallocatedOperand(UnallocatedOperand::NONE);
- int phi_vreg = code()->GetVirtualRegister(phi);
+ int phi_vreg = phi->virtual_register();
phi_operand->set_virtual_register(phi_vreg);
- size_t j = 0;
- Node::Inputs inputs = phi->inputs();
- for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end();
- ++iter, ++j) {
- Node* op = *iter;
- // TODO(mstarzinger): Use a ValueInputIterator instead.
- if (j >= block->PredecessorCount()) continue;
+ for (size_t i = 0; i < phi->operands().size(); ++i) {
UnallocatedOperand* operand =
new (code_zone()) UnallocatedOperand(UnallocatedOperand::ANY);
- operand->set_virtual_register(code()->GetVirtualRegister(op));
- BasicBlock* cur_block = block->PredecessorAt(j);
+ operand->set_virtual_register(phi->operands()[i]);
+ InstructionBlock* cur_block =
+ code()->InstructionBlockAt(block->predecessors()[i]);
// The gap move must be added without any special processing as in
// the AddConstraintsGapMove.
- code()->AddGapMove(code()->last_instruction_index(cur_block) - 1, operand,
+ code()->AddGapMove(cur_block->last_instruction_index() - 1, operand,
phi_operand);
- Instruction* branch =
- InstructionAt(code()->last_instruction_index(cur_block));
+ Instruction* branch = InstructionAt(cur_block->last_instruction_index());
DCHECK(!branch->HasPointerMap());
USE(branch);
}
LiveRange* live_range = LiveRangeFor(phi_vreg);
BlockStartInstruction* block_start =
- code()->GetBlockStart(block->GetRpoNumber());
+ code()->GetBlockStart(block->rpo_number());
block_start->GetOrCreateParallelMove(GapInstruction::START, code_zone())
->AddMove(phi_operand, live_range->GetSpillOperand(), code_zone());
- live_range->SetSpillStartIndex(code()->first_instruction_index(block));
+ live_range->SetSpillStartIndex(block->first_instruction_index());
// We use the phi-ness of some nodes in some later heuristics.
live_range->set_is_phi(true);
@@ -1132,7 +1116,8 @@
void RegisterAllocator::MeetRegisterConstraints() {
RegisterAllocatorPhase phase("L_Register constraints", this);
for (int i = 0; i < code()->BasicBlockCount(); ++i) {
- MeetRegisterConstraints(code()->BlockAt(i));
+ MeetRegisterConstraints(
+ code()->InstructionBlockAt(BasicBlock::RpoNumber::FromInt(i)));
if (!AllocationOk()) return;
}
}
@@ -1143,17 +1128,18 @@
// Process the blocks in reverse order.
for (int i = code()->BasicBlockCount() - 1; i >= 0; --i) {
- ResolvePhis(code()->BlockAt(i));
+ ResolvePhis(code()->InstructionBlockAt(BasicBlock::RpoNumber::FromInt(i)));
}
}
-void RegisterAllocator::ResolveControlFlow(LiveRange* range, BasicBlock* block,
- BasicBlock* pred) {
- LifetimePosition pred_end = LifetimePosition::FromInstructionIndex(
- code()->last_instruction_index(pred));
- LifetimePosition cur_start = LifetimePosition::FromInstructionIndex(
- code()->first_instruction_index(block));
+void RegisterAllocator::ResolveControlFlow(LiveRange* range,
+ const InstructionBlock* block,
+ const InstructionBlock* pred) {
+ LifetimePosition pred_end =
+     LifetimePosition::FromInstructionIndex(pred->last_instruction_index());
+ LifetimePosition cur_start =
+     LifetimePosition::FromInstructionIndex(block->first_instruction_index());
LiveRange* pred_cover = NULL;
LiveRange* cur_cover = NULL;
LiveRange* cur_range = range;
@@ -1178,13 +1164,12 @@
if (!pred_op->Equals(cur_op)) {
GapInstruction* gap = NULL;
if (block->PredecessorCount() == 1) {
- gap = code()->GapAt(code()->first_instruction_index(block));
+ gap = code()->GapAt(block->first_instruction_index());
} else {
DCHECK(pred->SuccessorCount() == 1);
gap = GetLastGap(pred);
- Instruction* branch =
- InstructionAt(code()->last_instruction_index(pred));
+ Instruction* branch = InstructionAt(pred->last_instruction_index());
DCHECK(!branch->HasPointerMap());
USE(branch);
}
@@ -1211,8 +1196,9 @@
}
-BasicBlock* RegisterAllocator::GetBlock(LifetimePosition pos) {
- return code()->GetBasicBlock(pos.InstructionIndex());
+const InstructionBlock* RegisterAllocator::GetInstructionBlock(
+ LifetimePosition pos) {
+ return code()->GetInstructionBlock(pos.InstructionIndex());
}
@@ -1232,7 +1218,8 @@
if (first_range->End().Value() == pos.Value()) {
bool should_insert = true;
if (IsBlockBoundary(pos)) {
- should_insert = CanEagerlyResolveControlFlow(GetBlock(pos));
+ should_insert =
+ CanEagerlyResolveControlFlow(GetInstructionBlock(pos));
}
if (should_insert) {
ParallelMove* move = GetConnectingParallelMove(pos);
@@ -1252,24 +1239,25 @@
}
-bool RegisterAllocator::CanEagerlyResolveControlFlow(BasicBlock* block) const {
+bool RegisterAllocator::CanEagerlyResolveControlFlow(
+ const InstructionBlock* block) const {
if (block->PredecessorCount() != 1) return false;
- return block->PredecessorAt(0)->rpo_number() == block->rpo_number() - 1;
+ return block->predecessors()[0].IsNext(block->rpo_number());
}
void RegisterAllocator::ResolveControlFlow() {
RegisterAllocatorPhase phase("L_Resolve control flow", this);
for (int block_id = 1; block_id < code()->BasicBlockCount(); ++block_id) {
- BasicBlock* block = code()->BlockAt(block_id);
+ const InstructionBlock* block =
+     code()->InstructionBlockAt(BasicBlock::RpoNumber::FromInt(block_id));
if (CanEagerlyResolveControlFlow(block)) continue;
- BitVector* live = live_in_sets_[block->rpo_number()];
+ BitVector* live = live_in_sets_[block->rpo_number().ToInt()];
BitVector::Iterator iterator(live);
while (!iterator.Done()) {
int operand_index = iterator.Current();
- for (BasicBlock::Predecessors::iterator i = block->predecessors_begin();
- i != block->predecessors_end(); ++i) {
- BasicBlock* cur = *i;
+ for (auto pred : block->predecessors()) {
+ const InstructionBlock* cur = code()->InstructionBlockAt(pred);
LiveRange* cur_range = LiveRangeFor(operand_index);
ResolveControlFlow(cur_range, block, cur);
}
@@ -1285,7 +1273,8 @@
// Process the blocks in reverse order.
for (int block_id = code()->BasicBlockCount() - 1; block_id >= 0;
--block_id) {
- BasicBlock* block = code()->BlockAt(block_id);
+ const InstructionBlock* block =
+     code()->InstructionBlockAt(BasicBlock::RpoNumber::FromInt(block_id));
BitVector* live = ComputeLiveOut(block);
// Initially consider all live_out values live for the entire block. We
// will shorten these intervals if necessary.
@@ -1295,19 +1284,16 @@
// live values.
ProcessInstructions(block, live);
// All phi output operands are killed by this block.
- for (BasicBlock::const_iterator i = block->begin(); i != block->end();
- ++i) {
- Node* phi = *i;
- if (phi->opcode() != IrOpcode::kPhi) continue;
-
+ for (auto phi : block->phis()) {
// The live range interval already ends at the first instruction of the
// block.
- int phi_vreg = code()->GetVirtualRegister(phi);
+ int phi_vreg = phi->virtual_register();
live->Remove(phi_vreg);
InstructionOperand* hint = NULL;
InstructionOperand* phi_operand = NULL;
- GapInstruction* gap = GetLastGap(block->PredecessorAt(0));
+ GapInstruction* gap =
+ GetLastGap(code()->InstructionBlockAt(block->predecessors()[0]));
// TODO(titzer): no need to create the parallel move if it doesn't exit.
ParallelMove* move =
@@ -1324,7 +1310,7 @@
DCHECK(hint != NULL);
LifetimePosition block_start =
LifetimePosition::FromInstructionIndex(
- code()->first_instruction_index(block));
+ block->first_instruction_index());
Define(block_start, phi_operand, hint);
}
@@ -1337,9 +1323,10 @@
// for each value live on entry to the header.
BitVector::Iterator iterator(live);
LifetimePosition start = LifetimePosition::FromInstructionIndex(
- code()->first_instruction_index(block));
- int end_index =
-     code()->last_instruction_index(code()->BlockAt(block->loop_end()));
+ block->first_instruction_index());
+ int end_index = code()
+ ->InstructionBlockAt(block->loop_end())
+ ->last_instruction_index();
LifetimePosition end =
LifetimePosition::FromInstructionIndex(end_index).NextInstruction();
while (!iterator.Done()) {
@@ -1350,7 +1337,8 @@
}
// Insert all values into the live in sets of all blocks in the loop.
- for (int i = block->rpo_number() + 1; i < block->loop_end(); ++i) {
+ for (int i = block->rpo_number().ToInt() + 1;
+ i < block->loop_end().ToInt(); ++i) {
live_in_sets_[i]->Union(*live);
}
}
@@ -1958,8 +1946,8 @@
LifetimePosition RegisterAllocator::FindOptimalSpillingPos(
LiveRange* range, LifetimePosition pos) {
- BasicBlock* block = GetBlock(pos.InstructionStart());
- BasicBlock* loop_header =
+ const InstructionBlock* block = GetInstructionBlock(pos.InstructionStart());
+ const InstructionBlock* loop_header =
block->IsLoopHeader() ? block : code()->GetContainingLoop(block);
if (loop_header == NULL) return pos;
@@ -1971,7 +1959,7 @@
// If possible try to move spilling position backwards to loop header.
// This will reduce number of memory moves on the back edge.
LifetimePosition loop_start = LifetimePosition::FromInstructionIndex(
- code()->first_instruction_index(loop_header));
+ loop_header->first_instruction_index());
if (range->Covers(loop_start)) {
if (prev_use == NULL || prev_use->pos().Value() < loop_start.Value()) {
@@ -2086,8 +2074,8 @@
// We have no choice
if (start_instr == end_instr) return end;
- BasicBlock* start_block = GetBlock(start);
- BasicBlock* end_block = GetBlock(end);
+ const InstructionBlock* start_block = GetInstructionBlock(start);
+ const InstructionBlock* end_block = GetInstructionBlock(end);
if (end_block == start_block) {
// The interval is split in the same basic block. Split at the latest
@@ -2095,12 +2083,12 @@
return end;
}
- BasicBlock* block = end_block;
+ const InstructionBlock* block = end_block;
// Find header of outermost loop.
// TODO(titzer): fix redundancy below.
while (code()->GetContainingLoop(block) != NULL &&
- code()->GetContainingLoop(block)->rpo_number() >
- start_block->rpo_number()) {
+ code()->GetContainingLoop(block)->rpo_number().ToInt() >
+ start_block->rpo_number().ToInt()) {
block = code()->GetContainingLoop(block);
}
@@ -2109,7 +2097,7 @@
if (block == end_block && !end_block->IsLoopHeader()) return end;
return LifetimePosition::FromInstructionIndex(
- code()->first_instruction_index(block));
+ block->first_instruction_index());
}
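
With phi operands stored per predecessor edge, ComputeLiveOut no longer needs
the Node graph: the operand selected by this block's index in the successor's
predecessor list is exactly the value that is live out along that edge. That
step in isolation, as a standalone sketch with simplified types (not the V8
classes):

  #include <cstddef>
  #include <set>
  #include <vector>

  struct PhiOperandsSketch {
    std::vector<int> operands;  // one virtual register per predecessor
  };

  // Adds the phi inputs flowing along the edge predecessor -> successor to the
  // predecessor's live-out set; predecessor_index is the predecessor's
  // position in the successor's predecessor list.
  void AddPhiInputsToLiveOut(const std::vector<PhiOperandsSketch>& successor_phis,
                             size_t predecessor_index,
                             std::set<int>* live_out) {
    for (const PhiOperandsSketch& phi : successor_phis) {
      live_out->insert(phi.operands[predecessor_index]);
    }
  }
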
=======================================
--- /branches/bleeding_edge/src/compiler/register-allocator.h Mon Aug 11 12:26:17 2014 UTC
+++ /branches/bleeding_edge/src/compiler/register-allocator.h Mon Oct 20 07:32:01 2014 UTC
@@ -7,8 +7,6 @@
#include "src/allocation.h"
#include "src/compiler/instruction.h"
-#include "src/compiler/node.h"
-#include "src/compiler/schedule.h"
#include "src/macro-assembler.h"
#include "src/zone.h"
@@ -378,21 +376,22 @@
void ResolveControlFlow();
void PopulatePointerMaps(); // TODO(titzer): rename to PopulateReferenceMaps.
void AllocateRegisters();
- bool CanEagerlyResolveControlFlow(BasicBlock* block) const;
+ bool CanEagerlyResolveControlFlow(const InstructionBlock* block) const;
inline bool SafePointsAreInOrder() const;
// Liveness analysis support.
void InitializeLivenessAnalysis();
- BitVector* ComputeLiveOut(BasicBlock* block);
- void AddInitialIntervals(BasicBlock* block, BitVector* live_out);
+ BitVector* ComputeLiveOut(const InstructionBlock* block);
+ void AddInitialIntervals(const InstructionBlock* block, BitVector* live_out);
bool IsOutputRegisterOf(Instruction* instr, int index);
bool IsOutputDoubleRegisterOf(Instruction* instr, int index);
- void ProcessInstructions(BasicBlock* block, BitVector* live);
- void MeetRegisterConstraints(BasicBlock* block);
+ void ProcessInstructions(const InstructionBlock* block, BitVector* live);
+ void MeetRegisterConstraints(const InstructionBlock* block);
void MeetConstraintsBetween(Instruction* first, Instruction* second,
int gap_index);
- void MeetRegisterConstraintsForLastInstructionInBlock(BasicBlock* block);
- void ResolvePhis(BasicBlock* block);
+ void MeetRegisterConstraintsForLastInstructionInBlock(
+ const InstructionBlock* block);
+ void ResolvePhis(const InstructionBlock* block);
// Helper methods for building intervals.
InstructionOperand* AllocateFixed(UnallocatedOperand* operand, int pos,
@@ -466,8 +465,8 @@
bool IsBlockBoundary(LifetimePosition pos);
// Helper methods for resolving control flow.
- void ResolveControlFlow(LiveRange* range, BasicBlock* block,
- BasicBlock* pred);
+ void ResolveControlFlow(LiveRange* range, const InstructionBlock* block,
+ const InstructionBlock* pred);
inline void SetLiveRangeAssignedRegister(LiveRange* range, int reg);
@@ -476,7 +475,7 @@
ParallelMove* GetConnectingParallelMove(LifetimePosition pos);
// Return the block which contains the given lifetime position.
- BasicBlock* GetBlock(LifetimePosition pos);
+ const InstructionBlock* GetInstructionBlock(LifetimePosition pos);
// Helper methods for the fixed registers.
int RegisterCount() const;
@@ -485,7 +484,7 @@
LiveRange* FixedLiveRangeFor(int index);
LiveRange* FixedDoubleLiveRangeFor(int index);
LiveRange* LiveRangeFor(int index);
- GapInstruction* GetLastGap(BasicBlock* block);
+ GapInstruction* GetLastGap(const InstructionBlock* block);
const char* RegisterName(int allocation_index);
=======================================
--- /branches/bleeding_edge/src/compiler/schedule.cc Wed Oct 15 11:38:04 2014 UTC
+++ /branches/bleeding_edge/src/compiler/schedule.cc Mon Oct 20 07:32:01 2014 UTC
@@ -33,12 +33,6 @@
if (loop_end_ < 0) return false; // This is not a loop.
return block->rpo_number_ >= rpo_number_ && block->rpo_number_ < loop_end_;
}
-
-
-BasicBlock* BasicBlock::ContainingLoop() {
- if (IsLoopHeader()) return this;
- return loop_header();
-}
void BasicBlock::AddSuccessor(BasicBlock* successor) {
@@ -86,16 +80,6 @@
void BasicBlock::set_loop_header(BasicBlock* loop_header) {
loop_header_ = loop_header;
}
-
-
-size_t BasicBlock::PredecessorIndexOf(BasicBlock* predecessor) {
- size_t j = 0;
- for (BasicBlock::Predecessors::iterator i = predecessors_.begin();
- i != predecessors_.end(); ++i, ++j) {
- if (*i == predecessor) break;
- }
- return j;
-}
std::ostream& operator<<(std::ostream& os, const BasicBlock::Control& c) {
=======================================
--- /branches/bleeding_edge/src/compiler/schedule.h Tue Oct 14 08:51:22 2014 UTC
+++ /branches/bleeding_edge/src/compiler/schedule.h Mon Oct 20 07:32:01 2014 UTC
@@ -50,22 +50,33 @@
size_t index_;
};
+ static const int kInvalidRpoNumber = -1;
class RpoNumber FINAL {
public:
- int ToInt() const { return static_cast<int>(index_); }
- size_t ToSize() const { return index_; }
- static RpoNumber FromInt(int index) {
- return RpoNumber(static_cast<size_t>(index));
+ int ToInt() const {
+ DCHECK(IsValid());
+ return index_;
}
- static RpoNumber Invalid() { return RpoNumber(static_cast<size_t>(-1)); }
+ size_t ToSize() const {
+ DCHECK(IsValid());
+ return static_cast<size_t>(index_);
+ }
+ bool IsValid() const { return index_ != kInvalidRpoNumber; }
+ static RpoNumber FromInt(int index) { return RpoNumber(index); }
+ static RpoNumber Invalid() { return RpoNumber(kInvalidRpoNumber); }
bool IsNext(const RpoNumber other) const {
+ DCHECK(IsValid());
return other.index_ == this->index_ + 1;
}
+
+ bool operator==(RpoNumber other) const {
+ return this->index_ == other.index_;
+ }
private:
- explicit RpoNumber(size_t index) : index_(index) {}
- size_t index_;
+ explicit RpoNumber(int32_t index) : index_(index) {}
+ int32_t index_;
};
BasicBlock(Zone* zone, Id id);
@@ -76,14 +87,25 @@
typedef ZoneVector<BasicBlock*> Predecessors;
Predecessors::iterator predecessors_begin() { return predecessors_.begin(); }
Predecessors::iterator predecessors_end() { return predecessors_.end(); }
+ Predecessors::const_iterator predecessors_begin() const {
+ return predecessors_.begin();
+ }
+ Predecessors::const_iterator predecessors_end() const {
+ return predecessors_.end();
+ }
size_t PredecessorCount() const { return predecessors_.size(); }
BasicBlock* PredecessorAt(size_t index) { return predecessors_[index]; }
- size_t PredecessorIndexOf(BasicBlock* predecessor);
void AddPredecessor(BasicBlock* predecessor);
typedef ZoneVector<BasicBlock*> Successors;
Successors::iterator successors_begin() { return successors_.begin(); }
Successors::iterator successors_end() { return successors_.end(); }
+ Successors::const_iterator successors_begin() const {
+ return successors_.begin();
+ }
+ Successors::const_iterator successors_end() const {
+ return successors_.end();
+ }
size_t SuccessorCount() const { return successors_.size(); }
BasicBlock* SuccessorAt(size_t index) { return successors_[index]; }
void AddSuccessor(BasicBlock* successor);
@@ -137,7 +159,6 @@
// Loop membership helpers.
inline bool IsLoopHeader() const { return loop_end_ >= 0; }
bool LoopContains(BasicBlock* block) const;
- BasicBlock* ContainingLoop();
private:
int32_t rpo_number_; // special RPO number of the block.
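
The reworked RpoNumber stores a signed index with -1 as the invalid sentinel
and asserts validity in its accessors, which is what lets InstructionBlock use
Invalid() for "not a loop header" and "no containing loop". A standalone model
of those semantics (illustrative only, not the V8 class):

  #include <cassert>

  class RpoNumberModel {
   public:
    static RpoNumberModel FromInt(int index) { return RpoNumberModel(index); }
    static RpoNumberModel Invalid() { return RpoNumberModel(-1); }  // sentinel

    bool IsValid() const { return index_ != -1; }
    int ToInt() const {
      assert(IsValid());
      return index_;
    }
    bool IsNext(RpoNumberModel other) const {
      assert(IsValid());
      return other.index_ == index_ + 1;
    }
    bool operator==(RpoNumberModel other) const {
      return index_ == other.index_;
    }

   private:
    explicit RpoNumberModel(int index) : index_(index) {}
    int index_;
  };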