Revision: 19661
Author: [email protected]
Date: Tue Mar 4 15:54:12 2014 UTC
Log: A64: Move veneer emission checking in the Assembler.
The previous heuristic would break as a significant amount of code could be
generated without checking for veneer emission.
The veneer emission is now done in the Assembler, in a very similar way to
constant pool emission.
BUG=v8:3177
LOG=N
[email protected]
Review URL: https://codereview.chromium.org/181873002
http://code.google.com/p/v8/source/detail?r=19661
Modified:
/branches/bleeding_edge/src/a64/assembler-a64-inl.h
/branches/bleeding_edge/src/a64/assembler-a64.cc
/branches/bleeding_edge/src/a64/assembler-a64.h
/branches/bleeding_edge/src/a64/code-stubs-a64.cc
/branches/bleeding_edge/src/a64/full-codegen-a64.cc
/branches/bleeding_edge/src/a64/lithium-codegen-a64.cc
/branches/bleeding_edge/src/a64/macro-assembler-a64-inl.h
/branches/bleeding_edge/src/a64/macro-assembler-a64.cc
/branches/bleeding_edge/src/a64/macro-assembler-a64.h
=======================================
--- /branches/bleeding_edge/src/a64/assembler-a64-inl.h Tue Feb 18 13:15:32
2014 UTC
+++ /branches/bleeding_edge/src/a64/assembler-a64-inl.h Tue Mar 4 15:54:12
2014 UTC
@@ -1178,7 +1178,10 @@
if (buffer_space() < kGap) {
GrowBuffer();
}
- if (pc_offset() >= next_buffer_check_) {
+ if (pc_offset() >= next_veneer_pool_check_) {
+ CheckVeneerPool(true);
+ }
+ if (pc_offset() >= next_constant_pool_check_) {
CheckConstPool(false, true);
}
}
=======================================
--- /branches/bleeding_edge/src/a64/assembler-a64.cc Tue Feb 18 13:15:32
2014 UTC
+++ /branches/bleeding_edge/src/a64/assembler-a64.cc Tue Mar 4 15:54:12
2014 UTC
@@ -286,6 +286,7 @@
unresolved_branches_(),
positions_recorder_(this) {
const_pool_blocked_nesting_ = 0;
+ veneer_pool_blocked_nesting_ = 0;
Reset();
}
@@ -293,6 +294,7 @@
Assembler::~Assembler() {
ASSERT(num_pending_reloc_info_ == 0);
ASSERT(const_pool_blocked_nesting_ == 0);
+ ASSERT(veneer_pool_blocked_nesting_ == 0);
}
@@ -300,13 +302,16 @@
#ifdef DEBUG
ASSERT((pc_ >= buffer_) && (pc_ < buffer_ + buffer_size_));
ASSERT(const_pool_blocked_nesting_ == 0);
+ ASSERT(veneer_pool_blocked_nesting_ == 0);
+ ASSERT(unresolved_branches_.empty());
memset(buffer_, 0, pc_ - buffer_);
#endif
pc_ = buffer_;
reloc_info_writer.Reposition(reinterpret_cast<byte*>(buffer_ +
buffer_size_),
reinterpret_cast<byte*>(pc_));
num_pending_reloc_info_ = 0;
- next_buffer_check_ = 0;
+ next_constant_pool_check_ = 0;
+ next_veneer_pool_check_ = kMaxInt;
no_const_pool_before_ = 0;
first_const_pool_use_ = -1;
ClearRecordedAstId();
@@ -534,6 +539,11 @@
void Assembler::DeleteUnresolvedBranchInfoForLabel(Label* label) {
+ if (unresolved_branches_.empty()) {
+ ASSERT(next_veneer_pool_check_ == kMaxInt);
+ return;
+ }
+
// Branches to this label will be resolved when the label is bound below.
std::multimap<int, FarBranchInfo>::iterator it_tmp, it;
it = unresolved_branches_.begin();
@@ -544,6 +554,12 @@
unresolved_branches_.erase(it_tmp);
}
}
+ if (unresolved_branches_.empty()) {
+ next_veneer_pool_check_ = kMaxInt;
+ } else {
+ next_veneer_pool_check_ =
+ unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
+ }
}
@@ -551,7 +567,7 @@
if (const_pool_blocked_nesting_++ == 0) {
// Prevent constant pool checks happening by setting the next check to
// the biggest possible offset.
- next_buffer_check_ = kMaxInt;
+ next_constant_pool_check_ = kMaxInt;
}
}
@@ -560,13 +576,13 @@
if (--const_pool_blocked_nesting_ == 0) {
// Check the constant pool hasn't been blocked for too long.
ASSERT((num_pending_reloc_info_ == 0) ||
- (pc_offset() < (first_const_pool_use_ + kMaxDistToPool)));
+ (pc_offset() < (first_const_pool_use_ + kMaxDistToConstPool)));
// Two cases:
- // * no_const_pool_before_ >= next_buffer_check_ and the emission is
+ // * no_const_pool_before_ >= next_constant_pool_check_ and the
emission is
// still blocked
- // * no_const_pool_before_ < next_buffer_check_ and the next emit will
- // trigger a check.
- next_buffer_check_ = no_const_pool_before_;
+ // * no_const_pool_before_ < next_constant_pool_check_ and the next
emit
+ // will trigger a check.
+ next_constant_pool_check_ = no_const_pool_before_;
}
}
@@ -620,6 +636,20 @@
// We must generate only one instruction.
Emit(BLR | Rn(xzr));
}
+
+
+void Assembler::StartBlockVeneerPool() {
+ ++veneer_pool_blocked_nesting_;
+}
+
+
+void Assembler::EndBlockVeneerPool() {
+ if (--veneer_pool_blocked_nesting_ == 0) {
+ // Check the veneer pool hasn't been blocked for too long.
+ ASSERT(unresolved_branches_.empty() ||
+ (pc_offset() < unresolved_branches_first_limit()));
+ }
+}
void Assembler::br(const Register& xn) {
@@ -1870,8 +1900,8 @@
Serializer::TooLateToEnableNow();
#endif
// The arguments to the debug marker need to be contiguous in memory,
so
- // make sure we don't try to emit a literal pool.
- BlockConstPoolScope scope(this);
+ // make sure we don't try to emit pools.
+ BlockPoolsScope scope(this);
Label start;
bind(&start);
@@ -2445,14 +2475,14 @@
int pc_limit = pc_offset() + instructions * kInstructionSize;
if (no_const_pool_before_ < pc_limit) {
// If there are some pending entries, the constant pool cannot be
blocked
- // further than first_const_pool_use_ + kMaxDistToPool
+ // further than first_const_pool_use_ + kMaxDistToConstPool
ASSERT((num_pending_reloc_info_ == 0) ||
- (pc_limit < (first_const_pool_use_ + kMaxDistToPool)));
+ (pc_limit < (first_const_pool_use_ + kMaxDistToConstPool)));
no_const_pool_before_ = pc_limit;
}
- if (next_buffer_check_ < no_const_pool_before_) {
- next_buffer_check_ = no_const_pool_before_;
+ if (next_constant_pool_check_ < no_const_pool_before_) {
+ next_constant_pool_check_ = no_const_pool_before_;
}
}
@@ -2470,22 +2500,33 @@
// There is nothing to do if there are no pending constant pool entries.
if (num_pending_reloc_info_ == 0) {
// Calculate the offset of the next check.
- next_buffer_check_ = pc_offset() + kCheckPoolInterval;
+ next_constant_pool_check_ = pc_offset() + kCheckConstPoolInterval;
return;
}
// We emit a constant pool when:
// * requested to do so by parameter force_emit (e.g. after each
function).
// * the distance to the first instruction accessing the constant pool
is
- // kAvgDistToPool or more.
+ // kAvgDistToConstPool or more.
// * no jump is required and the distance to the first instruction
accessing
- // the constant pool is at least kMaxDistToPool / 2.
+  //   the constant pool is at least kMaxDistToConstPool / 2.
ASSERT(first_const_pool_use_ >= 0);
int dist = pc_offset() - first_const_pool_use_;
- if (!force_emit && dist < kAvgDistToPool &&
- (require_jump || (dist < (kMaxDistToPool / 2)))) {
+ if (!force_emit && dist < kAvgDistToConstPool &&
+ (require_jump || (dist < (kMaxDistToConstPool / 2)))) {
return;
}
+
+ int jump_instr = require_jump ? kInstructionSize : 0;
+ int size_pool_marker = kInstructionSize;
+ int size_pool_guard = kInstructionSize;
+ int pool_size = jump_instr + size_pool_marker + size_pool_guard +
+ num_pending_reloc_info_ * kPointerSize;
+ int needed_space = pool_size + kGap;
+
+ // Emit veneers for branches that would go out of range during emission
of the
+ // constant pool.
+ CheckVeneerPool(require_jump, kVeneerDistanceMargin - pool_size);
Label size_check;
bind(&size_check);
@@ -2493,19 +2534,13 @@
// Check that the code buffer is large enough before emitting the
constant
// pool (include the jump over the pool, the constant pool marker, the
// constant pool guard, and the gap to the relocation information).
- int jump_instr = require_jump ? kInstructionSize : 0;
- int size_pool_marker = kInstructionSize;
- int size_pool_guard = kInstructionSize;
- int pool_size = jump_instr + size_pool_marker + size_pool_guard +
- num_pending_reloc_info_ * kPointerSize;
- int needed_space = pool_size + kGap;
while (buffer_space() <= needed_space) {
GrowBuffer();
}
{
- // Block recursive calls to CheckConstPool.
- BlockConstPoolScope block_const_pool(this);
+ // Block recursive calls to CheckConstPool and protect from veneer
pools.
+ BlockPoolsScope block_pools(this);
RecordComment("[ Constant Pool");
RecordConstPool(pool_size);
@@ -2558,11 +2593,112 @@
// Since a constant pool was just emitted, move the check offset forward
by
// the standard interval.
- next_buffer_check_ = pc_offset() + kCheckPoolInterval;
+ next_constant_pool_check_ = pc_offset() + kCheckConstPoolInterval;
ASSERT(SizeOfCodeGeneratedSince(&size_check) ==
static_cast<unsigned>(pool_size));
}
+
+
+bool Assembler::ShouldEmitVeneer(int max_reachable_pc, int margin) {
+ // Account for the branch around the veneers and the guard.
+ int protection_offset = 2 * kInstructionSize;
+ return pc_offset() > max_reachable_pc - margin - protection_offset -
+ static_cast<int>(unresolved_branches_.size() * kMaxVeneerCodeSize);
+}
+
+
+void Assembler::EmitVeneers(bool need_protection, int margin) {
+ BlockPoolsScope scope(this);
+ RecordComment("[ Veneers");
+
+ Label end;
+ if (need_protection) {
+ b(&end);
+ }
+
+ EmitVeneersGuard();
+
+ Label size_check;
+
+ std::multimap<int, FarBranchInfo>::iterator it, it_to_delete;
+
+ it = unresolved_branches_.begin();
+ while (it != unresolved_branches_.end()) {
+ if (ShouldEmitVeneer(it->first, margin)) {
+ Instruction* branch = InstructionAt(it->second.pc_offset_);
+ Label* label = it->second.label_;
+
+#ifdef DEBUG
+ bind(&size_check);
+#endif
+ // Patch the branch to point to the current position, and emit a
branch
+ // to the label.
+ Instruction* veneer = reinterpret_cast<Instruction*>(pc_);
+ RemoveBranchFromLabelLinkChain(branch, label, veneer);
+ branch->SetImmPCOffsetTarget(veneer);
+ b(label);
+#ifdef DEBUG
+ ASSERT(SizeOfCodeGeneratedSince(&size_check) <=
+ static_cast<uint64_t>(kMaxVeneerCodeSize));
+ size_check.Unuse();
+#endif
+
+ it_to_delete = it++;
+ unresolved_branches_.erase(it_to_delete);
+ } else {
+ ++it;
+ }
+ }
+
+ if (unresolved_branches_.empty()) {
+ next_veneer_pool_check_ = kMaxInt;
+ } else {
+ next_veneer_pool_check_ =
+ unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
+ }
+
+ bind(&end);
+
+ RecordComment("]");
+}
+
+
+void Assembler::EmitVeneersGuard() {
+ if (emit_debug_code()) {
+ Unreachable();
+ }
+}
+
+
+void Assembler::CheckVeneerPool(bool require_jump,
+ int margin) {
+ // There is nothing to do if there are no pending veneer pool entries.
+ if (unresolved_branches_.empty()) {
+ ASSERT(next_veneer_pool_check_ == kMaxInt);
+ return;
+ }
+
+ ASSERT(pc_offset() < unresolved_branches_first_limit());
+
+  // Some short sequences of instructions must not be broken up by veneer pool
+  // emission; such sequences are protected by calls to BlockVeneerPoolFor and
+  // BlockVeneerPoolScope.
+ if (is_veneer_pool_blocked()) {
+ return;
+ }
+
+ if (!require_jump) {
+ // Prefer emitting veneers protected by an existing instruction.
+ margin *= kVeneerNoProtectionFactor;
+ }
+ if (ShouldEmitVeneers(margin)) {
+ EmitVeneers(require_jump, margin);
+ } else {
+ next_veneer_pool_check_ =
+ unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
+ }
+}
void Assembler::RecordComment(const char* msg) {
=======================================
--- /branches/bleeding_edge/src/a64/assembler-a64.h Tue Feb 18 13:15:32
2014 UTC
+++ /branches/bleeding_edge/src/a64/assembler-a64.h Tue Mar 4 15:54:12
2014 UTC
@@ -730,7 +730,7 @@
void bind(Label* label);
- // RelocInfo and constant pool
----------------------------------------------
+ // RelocInfo and pools
------------------------------------------------------
// Record relocation information for current pc_.
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
@@ -841,6 +841,28 @@
void ConstantPoolMarker(uint32_t size);
void ConstantPoolGuard();
+ // Prevent veneer pool emission until EndBlockVeneerPool is called.
+  // Calls to this function can be nested but must be followed by an equal
+  // number of calls to EndBlockVeneerPool.
+ void StartBlockVeneerPool();
+
+  // Resume veneer pool emission. Needs to be called as many times as
+  // StartBlockVeneerPool to have an effect.
+ void EndBlockVeneerPool();
+
+ bool is_veneer_pool_blocked() const {
+ return veneer_pool_blocked_nesting_ > 0;
+ }
+
+ // Block/resume emission of constant pools and veneer pools.
+ void StartBlockPools() {
+ StartBlockConstPool();
+ StartBlockVeneerPool();
+ }
+ void EndBlockPools() {
+ EndBlockConstPool();
+ EndBlockVeneerPool();
+ }
// Debugging
----------------------------------------------------------------
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
@@ -1718,6 +1740,44 @@
// Check if is time to emit a constant pool.
void CheckConstPool(bool force_emit, bool require_jump);
+
+ // Returns true if we should emit a veneer as soon as possible for a
branch
+ // which can at most reach to specified pc.
+ bool ShouldEmitVeneer(int max_reachable_pc,
+ int margin = kVeneerDistanceMargin);
+ bool ShouldEmitVeneers(int margin = kVeneerDistanceMargin) {
+ return ShouldEmitVeneer(unresolved_branches_first_limit(), margin);
+ }
+
+ // The maximum code size generated for a veneer. Currently one branch
+ // instruction. This is for code size checking purposes, and can be
extended
+ // in the future for example if we decide to add nops between the
veneers.
+ static const int kMaxVeneerCodeSize = 1 * kInstructionSize;
+
+ // Emits veneers for branches that are approaching their maximum range.
+ // If need_protection is true, the veneers are protected by a branch
jumping
+ // over the code.
+ void EmitVeneers(bool need_protection, int margin =
kVeneerDistanceMargin);
+ void EmitVeneersGuard();
+ // Checks whether veneers need to be emitted at this point.
+ void CheckVeneerPool(bool require_jump, int margin =
kVeneerDistanceMargin);
+
+
+ class BlockPoolsScope {
+ public:
+ explicit BlockPoolsScope(Assembler* assem) : assem_(assem) {
+ assem_->StartBlockPools();
+ }
+ ~BlockPoolsScope() {
+ assem_->EndBlockPools();
+ }
+
+ private:
+ Assembler* assem_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(BlockPoolsScope);
+ };
+
// Available for constrained code generation scopes. Prefer
// MacroAssembler::Mov() when possible.
inline void LoadRelocated(const CPURegister& rt, const Operand& operand);
@@ -1903,8 +1963,8 @@
void GrowBuffer();
void CheckBuffer();
- // Pc offset of the next buffer check.
- int next_buffer_check_;
+ // Pc offset of the next constant pool check.
+ int next_constant_pool_check_;
// Constant pool generation
// Pools are emitted in the instruction stream, preferably after
unconditional
@@ -1920,15 +1980,16 @@
// expensive. By default we only check again once a number of
instructions
// has been generated. That also means that the sizing of the buffers is
not
// an exact science, and that we rely on some slop to not overrun
buffers.
- static const int kCheckPoolIntervalInst = 128;
- static const int kCheckPoolInterval =
- kCheckPoolIntervalInst * kInstructionSize;
+ static const int kCheckConstPoolIntervalInst = 128;
+ static const int kCheckConstPoolInterval =
+ kCheckConstPoolIntervalInst * kInstructionSize;
// Constants in pools are accessed via pc relative addressing, which can
// reach +/-4KB thereby defining a maximum distance between the
instruction
// and the accessed constant.
- static const int kMaxDistToPool = 4 * KB;
- static const int kMaxNumPendingRelocInfo = kMaxDistToPool /
kInstructionSize;
+ static const int kMaxDistToConstPool = 4 * KB;
+ static const int kMaxNumPendingRelocInfo =
+ kMaxDistToConstPool / kInstructionSize;
// Average distance beetween a constant pool and the first instruction
@@ -1936,7 +1997,8 @@
// pollution.
// In practice the distance will be smaller since constant pool emission
is
// forced after function return and sometimes after unconditional
branches.
- static const int kAvgDistToPool = kMaxDistToPool - kCheckPoolInterval;
+ static const int kAvgDistToConstPool =
+ kMaxDistToConstPool - kCheckConstPoolInterval;
// Emission of the constant pool may be blocked in some code sequences.
int const_pool_blocked_nesting_; // Block emission if this is not zero.
@@ -1946,6 +2008,9 @@
// since the previous constant pool was emitted.
int first_const_pool_use_;
+ // Emission of the veneer pools may be blocked in some code sequences.
+ int veneer_pool_blocked_nesting_; // Block emission if this is not zero.
+
// Relocation info generation
// Each relocation is encoded as a variable size value
static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
@@ -2013,6 +2078,25 @@
// pc_offset() for convenience.
std::multimap<int, FarBranchInfo> unresolved_branches_;
+ // We generate a veneer for a branch if we reach within this distance of
the
+ // limit of the range.
+ static const int kVeneerDistanceMargin = 1 * KB;
+ // The factor of 2 is a finger in the air guess. With a default margin of
+  // 1KB, that leaves us an additional 256 instructions to avoid generating a
+ // protective branch.
+ static const int kVeneerNoProtectionFactor = 2;
+ static const int kVeneerDistanceCheckMargin =
+ kVeneerNoProtectionFactor * kVeneerDistanceMargin;
+ int unresolved_branches_first_limit() const {
+ ASSERT(!unresolved_branches_.empty());
+ return unresolved_branches_.begin()->first;
+ }
+ // This is similar to next_constant_pool_check_ and helps reduce the
overhead
+ // of checking for veneer pools.
+ // It is maintained to the closest unresolved branch limit minus the
maximum
+ // veneer margin (or kMaxInt if there are no unresolved branches).
+ int next_veneer_pool_check_;
+
private:
// If a veneer is emitted for a branch instruction, that instruction
must be
// removed from the associated label's link chain so that the assembler
does
@@ -2021,14 +2105,6 @@
void DeleteUnresolvedBranchInfoForLabel(Label* label);
private:
- // TODO(jbramley): VIXL uses next_literal_pool_check_ and
- // literal_pool_monitor_ to determine when to consider emitting a literal
- // pool. V8 doesn't use them, so they should either not be here at all,
or
- // should replace or be merged with next_buffer_check_ and
- // const_pool_blocked_nesting_.
- Instruction* next_literal_pool_check_;
- unsigned literal_pool_monitor_;
-
PositionsRecorder positions_recorder_;
friend class PositionsRecorder;
friend class EnsureSpace;
=======================================
--- /branches/bleeding_edge/src/a64/code-stubs-a64.cc Mon Mar 3 13:27:59
2014 UTC
+++ /branches/bleeding_edge/src/a64/code-stubs-a64.cc Tue Mar 4 15:54:12
2014 UTC
@@ -1844,7 +1844,7 @@
// checking for constant pool emission, but we do not want to depend on
// that.
{
- Assembler::BlockConstPoolScope block_const_pool(masm);
+ Assembler::BlockPoolsScope block_pools(masm);
__ bind(&handler_entry);
handler_offset_ = handler_entry.pos();
// Caught exception: Store result (exception) in the pending exception
@@ -4948,7 +4948,7 @@
if (masm->isolate()->function_entry_hook() != NULL) {
// TODO(all): This needs to be reliably consistent with
// kReturnAddressDistanceFromFunctionStart in ::Generate.
- Assembler::BlockConstPoolScope no_const_pools(masm);
+ Assembler::BlockPoolsScope no_pools(masm);
ProfileEntryHookStub stub;
__ Push(lr);
__ CallStub(&stub);
=======================================
--- /branches/bleeding_edge/src/a64/full-codegen-a64.cc Tue Mar 4 12:48:17
2014 UTC
+++ /branches/bleeding_edge/src/a64/full-codegen-a64.cc Tue Mar 4 15:54:12
2014 UTC
@@ -93,7 +93,7 @@
}
void EmitPatchInfo() {
- Assembler::BlockConstPoolScope scope(masm_);
+ Assembler::BlockPoolsScope scope(masm_);
InlineSmiCheckInfo::Emit(masm_, reg_, &patch_site_);
#ifdef DEBUG
info_emitted_ = true;
@@ -350,7 +350,7 @@
ASSERT(jssp.Is(__ StackPointer()));
Comment cmnt(masm_, "[ Back edge bookkeeping");
// Block literal pools whilst emitting back edge code.
- Assembler::BlockConstPoolScope block_const_pool(masm_);
+ Assembler::BlockPoolsScope block_const_pool(masm_);
Label ok;
ASSERT(back_edge_target->is_bound());
@@ -2006,7 +2006,7 @@
__ Bind(&stub_call);
BinaryOpICStub stub(op, mode);
{
- Assembler::BlockConstPoolScope scope(masm_);
+ Assembler::BlockPoolsScope scope(masm_);
CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
}
@@ -2092,7 +2092,7 @@
BinaryOpICStub stub(op, mode);
JumpPatchSite patch_site(masm_); // Unbound, signals no inlined smi
code.
{
- Assembler::BlockConstPoolScope scope(masm_);
+ Assembler::BlockPoolsScope scope(masm_);
CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
}
@@ -4116,7 +4116,7 @@
SetSourcePosition(expr->position());
{
- Assembler::BlockConstPoolScope scope(masm_);
+ Assembler::BlockPoolsScope scope(masm_);
BinaryOpICStub stub(Token::ADD, NO_OVERWRITE);
CallIC(stub.GetCode(isolate()), expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
=======================================
--- /branches/bleeding_edge/src/a64/lithium-codegen-a64.cc Tue Mar 4
12:48:17 2014 UTC
+++ /branches/bleeding_edge/src/a64/lithium-codegen-a64.cc Tue Mar 4
15:54:12 2014 UTC
@@ -414,7 +414,7 @@
SafepointMode safepoint_mode) {
ASSERT(instr != NULL);
- Assembler::BlockConstPoolScope scope(masm_);
+ Assembler::BlockPoolsScope scope(masm_);
__ Call(code, mode);
RecordSafepointWithLazyDeopt(instr, safepoint_mode);
=======================================
--- /branches/bleeding_edge/src/a64/macro-assembler-a64-inl.h Wed Feb 19
09:43:45 2014 UTC
+++ /branches/bleeding_edge/src/a64/macro-assembler-a64-inl.h Tue Mar 4
15:54:12 2014 UTC
@@ -346,7 +346,7 @@
void MacroAssembler::B(Label* label) {
b(label);
- CheckVeneers(false);
+ CheckVeneerPool(false);
}
@@ -1014,7 +1014,7 @@
ASSERT(allow_macro_instructions_);
ASSERT(!xn.IsZero());
ret(xn);
- CheckVeneers(false);
+ CheckVeneerPool(false);
}
=======================================
--- /branches/bleeding_edge/src/a64/macro-assembler-a64.cc Fri Feb 28
10:39:36 2014 UTC
+++ /branches/bleeding_edge/src/a64/macro-assembler-a64.cc Tue Mar 4
15:54:12 2014 UTC
@@ -556,92 +556,6 @@
Str(rt, addr);
}
}
-
-
-bool MacroAssembler::ShouldEmitVeneer(int max_reachable_pc, int margin) {
- // Account for the branch around the veneers and the guard.
- int protection_offset = 2 * kInstructionSize;
- return pc_offset() > max_reachable_pc - margin - protection_offset -
- static_cast<int>(unresolved_branches_.size() * kMaxVeneerCodeSize);
-}
-
-
-void MacroAssembler::EmitVeneers(bool need_protection) {
- RecordComment("[ Veneers");
-
- Label end;
- if (need_protection) {
- B(&end);
- }
-
- EmitVeneersGuard();
-
- {
- InstructionAccurateScope scope(this);
- Label size_check;
-
- std::multimap<int, FarBranchInfo>::iterator it, it_to_delete;
-
- it = unresolved_branches_.begin();
- while (it != unresolved_branches_.end()) {
- if (ShouldEmitVeneer(it->first)) {
- Instruction* branch = InstructionAt(it->second.pc_offset_);
- Label* label = it->second.label_;
-
-#ifdef DEBUG
- __ bind(&size_check);
-#endif
- // Patch the branch to point to the current position, and emit a
branch
- // to the label.
- Instruction* veneer = reinterpret_cast<Instruction*>(pc_);
- RemoveBranchFromLabelLinkChain(branch, label, veneer);
- branch->SetImmPCOffsetTarget(veneer);
- b(label);
-#ifdef DEBUG
- ASSERT(SizeOfCodeGeneratedSince(&size_check) <=
- static_cast<uint64_t>(kMaxVeneerCodeSize));
- size_check.Unuse();
-#endif
-
- it_to_delete = it++;
- unresolved_branches_.erase(it_to_delete);
- } else {
- ++it;
- }
- }
- }
-
- Bind(&end);
-
- RecordComment("]");
-}
-
-
-void MacroAssembler::EmitVeneersGuard() {
- if (emit_debug_code()) {
- Unreachable();
- }
-}
-
-
-void MacroAssembler::CheckVeneers(bool need_protection) {
- if (unresolved_branches_.empty()) {
- return;
- }
-
- CHECK(pc_offset() < unresolved_branches_first_limit());
- int margin = kVeneerDistanceMargin;
- if (!need_protection) {
- // Prefer emitting veneers protected by an existing instruction.
- // The 4 divisor is a finger in the air guess. With a default margin
of 2KB,
- // that leaves 512B = 128 instructions of extra margin to avoid
requiring a
- // protective branch.
- margin += margin / 4;
- }
- if (ShouldEmitVeneer(unresolved_branches_first_limit(), margin)) {
- EmitVeneers(need_protection);
- }
-}
bool MacroAssembler::NeedExtraInstructionsOrRegisterBranch(
@@ -661,6 +575,10 @@
unresolved_branches_.insert(
std::pair<int, FarBranchInfo>(max_reachable_pc,
FarBranchInfo(pc_offset(), label)));
+ // Also maintain the next pool check.
+ next_veneer_pool_check_ =
+ Min(next_veneer_pool_check_,
+ max_reachable_pc - kVeneerDistanceCheckMargin);
}
return need_longer_range;
}
@@ -696,11 +614,10 @@
if (need_extra_instructions) {
b(&done, InvertCondition(cond));
- b(label);
+ B(label);
} else {
b(label, cond);
}
- CheckVeneers(!need_extra_instructions);
bind(&done);
}
@@ -714,11 +631,10 @@
if (need_extra_instructions) {
tbz(rt, bit_pos, &done);
- b(label);
+ B(label);
} else {
tbnz(rt, bit_pos, label);
}
- CheckVeneers(!need_extra_instructions);
bind(&done);
}
@@ -732,11 +648,10 @@
if (need_extra_instructions) {
tbnz(rt, bit_pos, &done);
- b(label);
+ B(label);
} else {
tbz(rt, bit_pos, label);
}
- CheckVeneers(!need_extra_instructions);
bind(&done);
}
@@ -750,11 +665,10 @@
if (need_extra_instructions) {
cbz(rt, &done);
- b(label);
+ B(label);
} else {
cbnz(rt, label);
}
- CheckVeneers(!need_extra_instructions);
bind(&done);
}
@@ -768,11 +682,10 @@
if (need_extra_instructions) {
cbnz(rt, &done);
- b(label);
+ B(label);
} else {
cbz(rt, label);
}
- CheckVeneers(!need_extra_instructions);
bind(&done);
}
@@ -2009,7 +1922,7 @@
void MacroAssembler::Call(Register target) {
- BlockConstPoolScope scope(this);
+ BlockPoolsScope scope(this);
#ifdef DEBUG
Label start_call;
Bind(&start_call);
@@ -2024,7 +1937,7 @@
void MacroAssembler::Call(Label* target) {
- BlockConstPoolScope scope(this);
+ BlockPoolsScope scope(this);
#ifdef DEBUG
Label start_call;
Bind(&start_call);
@@ -2041,7 +1954,7 @@
// MacroAssembler::CallSize is sensitive to changes in this function, as it
// requires to know how many instructions are used to branch to the target.
void MacroAssembler::Call(Address target, RelocInfo::Mode rmode) {
- BlockConstPoolScope scope(this);
+ BlockPoolsScope scope(this);
#ifdef DEBUG
Label start_call;
Bind(&start_call);
@@ -4679,7 +4592,7 @@
// Emit the message string directly in the instruction stream.
{
- BlockConstPoolScope scope(this);
+ BlockPoolsScope scope(this);
Bind(&msg_address);
EmitStringData(GetBailoutReason(reason));
}
@@ -4860,7 +4773,7 @@
Adr(x0, &format_address);
// Emit the format string directly in the instruction stream.
- { BlockConstPoolScope scope(this);
+ { BlockPoolsScope scope(this);
Label after_data;
B(&after_data);
Bind(&format_address);
@@ -5025,7 +4938,7 @@
void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg,
const Label* smi_check) {
- Assembler::BlockConstPoolScope scope(masm);
+ Assembler::BlockPoolsScope scope(masm);
if (reg.IsValid()) {
ASSERT(smi_check->is_bound());
ASSERT(reg.Is64Bits());
=======================================
--- /branches/bleeding_edge/src/a64/macro-assembler-a64.h Fri Feb 28
10:39:36 2014 UTC
+++ /branches/bleeding_edge/src/a64/macro-assembler-a64.h Tue Mar 4
15:54:12 2014 UTC
@@ -2169,24 +2169,6 @@
// (!), the mechanism can be extended to generate special veneers for
really
// far targets.
- // Returns true if we should emit a veneer as soon as possible for a
branch
- // which can at most reach to specified pc.
- bool ShouldEmitVeneer(int max_reachable_pc,
- int margin = kVeneerDistanceMargin);
-
- // The maximum code size generated for a veneer. Currently one branch
- // instruction. This is for code size checking purposes, and can be
extended
- // in the future for example if we decide to add nops between the
veneers.
- static const int kMaxVeneerCodeSize = 1 * kInstructionSize;
-
- // Emits veneers for branches that are approaching their maximum range.
- // If need_protection is true, the veneers are protected by a branch
jumping
- // over the code.
- void EmitVeneers(bool need_protection);
- void EmitVeneersGuard();
- // Checks wether veneers need to be emitted at this point.
- void CheckVeneers(bool need_protection);
-
// Helps resolve branching to labels potentially out of range.
// If the label is not bound, it registers the information necessary to
later
// be able to emit a veneer for this branch if necessary.
@@ -2197,15 +2179,6 @@
// This function also checks whether veneers need to be emitted.
bool NeedExtraInstructionsOrRegisterBranch(Label *label,
ImmBranchType branch_type);
-
- private:
- // We generate a veneer for a branch if we reach within this distance of
the
- // limit of the range.
- static const int kVeneerDistanceMargin = 4 * KB;
- int unresolved_branches_first_limit() const {
- ASSERT(!unresolved_branches_.empty());
- return unresolved_branches_.begin()->first;
- }
};
--
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev
---
You received this message because you are subscribed to the Google Groups "v8-dev" group.
To unsubscribe from this group and stop receiving emails from it, send an email
to [email protected].
For more options, visit https://groups.google.com/groups/opt_out.