Revision: 20051
Author: [email protected]
Date: Wed Mar 19 09:38:20 2014 UTC
Log: Add out-of-line constant pool support to ARM.
This CL adds out-of-line constant pool support to ARM. A ConstantPoolBuilder
object is introduced to manage building of the ConstantPoolArray for a given
code object.
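In rough outline, the builder threads through code generation as follows (an
illustrative standalone sketch only; the real interface is the
ConstantPoolBuilder added to src/arm/assembler-arm.h below, and every name in
this toy model is made up):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Toy model of the out-of-line pool flow: constants are recorded while
    // assembling (AddEntry), the pool is sized and allocated when the Code
    // object is created (Allocate), and the placeholder ldr/vldr offsets are
    // back-patched once the final layout is known (Populate).
    class ToyPoolBuilder {
     public:
      int AddEntry(uint64_t value) {
        entries_.push_back(value);
        return static_cast<int>(entries_.size()) - 1;  // offset not yet known
      }
      int OffsetOf(int index) const { return index * 8; }  // fixed at "Populate"
     private:
      std::vector<uint64_t> entries_;
    };

    int main() {
      ToyPoolBuilder builder;
      int index = builder.AddEntry(0x1234);
      // The load emitted as "ldr rd, [pp, #0]" would now be patched to use
      // the real offset:
      assert(builder.OffsetOf(index) == 0);
    }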
This CL depends on the following CLs landing first:
https://codereview.chromium.org/138503002/
https://codereview.chromium.org/179813005/
https://codereview.chromium.org/183553003/
https://codereview.chromium.org/183803022/
https://codereview.chromium.org/183883011/
https://codereview.chromium.org/186733006/
https://codereview.chromium.org/188063002/
https://codereview.chromium.org/190793002/
https://codereview.chromium.org/190823002/
https://codereview.chromium.org/190833002/
https://codereview.chromium.org/190883002/
[email protected]
Review URL: https://codereview.chromium.org/191233003
http://code.google.com/p/v8/source/detail?r=20051
Modified:
/branches/bleeding_edge/src/a64/assembler-a64.cc
/branches/bleeding_edge/src/a64/assembler-a64.h
/branches/bleeding_edge/src/arm/assembler-arm-inl.h
/branches/bleeding_edge/src/arm/assembler-arm.cc
/branches/bleeding_edge/src/arm/assembler-arm.h
/branches/bleeding_edge/src/arm/builtins-arm.cc
/branches/bleeding_edge/src/arm/code-stubs-arm.cc
/branches/bleeding_edge/src/arm/full-codegen-arm.cc
/branches/bleeding_edge/src/arm/macro-assembler-arm.cc
/branches/bleeding_edge/src/assembler.h
/branches/bleeding_edge/src/heap.cc
/branches/bleeding_edge/src/ia32/assembler-ia32.cc
/branches/bleeding_edge/src/ia32/assembler-ia32.h
/branches/bleeding_edge/src/objects.h
/branches/bleeding_edge/src/x64/assembler-x64.cc
/branches/bleeding_edge/src/x64/assembler-x64.h
=======================================
--- /branches/bleeding_edge/src/a64/assembler-a64.cc Tue Mar 18 14:26:26 2014 UTC
+++ /branches/bleeding_edge/src/a64/assembler-a64.cc Wed Mar 19 09:38:20 2014 UTC
@@ -2793,6 +2793,19 @@
RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
#endif
}
+
+
+MaybeObject* Assembler::AllocateConstantPool(Heap* heap) {
+ // No out-of-line constant pool support.
+ UNREACHABLE();
+ return NULL;
+}
+
+
+void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
+ // No out-of-line constant pool support.
+ UNREACHABLE();
+}
} } // namespace v8::internal
=======================================
--- /branches/bleeding_edge/src/a64/assembler-a64.h Tue Mar 18 14:26:26 2014 UTC
+++ /branches/bleeding_edge/src/a64/assembler-a64.h Wed Mar 19 09:38:20 2014 UTC
@@ -1790,6 +1790,11 @@
// Check if is time to emit a constant pool.
void CheckConstPool(bool force_emit, bool require_jump);
+ // Allocate a constant pool of the correct size for the generated code.
+ MaybeObject* AllocateConstantPool(Heap* heap);
+
+ // Generate the constant pool for the generated code.
+ void PopulateConstantPool(ConstantPoolArray* constant_pool);
  // Returns true if we should emit a veneer as soon as possible for a branch
  // which can at most reach to specified pc.
=======================================
--- /branches/bleeding_edge/src/arm/assembler-arm-inl.h Wed Mar 12 15:23:54 2014 UTC
+++ /branches/bleeding_edge/src/arm/assembler-arm-inl.h Wed Mar 19 09:38:20 2014 UTC
@@ -109,14 +109,28 @@
ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
|| rmode_ == EMBEDDED_OBJECT
|| rmode_ == EXTERNAL_REFERENCE);
- return Assembler::target_pointer_address_at(pc_);
+ if (FLAG_enable_ool_constant_pool ||
+ Assembler::IsMovW(Memory::int32_at(pc_))) {
+    // We return the PC for ool constant pool since this function is used by
+    // the serializer and expects the address to reside within the code object.
+ return reinterpret_cast<Address>(pc_);
+ } else {
+ ASSERT(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_)));
+ return Assembler::target_pointer_address_at(pc_);
+ }
}
Address RelocInfo::constant_pool_entry_address() {
ASSERT(IsInConstantPool());
- ASSERT(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_)));
- return Assembler::target_pointer_address_at(pc_);
+ if (FLAG_enable_ool_constant_pool) {
+ ASSERT(Assembler::IsLdrPpImmediateOffset(Memory::int32_at(pc_)));
+    return Assembler::target_constant_pool_address_at(pc_,
+                                                      host_->constant_pool());
+ } else {
+ ASSERT(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_)));
+ return Assembler::target_pointer_address_at(pc_);
+ }
}
@@ -408,6 +422,16 @@
Instr instr = Memory::int32_at(pc);
return pc + GetLdrRegisterImmediateOffset(instr) + kPcLoadDelta;
}
+
+
+Address Assembler::target_constant_pool_address_at(
+ Address pc, ConstantPoolArray* constant_pool) {
+ ASSERT(constant_pool != NULL);
+ ASSERT(IsLdrPpImmediateOffset(Memory::int32_at(pc)));
+ Instr instr = Memory::int32_at(pc);
+ return reinterpret_cast<Address>(constant_pool) +
+ GetLdrRegisterImmediateOffset(instr);
+}
Address Assembler::target_address_at(Address pc,
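The arithmetic in target_constant_pool_address_at relies on V8 heap pointers
being tagged: pp holds the tagged ConstantPoolArray address, and the offset
encoded into the ldr already has kHeapObjectTag subtracted (see
ConstantPoolBuilder::Populate further down), so base plus offset lands on the
raw field address. A standalone illustration with made-up numbers:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uintptr_t kHeapObjectTag = 1;       // V8 tags heap pointers with 1
      uintptr_t raw_pool = 0x40000000;          // hypothetical array address
      uintptr_t tagged_pool = raw_pool + kHeapObjectTag;   // what pp holds
      uintptr_t field_offset = 16;              // OffsetOfElementAt(i)
      // Offset baked into the instruction at Populate() time:
      uintptr_t encoded = field_offset - kHeapObjectTag;   // 15
      // What the ldr reads, and what target_constant_pool_address_at recomputes:
      assert(tagged_pool + encoded == raw_pool + field_offset);
    }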
@@ -419,9 +443,14 @@
return reinterpret_cast<Address>(
(next_instr->ImmedMovwMovtValue() << 16) |
instr->ImmedMovwMovtValue());
+ } else if (FLAG_enable_ool_constant_pool) {
+ ASSERT(IsLdrPpImmediateOffset(Memory::int32_at(pc)));
+ return Memory::Address_at(
+ target_constant_pool_address_at(pc, constant_pool));
+ } else {
+ ASSERT(IsLdrPcImmediateOffset(Memory::int32_at(pc)));
+ return Memory::Address_at(target_pointer_address_at(pc));
}
- ASSERT(IsLdrPcImmediateOffset(Memory::int32_at(pc)));
- return Memory::Address_at(target_pointer_address_at(pc));
}
@@ -439,7 +468,8 @@
// @ return address
Address candidate = pc - 2 * Assembler::kInstrSize;
Instr candidate_instr(Memory::int32_at(candidate));
- if (IsLdrPcImmediateOffset(candidate_instr)) {
+  if (IsLdrPcImmediateOffset(candidate_instr) ||
+      IsLdrPpImmediateOffset(candidate_instr)) {
return candidate;
}
candidate = pc - 3 * Assembler::kInstrSize;
@@ -450,7 +480,8 @@
Address Assembler::return_address_from_call_start(Address pc) {
- if (IsLdrPcImmediateOffset(Memory::int32_at(pc))) {
+  if (IsLdrPcImmediateOffset(Memory::int32_at(pc)) ||
+      IsLdrPpImmediateOffset(Memory::int32_at(pc))) {
return pc + kInstrSize * 2;
} else {
ASSERT(IsMovW(Memory::int32_at(pc)));
@@ -462,7 +493,11 @@
void Assembler::deserialization_set_special_target_at(
Address constant_pool_entry, Code* code, Address target) {
- Memory::Address_at(constant_pool_entry) = target;
+ if (FLAG_enable_ool_constant_pool) {
+ set_target_address_at(constant_pool_entry, code, target);
+ } else {
+ Memory::Address_at(constant_pool_entry) = target;
+ }
}
@@ -490,6 +525,10 @@
ASSERT(IsMovW(Memory::int32_at(pc)));
ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
CPU::FlushICache(pc, 2 * kInstrSize);
+ } else if (FLAG_enable_ool_constant_pool) {
+ ASSERT(IsLdrPpImmediateOffset(Memory::int32_at(pc)));
+ Memory::Address_at(
+ target_constant_pool_address_at(pc, constant_pool)) = target;
} else {
ASSERT(IsLdrPcImmediateOffset(Memory::int32_at(pc)));
Memory::Address_at(target_pointer_address_at(pc)) = target;
=======================================
--- /branches/bleeding_edge/src/arm/assembler-arm.cc Wed Mar 12 15:56:16 2014 UTC
+++ /branches/bleeding_edge/src/arm/assembler-arm.cc Wed Mar 19 09:38:20 2014 UTC
@@ -293,15 +293,20 @@
bool RelocInfo::IsCodedSpecially() {
-  // The deserializer needs to know whether a pointer is specially coded. Being
-  // specially coded on ARM means that it is a movw/movt instruction. We don't
-  // generate those yet.
-  return false;
+  // The deserializer needs to know whether a pointer is specially coded. Being
+  // specially coded on ARM means that it is a movw/movt instruction, or is an
+  // out of line constant pool entry. These only occur if
+  // FLAG_enable_ool_constant_pool is true.
+  return FLAG_enable_ool_constant_pool;
}
bool RelocInfo::IsInConstantPool() {
- return Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_));
+ if (FLAG_enable_ool_constant_pool) {
+ return Assembler::IsLdrPpImmediateOffset(Memory::int32_at(pc_));
+ } else {
+ return Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_));
+ }
}
@@ -480,9 +485,15 @@
// ldr rd, [pc, #offset]
const Instr kLdrPCMask = 15 * B24 | 7 * B20 | 15 * B16;
const Instr kLdrPCPattern = 5 * B24 | L | kRegister_pc_Code * B16;
+// ldr rd, [pp, #offset]
+const Instr kLdrPpMask = 15 * B24 | 7 * B20 | 15 * B16;
+const Instr kLdrPpPattern = 5 * B24 | L | kRegister_r8_Code * B16;
// vldr dd, [pc, #offset]
const Instr kVldrDPCMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
 const Instr kVldrDPCPattern = 13 * B24 | L | kRegister_pc_Code * B16 | 11 * B8;
+// vldr dd, [pp, #offset]
+const Instr kVldrDPpMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
+const Instr kVldrDPpPattern = 13 * B24 | L | kRegister_r8_Code * B16 | 11 * B8;
// blxcc rm
const Instr kBlxRegMask =
15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
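The new kLdrPp* constants follow the same recipe as the pc-relative ones,
with pp aliased to r8. As a sanity check, here is a standalone sketch that
hand-assembles ldr r0, [r8, #8] from the standard ARM single-data-transfer
encoding and matches it against the pattern (values copied from the hunk
above):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t B16 = 1 << 16, B20 = 1 << 20, B24 = 1 << 24;
      const uint32_t L = B20;                    // load bit
      const uint32_t kRegister_r8_Code = 8;
      const uint32_t kLdrPpMask = 15 * B24 | 7 * B20 | 15 * B16;
      const uint32_t kLdrPpPattern = 5 * B24 | L | kRegister_r8_Code * B16;
      // ldr r0, [r8, #8] with cond=AL:
      // 1110 0101 1001 1000 0000 0000 0000 1000
      uint32_t instr = 0xE5980008;
      assert((instr & kLdrPpMask) == kLdrPpPattern);
    }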
@@ -520,6 +531,7 @@
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
: AssemblerBase(isolate, buffer, buffer_size),
recorded_ast_id_(TypeFeedbackId::None()),
+ constant_pool_builder_(),
positions_recorder_(this) {
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
num_pending_32_bit_reloc_info_ = 0;
@@ -542,11 +554,12 @@
void Assembler::GetCode(CodeDesc* desc) {
- // Emit constant pool if necessary.
- CheckConstPool(true, false);
- ASSERT(num_pending_32_bit_reloc_info_ == 0);
- ASSERT(num_pending_64_bit_reloc_info_ == 0);
-
+ if (!FLAG_enable_ool_constant_pool) {
+ // Emit constant pool if necessary.
+ CheckConstPool(true, false);
+ ASSERT(num_pending_32_bit_reloc_info_ == 0);
+ ASSERT(num_pending_64_bit_reloc_info_ == 0);
+ }
// Set up code descriptor.
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
@@ -727,6 +740,13 @@
// ldr<cond> <Rd>, [pc +/- offset_12].
return (instr & kLdrPCMask) == kLdrPCPattern;
}
+
+
+bool Assembler::IsLdrPpImmediateOffset(Instr instr) {
+ // Check the instruction is indeed a
+ // ldr<cond> <Rd>, [pp +/- offset_12].
+ return (instr & kLdrPpMask) == kLdrPpPattern;
+}
bool Assembler::IsVldrDPcImmediateOffset(Instr instr) {
@@ -734,6 +754,13 @@
// vldr<cond> <Dd>, [pc +/- offset_10].
return (instr & kVldrDPCMask) == kVldrDPCPattern;
}
+
+
+bool Assembler::IsVldrDPpImmediateOffset(Instr instr) {
+ // Check the instruction is indeed a
+ // vldr<cond> <Dd>, [pp +/- offset_10].
+ return (instr & kVldrDPpMask) == kVldrDPpPattern;
+}
bool Assembler::IsTstImmediate(Instr instr) {
@@ -1063,7 +1090,12 @@
static bool use_mov_immediate_load(const Operand& x,
const Assembler* assembler) {
- if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
+ if (assembler != NULL && !assembler->can_use_constant_pool()) {
+    // If there is no constant pool available, we must use a mov immediate.
+ // TODO(rmcilroy): enable ARMv6 support.
+ ASSERT(CpuFeatures::IsSupported(ARMv7));
+ return true;
+ } else if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
      (assembler == NULL || !assembler->predictable_code_size())) {
    // Prefer movw / movt to constant pool if it is more efficient on the CPU.
return true;
@@ -1106,22 +1138,30 @@
void Assembler::move_32_bit_immediate(Register rd,
const Operand& x,
Condition cond) {
- if (rd.code() != pc.code()) {
- if (use_mov_immediate_load(x, this)) {
- if (x.must_output_reloc_info(this)) {
- RecordRelocInfo(x.rmode_, x.imm32_, DONT_USE_CONSTANT_POOL);
- // Make sure the movw/movt doesn't get separated.
- BlockConstPoolFor(2);
- }
- emit(cond | 0x30*B20 | rd.code()*B12 |
- EncodeMovwImmediate(x.imm32_ & 0xffff));
- movt(rd, static_cast<uint32_t>(x.imm32_) >> 16, cond);
- return;
- }
+ RelocInfo rinfo(pc_, x.rmode_, x.imm32_, NULL);
+ if (x.must_output_reloc_info(this)) {
+ RecordRelocInfo(rinfo);
}
- RecordRelocInfo(x.rmode_, x.imm32_, USE_CONSTANT_POOL);
- ldr(rd, MemOperand(pc, 0), cond);
+ if (use_mov_immediate_load(x, this)) {
+ Register target = rd.code() == pc.code() ? ip : rd;
+ // TODO(rmcilroy): add ARMv6 support for immediate loads.
+ ASSERT(CpuFeatures::IsSupported(ARMv7));
+ if (!FLAG_enable_ool_constant_pool && x.must_output_reloc_info(this)) {
+ // Make sure the movw/movt doesn't get separated.
+ BlockConstPoolFor(2);
+ }
+ emit(cond | 0x30*B20 | target.code()*B12 |
+ EncodeMovwImmediate(x.imm32_ & 0xffff));
+ movt(target, static_cast<uint32_t>(x.imm32_) >> 16, cond);
+ if (target.code() != rd.code()) {
+ mov(rd, target, LeaveCC, cond);
+ }
+ } else {
+ ASSERT(can_use_constant_pool());
+ ConstantPoolAddEntry(rinfo);
+ ldr(rd, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0), cond);
+ }
}
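For the mov-immediate path, the 32-bit value is split across a movw (low
half) and a movt (high half), which is why the pair must not be separated by
a pool: both instructions are patched as a unit. A minimal sketch of the
split:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t imm = 0xDEADBEEF;                       // arbitrary example
      uint16_t lo = imm & 0xFFFF;                      // movw rd, #0xBEEF
      uint16_t hi = imm >> 16;                         // movt rd, #0xDEAD
      assert(((static_cast<uint32_t>(hi) << 16) | lo) == imm);
    }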
@@ -2421,7 +2461,7 @@
int vd, d;
dst.split_code(&vd, &d);
emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc);
- } else if (FLAG_enable_vldr_imm) {
+ } else if (FLAG_enable_vldr_imm && can_use_constant_pool()) {
// TODO(jfb) Temporarily turned off until we have constant blinding or
    //          some equivalent mitigation: an attacker can otherwise control
    //          generated data which also happens to be executable, a Very Bad
@@ -2437,8 +2477,9 @@
// The code could also randomize the order of values, though
    //          that's tricky because vldr has a limited reach. Furthermore
    //          it breaks load locality.
- RecordRelocInfo(imm);
- vldr(dst, MemOperand(pc, 0));
+ RelocInfo rinfo(pc_, imm);
+ ConstantPoolAddEntry(rinfo);
+ vldr(dst, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0));
} else {
// Synthesise the double from ARM immediates.
uint32_t lo, hi;
@@ -3168,6 +3209,7 @@
ASSERT(rinfo.rmode() == RelocInfo::NONE64);
rinfo.set_pc(rinfo.pc() + pc_delta);
}
+ constant_pool_builder_.Relocate(pc_delta);
}
@@ -3203,28 +3245,16 @@
}
-void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
- UseConstantPoolMode mode) {
- // We do not try to reuse pool constants.
+void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
RelocInfo rinfo(pc_, rmode, data, NULL);
- if (((rmode >= RelocInfo::JS_RETURN) &&
- (rmode <= RelocInfo::DEBUG_BREAK_SLOT)) ||
- (rmode == RelocInfo::CONST_POOL) ||
- mode == DONT_USE_CONSTANT_POOL) {
- // Adjust code for new modes.
- ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
- || RelocInfo::IsJSReturn(rmode)
- || RelocInfo::IsComment(rmode)
- || RelocInfo::IsPosition(rmode)
- || RelocInfo::IsConstPool(rmode)
- || mode == DONT_USE_CONSTANT_POOL);
- // These modes do not need an entry in the constant pool.
- } else {
- RecordRelocInfoConstantPoolEntryHelper(rinfo);
- }
+ RecordRelocInfo(rinfo);
+}
+
+
+void Assembler::RecordRelocInfo(const RelocInfo& rinfo) {
if (!RelocInfo::IsNone(rinfo.rmode())) {
// Don't record external references unless the heap will be serialized.
- if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
+ if (rinfo.rmode() == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
if (!Serializer::enabled()) {
Serializer::TooLateToEnableNow();
@@ -3235,9 +3265,9 @@
}
}
   ASSERT(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
- if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
- RelocInfo reloc_info_with_ast_id(pc_,
- rmode,
+ if (rinfo.rmode() == RelocInfo::CODE_TARGET_WITH_ID) {
+ RelocInfo reloc_info_with_ast_id(rinfo.pc(),
+ rinfo.rmode(),
RecordedAstId().ToInt(),
NULL);
ClearRecordedAstId();
@@ -3249,34 +3279,38 @@
}
-void Assembler::RecordRelocInfo(double data) {
- // We do not try to reuse pool constants.
- RelocInfo rinfo(pc_, data);
- RecordRelocInfoConstantPoolEntryHelper(rinfo);
-}
-
-
-void Assembler::RecordRelocInfoConstantPoolEntryHelper(const RelocInfo& rinfo) {
- if (rinfo.rmode() == RelocInfo::NONE64) {
- ASSERT(num_pending_64_bit_reloc_info_ < kMaxNumPending64RelocInfo);
- if (num_pending_64_bit_reloc_info_ == 0) {
- first_const_pool_64_use_ = pc_offset();
- }
- pending_64_bit_reloc_info_[num_pending_64_bit_reloc_info_++] = rinfo;
+void Assembler::ConstantPoolAddEntry(const RelocInfo& rinfo) {
+ if (FLAG_enable_ool_constant_pool) {
+ constant_pool_builder_.AddEntry(this, rinfo);
} else {
- ASSERT(num_pending_32_bit_reloc_info_ < kMaxNumPending32RelocInfo);
- if (num_pending_32_bit_reloc_info_ == 0) {
- first_const_pool_32_use_ = pc_offset();
+ if (rinfo.rmode() == RelocInfo::NONE64) {
+ ASSERT(num_pending_64_bit_reloc_info_ < kMaxNumPending64RelocInfo);
+ if (num_pending_64_bit_reloc_info_ == 0) {
+ first_const_pool_64_use_ = pc_offset();
+ }
+ pending_64_bit_reloc_info_[num_pending_64_bit_reloc_info_++] = rinfo;
+ } else {
+ ASSERT(num_pending_32_bit_reloc_info_ < kMaxNumPending32RelocInfo);
+ if (num_pending_32_bit_reloc_info_ == 0) {
+ first_const_pool_32_use_ = pc_offset();
+ }
+ pending_32_bit_reloc_info_[num_pending_32_bit_reloc_info_++] = rinfo;
}
- pending_32_bit_reloc_info_[num_pending_32_bit_reloc_info_++] = rinfo;
+ // Make sure the constant pool is not emitted in place of the next
+ // instruction for which we just recorded relocation info.
+ BlockConstPoolFor(1);
}
- // Make sure the constant pool is not emitted in place of the next
- // instruction for which we just recorded relocation info.
- BlockConstPoolFor(1);
}
void Assembler::BlockConstPoolFor(int instructions) {
+ if (FLAG_enable_ool_constant_pool) {
+ // Should be a no-op if using an out-of-line constant pool.
+ ASSERT(num_pending_32_bit_reloc_info_ == 0);
+ ASSERT(num_pending_64_bit_reloc_info_ == 0);
+ return;
+ }
+
int pc_limit = pc_offset() + instructions * kInstrSize;
if (no_const_pool_before_ < pc_limit) {
// Max pool start (if we need a jump and an alignment).
@@ -3298,6 +3332,13 @@
void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
+ if (FLAG_enable_ool_constant_pool) {
+ // Should be a no-op if using an out-of-line constant pool.
+ ASSERT(num_pending_32_bit_reloc_info_ == 0);
+ ASSERT(num_pending_64_bit_reloc_info_ == 0);
+ return;
+ }
+
  // Some short sequences of instructions mustn't be broken up by constant pool
  // emission; such sequences are protected by calls to BlockConstPoolFor and
  // BlockConstPoolScope.
@@ -3493,6 +3534,195 @@
// the standard interval.
next_buffer_check_ = pc_offset() + kCheckPoolInterval;
}
+
+
+MaybeObject* Assembler::AllocateConstantPool(Heap* heap) {
+ ASSERT(FLAG_enable_ool_constant_pool);
+ return constant_pool_builder_.Allocate(heap);
+}
+
+
+void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
+ ASSERT(FLAG_enable_ool_constant_pool);
+ constant_pool_builder_.Populate(this, constant_pool);
+}
+
+
+ConstantPoolBuilder::ConstantPoolBuilder()
+ : entries_(),
+ merged_indexes_(),
+ count_of_64bit_(0),
+ count_of_code_ptr_(0),
+ count_of_heap_ptr_(0),
+ count_of_32bit_(0) { }
+
+
+bool ConstantPoolBuilder::IsEmpty() {
+ return entries_.size() == 0;
+}
+
+
+bool ConstantPoolBuilder::Is64BitEntry(RelocInfo::Mode rmode) {
+ return rmode == RelocInfo::NONE64;
+}
+
+
+bool ConstantPoolBuilder::Is32BitEntry(RelocInfo::Mode rmode) {
+ return !RelocInfo::IsGCRelocMode(rmode) && rmode != RelocInfo::NONE64;
+}
+
+
+bool ConstantPoolBuilder::IsCodePtrEntry(RelocInfo::Mode rmode) {
+ return RelocInfo::IsCodeTarget(rmode);
+}
+
+
+bool ConstantPoolBuilder::IsHeapPtrEntry(RelocInfo::Mode rmode) {
+  return RelocInfo::IsGCRelocMode(rmode) && !RelocInfo::IsCodeTarget(rmode);
+}
+
+
+void ConstantPoolBuilder::AddEntry(Assembler* assm,
+ const RelocInfo& rinfo) {
+ RelocInfo::Mode rmode = rinfo.rmode();
+ ASSERT(rmode != RelocInfo::COMMENT &&
+ rmode != RelocInfo::POSITION &&
+ rmode != RelocInfo::STATEMENT_POSITION &&
+ rmode != RelocInfo::CONST_POOL);
+
+ // Try to merge entries which won't be patched.
+ int merged_index = -1;
+ if (RelocInfo::IsNone(rmode) ||
+ (!Serializer::enabled() && (rmode >= RelocInfo::CELL))) {
+ size_t i;
+ std::vector<RelocInfo>::const_iterator it;
+ for (it = entries_.begin(), i = 0; it != entries_.end(); it++, i++) {
+ if (RelocInfo::IsEqual(rinfo, *it)) {
+ merged_index = i;
+ break;
+ }
+ }
+ }
+
+ entries_.push_back(rinfo);
+ merged_indexes_.push_back(merged_index);
+
+ if (merged_index == -1) {
+ // Not merged, so update the appropriate count.
+ if (Is64BitEntry(rmode)) {
+ count_of_64bit_++;
+ } else if (Is32BitEntry(rmode)) {
+ count_of_32bit_++;
+ } else if (IsCodePtrEntry(rmode)) {
+ count_of_code_ptr_++;
+ } else {
+ ASSERT(IsHeapPtrEntry(rmode));
+ count_of_heap_ptr_++;
+ }
+ }
+
+  // Check if we still have room for another entry given ARM's ldr and vldr
+  // immediate offset range.
+  if (!(is_uint12(ConstantPoolArray::SizeFor(count_of_64bit_,
+                                             count_of_code_ptr_,
+                                             count_of_heap_ptr_,
+                                             count_of_32bit_)) &&
+        is_uint10(ConstantPoolArray::SizeFor(count_of_64bit_, 0, 0, 0)))) {
+ assm->set_constant_pool_full();
+ }
+}
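The size check reflects the two addressing reaches involved: ldr's unsigned
12-bit byte offset (4 KB) bounds the whole pool, while vldr's 8-bit
word-scaled offset bounds the doubles, which sit first in the array (hence
the is_uint10 test on their byte size). A standalone illustration of why
is_uint10 is the right test for vldr:

    #include <cassert>

    int main() {
      // vldr encodes imm8 words; the byte offset is imm8 * 4, so the largest
      // reachable byte offset is 255 * 4 = 1020, which needs 10 bits.
      int max_vldr_bytes = 255 * 4;
      assert(max_vldr_bytes == 1020 && max_vldr_bytes < (1 << 10));
      // ldr encodes a 12-bit byte offset directly: up to 4095 bytes.
      int max_ldr_bytes = (1 << 12) - 1;
      assert(max_ldr_bytes == 4095);
    }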
+
+
+void ConstantPoolBuilder::Relocate(int pc_delta) {
+ for (std::vector<RelocInfo>::iterator rinfo = entries_.begin();
+ rinfo != entries_.end(); rinfo++) {
+ ASSERT(rinfo->rmode() != RelocInfo::JS_RETURN);
+ rinfo->set_pc(rinfo->pc() + pc_delta);
+ }
+}
+
+
+MaybeObject* ConstantPoolBuilder::Allocate(Heap* heap) {
+ if (IsEmpty()) {
+ return heap->empty_constant_pool_array();
+ } else {
+    return heap->AllocateConstantPoolArray(count_of_64bit_, count_of_code_ptr_,
+                                           count_of_heap_ptr_, count_of_32bit_);
+ }
+}
+
+
+void ConstantPoolBuilder::Populate(Assembler* assm,
+ ConstantPoolArray* constant_pool) {
+ ASSERT(constant_pool->count_of_int64_entries() == count_of_64bit_);
+ ASSERT(constant_pool->count_of_code_ptr_entries() == count_of_code_ptr_);
+ ASSERT(constant_pool->count_of_heap_ptr_entries() == count_of_heap_ptr_);
+ ASSERT(constant_pool->count_of_int32_entries() == count_of_32bit_);
+ ASSERT(entries_.size() == merged_indexes_.size());
+
+ int index_64bit = 0;
+ int index_code_ptr = count_of_64bit_;
+ int index_heap_ptr = count_of_64bit_ + count_of_code_ptr_;
+  int index_32bit = count_of_64bit_ + count_of_code_ptr_ + count_of_heap_ptr_;
+
+ size_t i;
+ std::vector<RelocInfo>::const_iterator rinfo;
+  for (rinfo = entries_.begin(), i = 0; rinfo != entries_.end(); rinfo++, i++) {
+ RelocInfo::Mode rmode = rinfo->rmode();
+
+ // Update constant pool if necessary and get the entry's offset.
+ int offset;
+ if (merged_indexes_[i] == -1) {
+ if (Is64BitEntry(rmode)) {
+        offset = constant_pool->OffsetOfElementAt(index_64bit) - kHeapObjectTag;
+        constant_pool->set(index_64bit++, rinfo->data64());
+      } else if (Is32BitEntry(rmode)) {
+        offset = constant_pool->OffsetOfElementAt(index_32bit) - kHeapObjectTag;
+        constant_pool->set(index_32bit++, static_cast<int32_t>(rinfo->data()));
+ } else if (IsCodePtrEntry(rmode)) {
+ offset = constant_pool->OffsetOfElementAt(index_code_ptr) -
+ kHeapObjectTag;
+ constant_pool->set(index_code_ptr++,
+ reinterpret_cast<Object *>(rinfo->data()));
+ } else {
+ ASSERT(IsHeapPtrEntry(rmode));
+ offset = constant_pool->OffsetOfElementAt(index_heap_ptr) -
+ kHeapObjectTag;
+ constant_pool->set(index_heap_ptr++,
+ reinterpret_cast<Object *>(rinfo->data()));
+ }
+ merged_indexes_[i] = offset; // Stash offset for merged entries.
+ } else {
+ size_t merged_index = static_cast<size_t>(merged_indexes_[i]);
+ ASSERT(merged_index < merged_indexes_.size() && merged_index < i);
+ offset = merged_indexes_[merged_index];
+ }
+
+ // Patch vldr/ldr instruction with correct offset.
+ Instr instr = assm->instr_at(rinfo->pc());
+ if (Is64BitEntry(rmode)) {
+      // Instruction to patch must be 'vldr dd, [pp, #0]'.
+ ASSERT((Assembler::IsVldrDPpImmediateOffset(instr) &&
+ Assembler::GetVldrDRegisterImmediateOffset(instr) == 0));
+ ASSERT(is_uint10(offset));
+ assm->instr_at_put(rinfo->pc(),
+ Assembler::SetVldrDRegisterImmediateOffset(instr, offset));
+ } else {
+ // Instruction to patch must be 'ldr rd, [pp, #0]'.
+ ASSERT((Assembler::IsLdrPpImmediateOffset(instr) &&
+ Assembler::GetLdrRegisterImmediateOffset(instr) == 0));
+ ASSERT(is_uint12(offset));
+ assm->instr_at_put(rinfo->pc(),
+ Assembler::SetLdrRegisterImmediateOffset(instr, offset));
+ }
+ }
+
+ ASSERT((index_64bit == count_of_64bit_) &&
+ (index_code_ptr == (index_64bit + count_of_code_ptr_)) &&
+ (index_heap_ptr == (index_code_ptr + count_of_heap_ptr_)) &&
+ (index_32bit == (index_heap_ptr + count_of_32bit_)));
+}
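Merged entries never allocate a new slot: the first occurrence stashes its
final offset back into merged_indexes_, and later duplicates patch their
loads with that same offset. A compact standalone model of that bookkeeping
(illustrative values only):

    #include <cassert>
    #include <vector>

    int main() {
      // merged[i] starts as -1 (unique) or the index of an earlier identical
      // entry; during "Populate" it is overwritten with the slot's offset.
      std::vector<int> merged = {-1, -1, 0};   // entry 2 duplicates entry 0
      std::vector<int> offsets;
      int next_offset = 0;
      for (size_t i = 0; i < merged.size(); i++) {
        int offset;
        if (merged[i] == -1) {
          offset = next_offset;
          next_offset += 4;                    // one 32-bit slot, say
          merged[i] = offset;                  // stash for later duplicates
        } else {
          offset = merged[merged[i]];          // reuse the earlier slot's offset
        }
        offsets.push_back(offset);
      }
      assert(offsets[2] == offsets[0]);        // duplicate shares the slot
    }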
} } // namespace v8::internal
=======================================
--- /branches/bleeding_edge/src/arm/assembler-arm.h Wed Mar 12 15:56:16 2014 UTC
+++ /branches/bleeding_edge/src/arm/assembler-arm.h Wed Mar 19 09:38:20 2014 UTC
@@ -39,7 +39,10 @@
#ifndef V8_ARM_ASSEMBLER_ARM_H_
#define V8_ARM_ASSEMBLER_ARM_H_
+
#include <stdio.h>
+#include <vector>
+
#include "assembler.h"
#include "constants-arm.h"
#include "serialize.h"
@@ -702,9 +705,42 @@
NeonListType type_;
};
+
+// Class used to build a constant pool.
+class ConstantPoolBuilder BASE_EMBEDDED {
+ public:
+  ConstantPoolBuilder();
+ void AddEntry(Assembler* assm, const RelocInfo& rinfo);
+ void Relocate(int pc_delta);
+ bool IsEmpty();
+ MaybeObject* Allocate(Heap* heap);
+ void Populate(Assembler* assm, ConstantPoolArray* constant_pool);
+
+ inline int count_of_64bit() const { return count_of_64bit_; }
+ inline int count_of_code_ptr() const { return count_of_code_ptr_; }
+ inline int count_of_heap_ptr() const { return count_of_heap_ptr_; }
+ inline int count_of_32bit() const { return count_of_32bit_; }
+
+ private:
+ bool Is64BitEntry(RelocInfo::Mode rmode);
+ bool Is32BitEntry(RelocInfo::Mode rmode);
+ bool IsCodePtrEntry(RelocInfo::Mode rmode);
+ bool IsHeapPtrEntry(RelocInfo::Mode rmode);
+
+ std::vector<RelocInfo> entries_;
+ std::vector<int> merged_indexes_;
+ int count_of_64bit_;
+ int count_of_code_ptr_;
+ int count_of_heap_ptr_;
+ int count_of_32bit_;
+};
+
+
extern const Instr kMovLrPc;
extern const Instr kLdrPCMask;
extern const Instr kLdrPCPattern;
+extern const Instr kLdrPpMask;
+extern const Instr kLdrPpPattern;
extern const Instr kBlxRegMask;
extern const Instr kBlxRegPattern;
extern const Instr kBlxIp;
@@ -1413,6 +1449,8 @@
static int GetBranchOffset(Instr instr);
static bool IsLdrRegisterImmediate(Instr instr);
static bool IsVldrDRegisterImmediate(Instr instr);
+ static bool IsLdrPpImmediateOffset(Instr instr);
+ static bool IsVldrDPpImmediateOffset(Instr instr);
static int GetLdrRegisterImmediateOffset(Instr instr);
static int GetVldrDRegisterImmediateOffset(Instr instr);
static Instr SetLdrRegisterImmediateOffset(Instr instr, int offset);
@@ -1458,6 +1496,12 @@
// Check if is time to emit a constant pool.
void CheckConstPool(bool force_emit, bool require_jump);
+ // Allocate a constant pool of the correct size for the generated code.
+ MaybeObject* AllocateConstantPool(Heap* heap);
+
+ // Generate the constant pool for the generated code.
+ void PopulateConstantPool(ConstantPoolArray* constant_pool);
+
bool can_use_constant_pool() const {
return is_constant_pool_available() && !constant_pool_full_;
}
@@ -1584,6 +1628,8 @@
// Number of pending reloc info entries in the 64 bits buffer.
int num_pending_64_bit_reloc_info_;
+ ConstantPoolBuilder constant_pool_builder_;
+
// The bound position, before this we cannot do instruction elimination.
int last_bound_pos_;
@@ -1622,10 +1668,9 @@
};
// Record reloc info for current pc_
- void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0,
- UseConstantPoolMode mode = USE_CONSTANT_POOL);
- void RecordRelocInfo(double data);
- void RecordRelocInfoConstantPoolEntryHelper(const RelocInfo& rinfo);
+ void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+ void RecordRelocInfo(const RelocInfo& rinfo);
+ void ConstantPoolAddEntry(const RelocInfo& rinfo);
friend class RelocInfo;
friend class CodePatcher;
=======================================
--- /branches/bleeding_edge/src/arm/builtins-arm.cc Fri Mar 14 15:11:58 2014 UTC
+++ /branches/bleeding_edge/src/arm/builtins-arm.cc Wed Mar 19 09:38:20 2014 UTC
@@ -962,20 +962,26 @@
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
-  __ ldr(r1, MemOperand(r0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+ __ ldr(r1, FieldMemOperand(r0, Code::kDeoptimizationDataOffset));
- // Load the OSR entrypoint offset from the deoptimization data.
- // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
- __ ldr(r1, MemOperand(r1, FixedArray::OffsetOfElementAt(
- DeoptimizationInputData::kOsrPcOffsetIndex) - kHeapObjectTag));
+ { ConstantPoolUnavailableScope constant_pool_unavailable(masm);
+ if (FLAG_enable_ool_constant_pool) {
+ __ ldr(pp, FieldMemOperand(r0, Code::kConstantPoolOffset));
+ }
- // Compute the target address = code_obj + header_size + osr_offset
- // <entry_addr> = <code_obj> + #header_size + <osr_offset>
- __ add(r0, r0, Operand::SmiUntag(r1));
- __ add(lr, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // Load the OSR entrypoint offset from the deoptimization data.
+ // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
+ __ ldr(r1, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(
+ DeoptimizationInputData::kOsrPcOffsetIndex)));
+
+ // Compute the target address = code_obj + header_size + osr_offset
+ // <entry_addr> = <code_obj> + #header_size + <osr_offset>
+ __ add(r0, r0, Operand::SmiUntag(r1));
+ __ add(lr, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
- // And "return" to the OSR entry point of the function.
- __ Ret();
+ // And "return" to the OSR entry point of the function.
+ __ Ret();
+ }
}
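FieldMemOperand(reg, offset) is V8's shorthand for MemOperand(reg, offset -
kHeapObjectTag), so the rewritten loads are equivalent to the old explicit
-kHeapObjectTag forms. The final OSR entry address then comes out as in this
standalone sketch (hypothetical numbers):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uintptr_t kHeapObjectTag = 1;
      const uintptr_t kHeaderSize = 0x80;       // stand-in for Code::kHeaderSize
      uintptr_t code_obj = 0x10000000 + kHeapObjectTag;  // tagged Code ptr (r0)
      uintptr_t osr_offset = 0x40;              // value after SmiUntag
      // add r0, r0, osr_offset ; add lr, r0, #(kHeaderSize - kHeapObjectTag)
      uintptr_t entry = code_obj + osr_offset + (kHeaderSize - kHeapObjectTag);
      assert(entry == 0x10000000 + kHeaderSize + osr_offset);  // untagged
    }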
=======================================
--- /branches/bleeding_edge/src/arm/code-stubs-arm.cc Mon Mar 17 15:43:33 2014 UTC
+++ /branches/bleeding_edge/src/arm/code-stubs-arm.cc Wed Mar 19 09:38:20 2014 UTC
@@ -1777,7 +1777,7 @@
Isolate* isolate = masm->isolate();
   int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
if (FLAG_enable_ool_constant_pool) {
- __ mov(r8, Operand(Smi::FromInt(marker)));
+ __ mov(r8, Operand(isolate->factory()->empty_constant_pool_array()));
}
__ mov(r7, Operand(Smi::FromInt(marker)));
__ mov(r6, Operand(Smi::FromInt(marker)));
=======================================
--- /branches/bleeding_edge/src/arm/full-codegen-arm.cc Tue Mar 18 11:36:19 2014 UTC
+++ /branches/bleeding_edge/src/arm/full-codegen-arm.cc Wed Mar 19 09:38:20 2014 UTC
@@ -4835,8 +4835,7 @@
load_address -= Assembler::kInstrSize;
ASSERT(Assembler::IsMovW(Memory::int32_at(load_address)));
} else {
- // TODO(rmcilroy): uncomment when IsLdrPpImmediateOffset lands.
- // ASSERT(IsLdrPpImmediateOffset(Memory::int32_at(load_address)));
+    ASSERT(Assembler::IsLdrPpImmediateOffset(Memory::int32_at(load_address)));
}
return load_address;
}
=======================================
--- /branches/bleeding_edge/src/arm/macro-assembler-arm.cc Wed Mar 19 07:01:08 2014 UTC
+++ /branches/bleeding_edge/src/arm/macro-assembler-arm.cc Wed Mar 19 09:38:20 2014 UTC
@@ -132,6 +132,10 @@
if (mode == NEVER_INLINE_TARGET_ADDRESS) {
set_predictable_code_size(true);
}
+
+  // Check the expected size before generating code to ensure we assume the same
+  // constant pool availability (e.g., whether constant pool is full or not).
+ int expected_size = CallSize(target, rmode, cond);
// Call sequence on V7 or later may be :
// movw ip, #... @ call address low 16
@@ -153,7 +157,7 @@
mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode));
blx(ip, cond);
-  ASSERT_EQ(CallSize(target, rmode, cond), SizeOfCodeGeneratedSince(&start));
+ ASSERT_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
if (mode == NEVER_INLINE_TARGET_ADDRESS) {
set_predictable_code_size(old_predictable_code_size);
}
@@ -1054,6 +1058,8 @@
void MacroAssembler::LeaveExitFrame(bool save_doubles,
Register argument_count,
bool restore_context) {
+ ConstantPoolUnavailableScope constant_pool_unavailable(this);
+
// Optionally restore all double registers.
if (save_doubles) {
// Calculate the stack location of the saved doubles and restore them.
@@ -1068,7 +1074,6 @@
   mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
str(r3, MemOperand(ip));
-
// Restore current context from top and clear it in debug mode.
if (restore_context) {
     mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
@@ -1375,6 +1380,11 @@
   // Compute the handler entry address and jump to it. The handler table is
   // a fixed array of (smi-tagged) code offsets.
// r0 = exception, r1 = code object, r2 = state.
+
+ ConstantPoolUnavailableScope constant_pool_unavailable(this);
+ if (FLAG_enable_ool_constant_pool) {
+    ldr(pp, FieldMemOperand(r1, Code::kConstantPoolOffset));  // Constant pool.
+  }
   ldr(r3, FieldMemOperand(r1, Code::kHandlerTableOffset));  // Handler table.
add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
mov(r2, Operand(r2, LSR, StackHandler::kKindWidth)); // Handler index.
@@ -3555,22 +3565,31 @@
void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
- Register result) {
+ Register result) {
const uint32_t kLdrOffsetMask = (1 << 12) - 1;
- const int32_t kPCRegOffset = 2 * kPointerSize;
ldr(result, MemOperand(ldr_location));
if (emit_debug_code()) {
- // Check that the instruction is a ldr reg, [pc + offset] .
- and_(result, result, Operand(kLdrPCPattern));
- cmp(result, Operand(kLdrPCPattern));
- Check(eq, kTheInstructionToPatchShouldBeALoadFromPc);
+    // Check that the instruction is an ldr reg, [<pc or pp> + offset].
+ if (FLAG_enable_ool_constant_pool) {
+ and_(result, result, Operand(kLdrPpPattern));
+ cmp(result, Operand(kLdrPpPattern));
+ Check(eq, kTheInstructionToPatchShouldBeALoadFromPp);
+ } else {
+ and_(result, result, Operand(kLdrPCPattern));
+ cmp(result, Operand(kLdrPCPattern));
+ Check(eq, kTheInstructionToPatchShouldBeALoadFromPc);
+ }
// Result was clobbered. Restore it.
ldr(result, MemOperand(ldr_location));
}
// Get the address of the constant.
and_(result, result, Operand(kLdrOffsetMask));
- add(result, ldr_location, Operand(result));
- add(result, result, Operand(kPCRegOffset));
+ if (FLAG_enable_ool_constant_pool) {
+ add(result, pp, Operand(result));
+ } else {
+ add(result, ldr_location, Operand(result));
+ add(result, result, Operand(Instruction::kPCReadOffset));
+ }
}
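Instruction::kPCReadOffset exists because on ARM a pc-relative load sees pc
as the current instruction's address plus 8 (two instructions ahead); it
plays the role of the removed kPCRegOffset (2 * kPointerSize, i.e. 8). The
pp-relative branch needs no such correction, since pp holds an ordinary base
address. A standalone illustration:

    #include <cassert>
    #include <cstdint>

    int main() {
      const int kPCReadOffset = 8;              // ARM: pc reads as addr + 8
      uintptr_t ldr_location = 0x2000;          // address of ldr rd, [pc, #off]
      int offset = 0x10;                        // low 12 bits of the instruction
      // The address the hardware actually loads from:
      uintptr_t target = ldr_location + kPCReadOffset + offset;
      assert(target == 0x2018);
      // pp-relative: target = pp + offset, with no pipeline bias involved.
    }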
=======================================
--- /branches/bleeding_edge/src/assembler.h Fri Mar 14 15:14:42 2014 UTC
+++ /branches/bleeding_edge/src/assembler.h Wed Mar 19 09:38:20 2014 UTC
@@ -374,6 +374,15 @@
return mode == CODE_AGE_SEQUENCE;
}
static inline int ModeMask(Mode mode) { return 1 << mode; }
+
+  // Returns true if the first RelocInfo has the same mode and raw data as the
+  // second one.
+ static inline bool IsEqual(RelocInfo first, RelocInfo second) {
+ return first.rmode() == second.rmode() &&
+ (first.rmode() == RelocInfo::NONE64 ?
+ first.raw_data64() == second.raw_data64() :
+ first.data() == second.data());
+ }
// Accessors
byte* pc() const { return pc_; }
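The NONE64 special case matters on 32-bit targets, where data() returns an
intptr_t and cannot hold a double's full 64-bit payload; comparing
raw_data64() compares the complete bit pattern, so only bit-identical doubles
get merged. For example:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      double a = 0.0, b = -0.0;                // numerically equal, different bits
      uint64_t ra, rb;
      std::memcpy(&ra, &a, sizeof ra);
      std::memcpy(&rb, &b, sizeof rb);
      assert(a == b && ra != rb);              // so 0.0 and -0.0 are NOT merged
    }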
=======================================
--- /branches/bleeding_edge/src/heap.cc Wed Mar 19 09:27:42 2014 UTC
+++ /branches/bleeding_edge/src/heap.cc Wed Mar 19 09:38:20 2014 UTC
@@ -4047,11 +4047,19 @@
bool immovable,
bool crankshafted,
int prologue_offset) {
- // Allocate ByteArray before the Code object, so that we do not risk
- // leaving uninitialized Code object (and breaking the heap).
+  // Allocate ByteArray and ConstantPoolArray before the Code object, so that we
+ // do not risk leaving uninitialized Code object (and breaking the heap).
ByteArray* reloc_info;
   MaybeObject* maybe_reloc_info = AllocateByteArray(desc.reloc_size, TENURED);
if (!maybe_reloc_info->To(&reloc_info)) return maybe_reloc_info;
+
+ ConstantPoolArray* constant_pool;
+ if (FLAG_enable_ool_constant_pool) {
+    MaybeObject* maybe_constant_pool = desc.origin->AllocateConstantPool(this);
+    if (!maybe_constant_pool->To(&constant_pool)) return maybe_constant_pool;
+ } else {
+ constant_pool = empty_constant_pool_array();
+ }
// Compute size.
int body_size = RoundUp(desc.instr_size, kObjectAlignment);
@@ -4099,7 +4107,11 @@
if (code->kind() == Code::OPTIMIZED_FUNCTION) {
code->set_marked_for_deoptimization(false);
}
- code->set_constant_pool(empty_constant_pool_array());
+
+ if (FLAG_enable_ool_constant_pool) {
+ desc.origin->PopulateConstantPool(constant_pool);
+ }
+ code->set_constant_pool(constant_pool);
#ifdef ENABLE_DEBUGGER_SUPPORT
if (code->kind() == Code::FUNCTION) {
@@ -4130,9 +4142,20 @@
MaybeObject* Heap::CopyCode(Code* code) {
+ MaybeObject* maybe_result;
+ Object* new_constant_pool;
+ if (FLAG_enable_ool_constant_pool &&
+ code->constant_pool() != empty_constant_pool_array()) {
+ // Copy the constant pool, since edits to the copied code may modify
+ // the constant pool.
+ maybe_result = CopyConstantPoolArray(code->constant_pool());
+ if (!maybe_result->ToObject(&new_constant_pool)) return maybe_result;
+ } else {
+ new_constant_pool = empty_constant_pool_array();
+ }
+
// Allocate an object the same size as the code object.
int obj_size = code->Size();
- MaybeObject* maybe_result;
if (obj_size > code_space()->AreaSize()) {
maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
} else {
@@ -4146,8 +4169,12 @@
Address old_addr = code->address();
Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
CopyBlock(new_addr, old_addr, obj_size);
+ Code* new_code = Code::cast(result);
+
+ // Update the constant pool.
+ new_code->set_constant_pool(new_constant_pool);
+
// Relocate the copy.
- Code* new_code = Code::cast(result);
ASSERT(!isolate_->code_range()->exists() ||
isolate_->code_range()->contains(code->address()));
new_code->Relocate(new_addr - old_addr);
@@ -4156,8 +4183,8 @@
MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
- // Allocate ByteArray before the Code object, so that we do not risk
- // leaving uninitialized Code object (and breaking the heap).
+  // Allocate ByteArray and ConstantPoolArray before the Code object, so that we
+ // do not risk leaving uninitialized Code object (and breaking the heap).
Object* reloc_info_array;
{ MaybeObject* maybe_reloc_info_array =
AllocateByteArray(reloc_info.length(), TENURED);
@@ -4165,6 +4192,18 @@
return maybe_reloc_info_array;
}
}
+ Object* new_constant_pool;
+ if (FLAG_enable_ool_constant_pool &&
+ code->constant_pool() != empty_constant_pool_array()) {
+ // Copy the constant pool, since edits to the copied code may modify
+ // the constant pool.
+ MaybeObject* maybe_constant_pool =
+ CopyConstantPoolArray(code->constant_pool());
+ if (!maybe_constant_pool->ToObject(&new_constant_pool))
+ return maybe_constant_pool;
+ } else {
+ new_constant_pool = empty_constant_pool_array();
+ }
int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
@@ -4194,6 +4233,9 @@
Code* new_code = Code::cast(result);
new_code->set_relocation_info(ByteArray::cast(reloc_info_array));
+ // Update constant pool.
+ new_code->set_constant_pool(new_constant_pool);
+
// Copy patched rinfo.
CopyBytes(new_code->relocation_start(),
reloc_info.start(),
@@ -5310,7 +5352,7 @@
}
if (number_of_heap_ptr_entries > 0) {
int offset =
-        constant_pool->OffsetOfElementAt(constant_pool->first_code_ptr_index());
+        constant_pool->OffsetOfElementAt(constant_pool->first_heap_ptr_index());
MemsetPointer(
HeapObject::RawField(constant_pool, offset),
undefined_value(),
=======================================
--- /branches/bleeding_edge/src/ia32/assembler-ia32.cc Mon Mar 10 18:47:57 2014 UTC
+++ /branches/bleeding_edge/src/ia32/assembler-ia32.cc Wed Mar 19 09:38:20 2014 UTC
@@ -2715,6 +2715,19 @@
RelocInfo rinfo(pc_, rmode, data, NULL);
reloc_info_writer.Write(&rinfo);
}
+
+
+MaybeObject* Assembler::AllocateConstantPool(Heap* heap) {
+ // No out-of-line constant pool support.
+ UNREACHABLE();
+ return NULL;
+}
+
+
+void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
+ // No out-of-line constant pool support.
+ UNREACHABLE();
+}
#ifdef GENERATED_CODE_COVERAGE
=======================================
--- /branches/bleeding_edge/src/ia32/assembler-ia32.h Wed Mar 12 15:23:54 2014 UTC
+++ /branches/bleeding_edge/src/ia32/assembler-ia32.h Wed Mar 19 09:38:20 2014 UTC
@@ -1188,6 +1188,12 @@
byte byte_at(int pos) { return buffer_[pos]; }
void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
+
+ // Allocate a constant pool of the correct size for the generated code.
+ MaybeObject* AllocateConstantPool(Heap* heap);
+
+ // Generate the constant pool for the generated code.
+ void PopulateConstantPool(ConstantPoolArray* constant_pool);
protected:
void emit_sse_operand(XMMRegister reg, const Operand& adr);
=======================================
--- /branches/bleeding_edge/src/objects.h Tue Mar 18 14:15:09 2014 UTC
+++ /branches/bleeding_edge/src/objects.h Wed Mar 19 09:38:20 2014 UTC
@@ -1326,6 +1326,8 @@
   V(kTheInstructionShouldBeAnOri, "The instruction should be an ori")         \
   V(kTheInstructionToPatchShouldBeALoadFromPc,                                \
     "The instruction to patch should be a load from pc")                      \
+  V(kTheInstructionToPatchShouldBeALoadFromPp,                                \
+    "The instruction to patch should be a load from pp")                      \
   V(kTheInstructionToPatchShouldBeAnLdrLiteral,                               \
     "The instruction to patch should be a ldr literal")                       \
   V(kTheInstructionToPatchShouldBeALui,                                       \
=======================================
--- /branches/bleeding_edge/src/x64/assembler-x64.cc Wed Mar 19 08:59:04 2014 UTC
+++ /branches/bleeding_edge/src/x64/assembler-x64.cc Wed Mar 19 09:38:20 2014 UTC
@@ -3197,6 +3197,19 @@
RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
}
}
+
+
+MaybeObject* Assembler::AllocateConstantPool(Heap* heap) {
+ // No out-of-line constant pool support.
+ UNREACHABLE();
+ return NULL;
+}
+
+
+void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
+ // No out-of-line constant pool support.
+ UNREACHABLE();
+}
const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
=======================================
--- /branches/bleeding_edge/src/x64/assembler-x64.h Wed Mar 19 08:59:04 2014 UTC
+++ /branches/bleeding_edge/src/x64/assembler-x64.h Wed Mar 19 09:38:20 2014 UTC
@@ -1462,6 +1462,12 @@
// Use --code-comments to enable.
void RecordComment(const char* msg, bool force = false);
+ // Allocate a constant pool of the correct size for the generated code.
+ MaybeObject* AllocateConstantPool(Heap* heap);
+
+ // Generate the constant pool for the generated code.
+ void PopulateConstantPool(ConstantPoolArray* constant_pool);
+
// Writes a single word of data in the code stream.
// Used for inline tables, e.g., jump-tables.
void db(uint8_t data);