Revision: 12287
Author: [email protected]
Date: Fri Aug 10 05:24:06 2012
Log: Fix the full compiler on ARM to always generate the same code
regardless of the detected CPU. This is a requirement for the
debugger and the deoptimizer, which both expect that code from
the snapshot (compiled without VFP and ARM7) should have the
same layout as code compiled later.
This is another change to make snapshots more robust with
arbitrary code.
Review URL: https://chromiumcodereview.appspot.com/10824235
http://code.google.com/p/v8/source/detail?r=12287
Modified:
/branches/bleeding_edge/src/arm/assembler-arm.cc
/branches/bleeding_edge/src/arm/assembler-arm.h
/branches/bleeding_edge/src/arm/code-stubs-arm.cc
/branches/bleeding_edge/src/arm/deoptimizer-arm.cc
/branches/bleeding_edge/src/arm/full-codegen-arm.cc
/branches/bleeding_edge/src/arm/macro-assembler-arm.cc
/branches/bleeding_edge/src/arm/macro-assembler-arm.h
/branches/bleeding_edge/src/flag-definitions.h
=======================================
--- /branches/bleeding_edge/src/arm/assembler-arm.cc Mon Aug 6 07:13:09
2012
+++ /branches/bleeding_edge/src/arm/assembler-arm.cc Fri Aug 10 05:24:06
2012
@@ -302,7 +302,8 @@
: AssemblerBase(arg_isolate),
recorded_ast_id_(TypeFeedbackId::None()),
positions_recorder_(this),
- emit_debug_code_(FLAG_debug_code) {
+ emit_debug_code_(FLAG_debug_code),
+ predictable_code_size_(false) {
if (buffer == NULL) {
// Do our own buffer management.
if (buffer_size <= kMinimalBufferSize) {
@@ -784,13 +785,14 @@
// if they can be encoded in the ARM's 12 bits of immediate-offset
instruction
// space. There is no guarantee that the relocated location can be
similarly
// encoded.
-bool Operand::must_use_constant_pool() const {
+bool Operand::must_use_constant_pool(const Assembler* assembler) const {
if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
if (!Serializer::enabled()) {
Serializer::TooLateToEnableNow();
}
#endif // def DEBUG
+ if (assembler != NULL && assembler->predictable_code_size()) return
true;
return Serializer::enabled();
} else if (rmode_ == RelocInfo::NONE) {
return false;
@@ -799,16 +801,17 @@
}
-bool Operand::is_single_instruction(Instr instr) const {
+bool Operand::is_single_instruction(const Assembler* assembler,
+ Instr instr) const {
if (rm_.is_valid()) return true;
uint32_t dummy1, dummy2;
- if (must_use_constant_pool() ||
+ if (must_use_constant_pool(assembler) ||
!fits_shifter(imm32_, &dummy1, &dummy2, &instr)) {
// The immediate operand cannot be encoded as a shifter operand, or
use of
// constant pool is required. For a mov instruction not setting the
// condition code additional instruction conventions can be used.
if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
- if (must_use_constant_pool() ||
+ if (must_use_constant_pool(assembler) ||
!CpuFeatures::IsSupported(ARMv7)) {
// mov instruction will be an ldr from constant pool (one
instruction).
return true;
@@ -842,7 +845,7 @@
// Immediate.
uint32_t rotate_imm;
uint32_t immed_8;
- if (x.must_use_constant_pool() ||
+ if (x.must_use_constant_pool(this) ||
!fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
// The immediate operand cannot be encoded as a shifter operand, so
load
// it first to register ip and change the original instruction to
use ip.
@@ -851,7 +854,7 @@
CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed
Condition cond = Instruction::ConditionField(instr);
if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
- if (x.must_use_constant_pool() ||
+ if (x.must_use_constant_pool(this) ||
!CpuFeatures::IsSupported(ARMv7)) {
RecordRelocInfo(x.rmode_, x.imm32_);
ldr(rd, MemOperand(pc, 0), cond);
@@ -863,7 +866,7 @@
} else {
// If this is not a mov or mvn instruction we may still be able to
avoid
// a constant pool entry by using mvn or movw.
- if (!x.must_use_constant_pool() &&
+ if (!x.must_use_constant_pool(this) &&
(instr & kMovMvnMask) != kMovMvnPattern) {
mov(ip, x, LeaveCC, cond);
} else {
@@ -1388,7 +1391,7 @@
// Immediate.
uint32_t rotate_imm;
uint32_t immed_8;
- if (src.must_use_constant_pool() ||
+ if (src.must_use_constant_pool(this) ||
!fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
// Immediate operand cannot be encoded, load it first to register ip.
RecordRelocInfo(src.rmode_, src.imm32_);
=======================================
--- /branches/bleeding_edge/src/arm/assembler-arm.h Mon Aug 6 07:13:09 2012
+++ /branches/bleeding_edge/src/arm/assembler-arm.h Fri Aug 10 05:24:06 2012
@@ -424,8 +424,8 @@
// the instruction this operand is used for is a MOV or MVN instruction
the
// actual instruction to use is required for this calculation. For other
// instructions instr is ignored.
- bool is_single_instruction(Instr instr = 0) const;
- bool must_use_constant_pool() const;
+ bool is_single_instruction(const Assembler* assembler, Instr instr = 0)
const;
+ bool must_use_constant_pool(const Assembler* assembler) const;
inline int32_t immediate() const {
ASSERT(!rm_.is_valid());
@@ -648,8 +648,10 @@
// Overrides the default provided by FLAG_debug_code.
void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
- // Dummy for cross platform compatibility.
- void set_predictable_code_size(bool value) { }
+ // Avoids using instructions that vary in size in unpredictable ways
between
+ // the snapshot and the running VM. This is needed by the full compiler
so
+ // that it can recompile code with debug support and fix the PC.
+ void set_predictable_code_size(bool value) { predictable_code_size_ =
value; }
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
@@ -1169,6 +1171,8 @@
// Jump unconditionally to given label.
void jmp(Label* L) { b(L, al); }
+
+ bool predictable_code_size() const { return predictable_code_size_; }
// Check the code size generated from label to here.
int SizeOfCodeGeneratedSince(Label* label) {
@@ -1450,7 +1454,10 @@
friend class BlockConstPoolScope;
PositionsRecorder positions_recorder_;
+
bool emit_debug_code_;
+ bool predictable_code_size_;
+
friend class PositionsRecorder;
friend class EnsureSpace;
};
=======================================
--- /branches/bleeding_edge/src/arm/code-stubs-arm.cc Mon Aug 6 02:06:27
2012
+++ /branches/bleeding_edge/src/arm/code-stubs-arm.cc Fri Aug 10 05:24:06
2012
@@ -1859,11 +1859,9 @@
void ToBooleanStub::Generate(MacroAssembler* masm) {
// This stub overrides SometimesSetsUpAFrame() to return false. That
means
// we cannot call anything that could cause a GC from this stub.
- // This stub uses VFP3 instructions.
- CpuFeatures::Scope scope(VFP2);
-
Label patch;
const Register map = r9.is(tos_) ? r7 : r9;
+ const Register temp = map;
// undefined -> false.
CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
@@ -1916,13 +1914,56 @@
Label not_heap_number;
__ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
__ b(ne, &not_heap_number);
- __ vldr(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset));
- __ VFPCompareAndSetFlags(d1, 0.0);
- // "tos_" is a register, and contains a non zero value by default.
- // Hence we only need to overwrite "tos_" with zero to return false for
- // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
- __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, eq); // for FP_ZERO
- __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, vs); // for FP_NAN
+
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
+
+ __ vldr(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset));
+ __ VFPCompareAndSetFlags(d1, 0.0);
+ // "tos_" is a register, and contains a non zero value by default.
+ // Hence we only need to overwrite "tos_" with zero to return false
for
+ // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
+ __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, eq); // for
FP_ZERO
+ __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, vs); // for
FP_NAN
+ } else {
+ Label done, not_nan, not_zero;
+ __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kExponentOffset));
+ // -0 maps to false:
+ __ bic(
+ temp, temp, Operand(HeapNumber::kSignMask, RelocInfo::NONE),
SetCC);
+ __ b(ne, &not_zero);
+ // If exponent word is zero then the answer depends on the mantissa
word.
+ __ ldr(tos_, FieldMemOperand(tos_, HeapNumber::kMantissaOffset));
+ __ jmp(&done);
+
+ // Check for NaN.
+ __ bind(&not_zero);
+ // We already zeroed the sign bit, now shift out the mantissa so we
only
+ // have the exponent left.
+ __ mov(temp, Operand(temp, LSR, HeapNumber::kMantissaBitsInTopWord));
+ unsigned int shifted_exponent_mask =
+ HeapNumber::kExponentMask >> HeapNumber::kMantissaBitsInTopWord;
+ __ cmp(temp, Operand(shifted_exponent_mask, RelocInfo::NONE));
+ __ b(ne, &not_nan); // If exponent is not 0x7ff then it can't be a
NaN.
+
+ // Reload exponent word.
+ __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kExponentOffset));
+ __ tst(temp, Operand(HeapNumber::kMantissaMask, RelocInfo::NONE));
+ // If mantissa is not zero then we have a NaN, so return 0.
+ __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, ne);
+ __ b(ne, &done);
+
+ // Load mantissa word.
+ __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kMantissaOffset));
+ __ cmp(temp, Operand(0, RelocInfo::NONE));
+ // If mantissa is not zero then we have a NaN, so return 0.
+ __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, ne);
+ __ b(ne, &done);
+
+ __ bind(&not_nan);
+ __ mov(tos_, Operand(1, RelocInfo::NONE));
+ __ bind(&done);
+ }
__ Ret();
__ bind(&not_heap_number);
}
=======================================
--- /branches/bleeding_edge/src/arm/deoptimizer-arm.cc Mon Aug 6 07:13:09
2012
+++ /branches/bleeding_edge/src/arm/deoptimizer-arm.cc Fri Aug 10 05:24:06
2012
@@ -73,8 +73,11 @@
if (deopt_data->Pc(i)->value() == -1) continue;
Address call_address = code_start_address + deopt_data->Pc(i)->value();
Address deopt_entry = GetDeoptimizationEntry(i, LAZY);
- int call_size_in_bytes = MacroAssembler::CallSize(deopt_entry,
- RelocInfo::NONE);
+ // We need calls to have a predictable size in the unoptimized code,
but
+ // this is optimized code, so we don't have to have a predictable size.
+ int call_size_in_bytes =
+ MacroAssembler::CallSizeNotPredictableCodeSize(deopt_entry,
+ RelocInfo::NONE);
int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0);
ASSERT(call_size_in_bytes <= patch_size());
=======================================
--- /branches/bleeding_edge/src/arm/full-codegen-arm.cc Tue Aug 7 07:06:25
2012
+++ /branches/bleeding_edge/src/arm/full-codegen-arm.cc Fri Aug 10 05:24:06
2012
@@ -673,18 +673,9 @@
Label* if_true,
Label* if_false,
Label* fall_through) {
- if (CpuFeatures::IsSupported(VFP2)) {
- ToBooleanStub stub(result_register());
- __ CallStub(&stub);
- __ tst(result_register(), result_register());
- } else {
- // Call the runtime to find the boolean value of the source and then
- // translate it into control flow to the pair of labels.
- __ push(result_register());
- __ CallRuntime(Runtime::kToBool, 1);
- __ LoadRoot(ip, Heap::kFalseValueRootIndex);
- __ cmp(r0, ip);
- }
+ ToBooleanStub stub(result_register());
+ __ CallStub(&stub);
+ __ tst(result_register(), result_register());
Split(ne, if_true, if_false, fall_through);
}
=======================================
--- /branches/bleeding_edge/src/arm/macro-assembler-arm.cc Mon Aug 6
07:13:09 2012
+++ /branches/bleeding_edge/src/arm/macro-assembler-arm.cc Fri Aug 10
05:24:06 2012
@@ -137,11 +137,23 @@
int size = 2 * kInstrSize;
Instr mov_instr = cond | MOV | LeaveCC;
intptr_t immediate = reinterpret_cast<intptr_t>(target);
- if (!Operand(immediate, rmode).is_single_instruction(mov_instr)) {
+ if (!Operand(immediate, rmode).is_single_instruction(this, mov_instr)) {
size += kInstrSize;
}
return size;
}
+
+
+int MacroAssembler::CallSizeNotPredictableCodeSize(
+ Address target, RelocInfo::Mode rmode, Condition cond) {
+ int size = 2 * kInstrSize;
+ Instr mov_instr = cond | MOV | LeaveCC;
+ intptr_t immediate = reinterpret_cast<intptr_t>(target);
+ if (!Operand(immediate, rmode).is_single_instruction(NULL, mov_instr)) {
+ size += kInstrSize;
+ }
+ return size;
+}
void MacroAssembler::Call(Address target,
@@ -276,12 +288,12 @@
void MacroAssembler::And(Register dst, Register src1, const Operand& src2,
Condition cond) {
if (!src2.is_reg() &&
- !src2.must_use_constant_pool() &&
+ !src2.must_use_constant_pool(this) &&
src2.immediate() == 0) {
mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, cond);
- } else if (!src2.is_single_instruction() &&
- !src2.must_use_constant_pool() &&
+ } else if (!src2.is_single_instruction(this) &&
+ !src2.must_use_constant_pool(this) &&
CpuFeatures::IsSupported(ARMv7) &&
IsPowerOf2(src2.immediate() + 1)) {
ubfx(dst, src1, 0,
@@ -296,7 +308,7 @@
void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
Condition cond) {
ASSERT(lsb < 32);
- if (!CpuFeatures::IsSupported(ARMv7)) {
+ if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
and_(dst, src1, Operand(mask), LeaveCC, cond);
if (lsb != 0) {
@@ -311,7 +323,7 @@
void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
Condition cond) {
ASSERT(lsb < 32);
- if (!CpuFeatures::IsSupported(ARMv7)) {
+ if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
and_(dst, src1, Operand(mask), LeaveCC, cond);
int shift_up = 32 - lsb - width;
@@ -339,7 +351,7 @@
ASSERT(lsb + width < 32);
ASSERT(!scratch.is(dst));
if (width == 0) return;
- if (!CpuFeatures::IsSupported(ARMv7)) {
+ if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
bic(dst, dst, Operand(mask));
and_(scratch, src, Operand((1 << width) - 1));
@@ -353,7 +365,7 @@
void MacroAssembler::Bfc(Register dst, int lsb, int width, Condition cond)
{
ASSERT(lsb < 32);
- if (!CpuFeatures::IsSupported(ARMv7)) {
+ if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
bic(dst, dst, Operand(mask));
} else {
@@ -364,7 +376,7 @@
void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
Condition cond) {
- if (!CpuFeatures::IsSupported(ARMv7)) {
+ if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
ASSERT(!dst.is(pc) && !src.rm().is(pc));
ASSERT((satpos >= 0) && (satpos <= 31));
@@ -672,7 +684,7 @@
ASSERT((src.am() != PreIndex) && (src.am() != NegPreIndex));
// Generate two ldr instructions if ldrd is not available.
- if (CpuFeatures::IsSupported(ARMv7)) {
+ if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
CpuFeatures::Scope scope(ARMv7);
ldrd(dst1, dst2, src, cond);
} else {
@@ -714,7 +726,7 @@
ASSERT((dst.am() != PreIndex) && (dst.am() != NegPreIndex));
// Generate two str instructions if strd is not available.
- if (CpuFeatures::IsSupported(ARMv7)) {
+ if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
CpuFeatures::Scope scope(ARMv7);
strd(src1, src2, dst, cond);
} else {
@@ -2586,7 +2598,7 @@
void MacroAssembler::GetLeastBitsFromSmi(Register dst,
Register src,
int num_least_bits) {
- if (CpuFeatures::IsSupported(ARMv7)) {
+ if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) {
ubfx(dst, src, kSmiTagSize, num_least_bits);
} else {
mov(dst, Operand(src, ASR, kSmiTagSize));
=======================================
--- /branches/bleeding_edge/src/arm/macro-assembler-arm.h Mon Aug 6
07:13:09 2012
+++ /branches/bleeding_edge/src/arm/macro-assembler-arm.h Fri Aug 10
05:24:06 2012
@@ -110,14 +110,15 @@
void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
static int CallSize(Register target, Condition cond = al);
void Call(Register target, Condition cond = al);
- static int CallSize(Address target,
- RelocInfo::Mode rmode,
- Condition cond = al);
+ int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
+ static int CallSizeNotPredictableCodeSize(Address target,
+ RelocInfo::Mode rmode,
+ Condition cond = al);
void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
- static int CallSize(Handle<Code> code,
- RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
- TypeFeedbackId ast_id = TypeFeedbackId::None(),
- Condition cond = al);
+ int CallSize(Handle<Code> code,
+ RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+ TypeFeedbackId ast_id = TypeFeedbackId::None(),
+ Condition cond = al);
void Call(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
TypeFeedbackId ast_id = TypeFeedbackId::None(),
=======================================
--- /branches/bleeding_edge/src/flag-definitions.h Tue Aug 7 07:17:35 2012
+++ /branches/bleeding_edge/src/flag-definitions.h Fri Aug 10 05:24:06 2012
@@ -240,7 +240,8 @@
"insert an interrupt check at function exit")
DEFINE_bool(weighted_back_edges, false,
"weight back edges by jump distance for interrupt triggering")
-DEFINE_int(interrupt_budget, 5900,
+ // 0x1700 fits in the immediate field of an ARM instruction.
+DEFINE_int(interrupt_budget, 0x1700,
"execution budget before interrupt is triggered")
DEFINE_int(type_info_threshold, 15,
"percentage of ICs that must have type info to allow
optimization")
@@ -273,12 +274,12 @@
"enable use of RDTSC instruction if available")
DEFINE_bool(enable_sahf, true,
"enable use of SAHF instruction if available (X64 only)")
-DEFINE_bool(enable_vfp3, true,
+DEFINE_bool(enable_vfp3, false,
"enable use of VFP3 instructions if available - this implies "
"enabling ARMv7 and VFP2 instructions (ARM only)")
-DEFINE_bool(enable_vfp2, true,
+DEFINE_bool(enable_vfp2, false,
"enable use of VFP2 instructions if available")
-DEFINE_bool(enable_armv7, true,
+DEFINE_bool(enable_armv7, false,
"enable use of ARMv7 instructions if available (ARM only)")
DEFINE_bool(enable_fpu, true,
"enable use of MIPS FPU instructions if available (MIPS only)")
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev