Revision: 7132
Author: [email protected]
Date: Thu Mar 10 05:58:20 2011
Log: ARM: Port r7089 to ARM
Ensure that there are always enough bytes between consecutive calls in
optimized code to write a call instruction at the return points without
overlapping.
Add a call to deoptimize all functions after running tests with
--stress-opt. This will catch some issues with functions which cannot be
forcefully deoptimized. Some of the tests failed on ARM with that change
without the rest of the changes in this change.
Review URL: http://codereview.chromium.org/6661022
http://code.google.com/p/v8/source/detail?r=7132
Modified:
/branches/bleeding_edge/include/v8-testing.h
/branches/bleeding_edge/samples/shell.cc
/branches/bleeding_edge/src/api.cc
/branches/bleeding_edge/src/arm/assembler-arm.cc
/branches/bleeding_edge/src/arm/assembler-arm.h
/branches/bleeding_edge/src/arm/deoptimizer-arm.cc
/branches/bleeding_edge/src/arm/lithium-codegen-arm.cc
/branches/bleeding_edge/src/arm/lithium-codegen-arm.h
/branches/bleeding_edge/src/arm/macro-assembler-arm.cc
/branches/bleeding_edge/src/arm/macro-assembler-arm.h
/branches/bleeding_edge/src/ia32/deoptimizer-ia32.cc
/branches/bleeding_edge/src/x64/deoptimizer-x64.cc
=======================================
--- /branches/bleeding_edge/include/v8-testing.h Tue Dec 7 03:31:57 2010
+++ /branches/bleeding_edge/include/v8-testing.h Thu Mar 10 05:58:20 2011
@@ -87,6 +87,11 @@
* should be between 0 and one less than the result from GetStressRuns()
*/
static void PrepareStressRun(int run);
+
+ /**
+ * Force deoptimization of all functions.
+ */
+ static void DeoptimizeAll();
};
=======================================
--- /branches/bleeding_edge/samples/shell.cc Wed Feb 16 03:40:48 2011
+++ /branches/bleeding_edge/samples/shell.cc Thu Mar 10 05:58:20 2011
@@ -65,6 +65,11 @@
// Create a new execution environment containing the built-in
// functions
v8::Persistent<v8::Context> context = v8::Context::New(NULL, global);
+ if (context.IsEmpty()) {
+ printf("Error creating context\n");
+ return 1;
+ }
+
bool run_shell = (argc == 1);
for (int i = 1; i < argc; i++) {
// Enter the execution environment before evaluating any code.
@@ -139,6 +144,8 @@
v8::Testing::PrepareStressRun(i);
result = RunMain(argc, argv);
}
+ printf("======== Full Deoptimization =======\n");
+ v8::Testing::DeoptimizeAll();
} else {
result = RunMain(argc, argv);
}
=======================================
--- /branches/bleeding_edge/src/api.cc Thu Mar 10 04:05:31 2011
+++ /branches/bleeding_edge/src/api.cc Thu Mar 10 05:58:20 2011
@@ -5141,6 +5141,11 @@
}
#endif
}
+
+
+void Testing::DeoptimizeAll() {
+ internal::Deoptimizer::DeoptimizeAll();
+}
namespace internal {
=======================================
--- /branches/bleeding_edge/src/arm/assembler-arm.cc Thu Feb 17 07:25:38
2011
+++ /branches/bleeding_edge/src/arm/assembler-arm.cc Thu Mar 10 05:58:20
2011
@@ -767,11 +767,35 @@
}
-bool Operand::is_single_instruction() const {
+bool Operand::is_single_instruction(Instr instr) const {
if (rm_.is_valid()) return true;
- if (must_use_constant_pool()) return false;
uint32_t dummy1, dummy2;
- return fits_shifter(imm32_, &dummy1, &dummy2, NULL);
+ if (must_use_constant_pool() ||
+ !fits_shifter(imm32_, &dummy1, &dummy2, &instr)) {
+ // The immediate operand cannot be encoded as a shifter operand, or
use of
+ // constant pool is required. For a mov instruction not setting the
+ // condition code additional instruction conventions can be used.
+ if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
+ if (must_use_constant_pool() || !CpuFeatures::IsSupported(ARMv7)) {
+ // mov instruction will be an ldr from constant pool (one
instruction).
+ return true;
+ } else {
+ // mov instruction will be a mov or movw followed by movt (two
+ // instructions).
+ return false;
+ }
+ } else {
+ // If this is not a mov or mvn instruction there will always an
additional
+ // instructions - either mov or ldr. The mov might actually be two
+ // instructions mov or movw followed by movt so including the actual
+ // instruction two or three instructions will be generated.
+ return false;
+ }
+ } else {
+ // No use of constant pool and the immediate operand can be encoded as
a
+ // shifter operand.
+ return true;
+ }
}
=======================================
--- /branches/bleeding_edge/src/arm/assembler-arm.h Wed Mar 2 01:31:42 2011
+++ /branches/bleeding_edge/src/arm/assembler-arm.h Thu Mar 10 05:58:20 2011
@@ -389,8 +389,11 @@
INLINE(bool is_reg() const);
// Return true if this operand fits in one instruction so that no
- // 2-instruction solution with a load into the ip register is necessary.
- bool is_single_instruction() const;
+ // 2-instruction solution with a load into the ip register is necessary.
If
+ // the instruction this operand is used for is a MOV or MVN instruction
the
+ // actual instruction to use is required for this calculation. For other
+ // instructions instr is ignored.
+ bool is_single_instruction(Instr instr = 0) const;
bool must_use_constant_pool() const;
inline int32_t immediate() const {
=======================================
--- /branches/bleeding_edge/src/arm/deoptimizer-arm.cc Tue Feb 22 08:56:57
2011
+++ /branches/bleeding_edge/src/arm/deoptimizer-arm.cc Thu Mar 10 05:58:20
2011
@@ -46,6 +46,7 @@
void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
+ HandleScope scope;
AssertNoAllocation no_allocation;
if (!function->IsOptimized()) return;
@@ -69,8 +70,6 @@
int deoptimization_index = safepoint_entry.deoptimization_index();
int gap_code_size = safepoint_entry.gap_code_size();
// Check that we did not shoot past next safepoint.
- // TODO(srdjan): How do we guarantee that safepoint code does not
- // overlap other safepoint patching code?
CHECK(pc_offset >= last_pc_offset);
#ifdef DEBUG
// Destroy the code which is not supposed to be run again.
@@ -117,6 +116,11 @@
PrintF("[forced deoptimization: ");
function->PrintName();
PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
+#ifdef DEBUG
+ if (FLAG_print_code) {
+ code->PrintLn();
+ }
+#endif
}
}
=======================================
--- /branches/bleeding_edge/src/arm/lithium-codegen-arm.cc Wed Mar 9
07:01:16 2011
+++ /branches/bleeding_edge/src/arm/lithium-codegen-arm.cc Thu Mar 10
05:58:20 2011
@@ -34,7 +34,7 @@
namespace internal {
-class SafepointGenerator : public PostCallGenerator {
+class SafepointGenerator : public CallWrapper {
public:
SafepointGenerator(LCodeGen* codegen,
LPointerMap* pointers,
@@ -44,7 +44,24 @@
deoptimization_index_(deoptimization_index) { }
virtual ~SafepointGenerator() { }
- virtual void Generate() {
+ virtual void BeforeCall(int call_size) {
+ ASSERT(call_size >= 0);
+ // Ensure that we have enough space after the previous safepoint
position
+ // for the generated code there.
+ int call_end = codegen_->masm()->pc_offset() + call_size;
+ int prev_jump_end =
+ codegen_->LastSafepointEnd() + Deoptimizer::patch_size();
+ if (call_end < prev_jump_end) {
+ int padding_size = prev_jump_end - call_end;
+ ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
+ while (padding_size > 0) {
+ codegen_->masm()->nop();
+ padding_size -= Assembler::kInstrSize;
+ }
+ }
+ }
+
+ virtual void AfterCall() {
codegen_->RecordSafepoint(pointers_, deoptimization_index_);
}
=======================================
--- /branches/bleeding_edge/src/arm/lithium-codegen-arm.h Tue Mar 8
02:29:40 2011
+++ /branches/bleeding_edge/src/arm/lithium-codegen-arm.h Thu Mar 10
05:58:20 2011
@@ -229,6 +229,9 @@
int arguments,
int deoptimization_index);
void RecordPosition(int position);
+ int LastSafepointEnd() {
+ return static_cast<int>(safepoints_.GetPcAfterGap());
+ }
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block, LDeferredCode* deferred_stack_check = NULL);
=======================================
--- /branches/bleeding_edge/src/arm/macro-assembler-arm.cc Thu Mar 3
04:21:37 2011
+++ /branches/bleeding_edge/src/arm/macro-assembler-arm.cc Thu Mar 10
05:58:20 2011
@@ -101,21 +101,56 @@
// 'code' is always generated ARM code, never THUMB code
Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
}
+
+
+int MacroAssembler::CallSize(Register target, Condition cond) {
+#if USE_BLX
+ return kInstrSize;
+#else
+ return 2 * kInstrSize;
+#endif
+}
void MacroAssembler::Call(Register target, Condition cond) {
+#ifdef DEBUG
+ int pre_position = pc_offset();
+#endif
+
#if USE_BLX
blx(target, cond);
#else
// set lr for return at current pc + 8
- mov(lr, Operand(pc), LeaveCC, cond);
- mov(pc, Operand(target), LeaveCC, cond);
+ { BlockConstPoolScope block_const_pool(this);
+ mov(lr, Operand(pc), LeaveCC, cond);
+ mov(pc, Operand(target), LeaveCC, cond);
+ }
#endif
+
+#ifdef DEBUG
+ int post_position = pc_offset();
+ CHECK_EQ(pre_position + CallSize(target, cond), post_position);
+#endif
}
-void MacroAssembler::Call(intptr_t target, RelocInfo::Mode rmode,
- Condition cond) {
+int MacroAssembler::CallSize(
+ intptr_t target, RelocInfo::Mode rmode, Condition cond) {
+ int size = 2 * kInstrSize;
+ Instr mov_instr = cond | MOV | LeaveCC;
+ if (!Operand(target, rmode).is_single_instruction(mov_instr)) {
+ size += kInstrSize;
+ }
+ return size;
+}
+
+
+void MacroAssembler::Call(
+ intptr_t target, RelocInfo::Mode rmode, Condition cond) {
+#ifdef DEBUG
+ int pre_position = pc_offset();
+#endif
+
#if USE_BLX
// On ARMv5 and after the recommended call sequence is:
// ldr ip, [pc, #...]
@@ -137,28 +172,64 @@
ASSERT(kCallTargetAddressOffset == 2 * kInstrSize);
#else
- // Set lr for return at current pc + 8.
- mov(lr, Operand(pc), LeaveCC, cond);
- // Emit a ldr<cond> pc, [pc + offset of target in constant pool].
- mov(pc, Operand(target, rmode), LeaveCC, cond);
-
+ { BlockConstPoolScope block_const_pool(this);
+ // Set lr for return at current pc + 8.
+ mov(lr, Operand(pc), LeaveCC, cond);
+ // Emit a ldr<cond> pc, [pc + offset of target in constant pool].
+ mov(pc, Operand(target, rmode), LeaveCC, cond);
+ }
ASSERT(kCallTargetAddressOffset == kInstrSize);
#endif
+
+#ifdef DEBUG
+ int post_position = pc_offset();
+ CHECK_EQ(pre_position + CallSize(target, rmode, cond), post_position);
+#endif
}
-void MacroAssembler::Call(byte* target, RelocInfo::Mode rmode,
- Condition cond) {
+int MacroAssembler::CallSize(
+ byte* target, RelocInfo::Mode rmode, Condition cond) {
+ return CallSize(reinterpret_cast<intptr_t>(target), rmode);
+}
+
+
+void MacroAssembler::Call(
+ byte* target, RelocInfo::Mode rmode, Condition cond) {
+#ifdef DEBUG
+ int pre_position = pc_offset();
+#endif
+
ASSERT(!RelocInfo::IsCodeTarget(rmode));
Call(reinterpret_cast<intptr_t>(target), rmode, cond);
+
+#ifdef DEBUG
+ int post_position = pc_offset();
+ CHECK_EQ(pre_position + CallSize(target, rmode, cond), post_position);
+#endif
}
-void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
- Condition cond) {
+int MacroAssembler::CallSize(
+ Handle<Code> code, RelocInfo::Mode rmode, Condition cond) {
+ return CallSize(reinterpret_cast<intptr_t>(code.location()), rmode,
cond);
+}
+
+
+void MacroAssembler::Call(
+ Handle<Code> code, RelocInfo::Mode rmode, Condition cond) {
+#ifdef DEBUG
+ int pre_position = pc_offset();
+#endif
+
ASSERT(RelocInfo::IsCodeTarget(rmode));
// 'code' is always generated ARM code, never THUMB code
Call(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
+
+#ifdef DEBUG
+ int post_position = pc_offset();
+ CHECK_EQ(pre_position + CallSize(code, rmode, cond), post_position);
+#endif
}
@@ -784,7 +855,7 @@
Register code_reg,
Label* done,
InvokeFlag flag,
- PostCallGenerator*
post_call_generator) {
+ CallWrapper* call_wrapper) {
bool definitely_matches = false;
Label regular_invoke;
@@ -839,8 +910,11 @@
Handle<Code> adaptor =
Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
if (flag == CALL_FUNCTION) {
+ if (call_wrapper != NULL) {
+ call_wrapper->BeforeCall(CallSize(adaptor,
RelocInfo::CODE_TARGET));
+ }
Call(adaptor, RelocInfo::CODE_TARGET);
- if (post_call_generator != NULL) post_call_generator->Generate();
+ if (call_wrapper != NULL) call_wrapper->AfterCall();
b(done);
} else {
Jump(adaptor, RelocInfo::CODE_TARGET);
@@ -854,14 +928,15 @@
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- PostCallGenerator* post_call_generator) {
+ CallWrapper* call_wrapper) {
Label done;
InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
- post_call_generator);
+ call_wrapper);
if (flag == CALL_FUNCTION) {
+ if (call_wrapper != NULL) call_wrapper->BeforeCall(CallSize(code));
Call(code);
- if (post_call_generator != NULL) post_call_generator->Generate();
+ if (call_wrapper != NULL) call_wrapper->AfterCall();
} else {
ASSERT(flag == JUMP_FUNCTION);
Jump(code);
@@ -896,7 +971,7 @@
void MacroAssembler::InvokeFunction(Register fun,
const ParameterCount& actual,
InvokeFlag flag,
- PostCallGenerator*
post_call_generator) {
+ CallWrapper* call_wrapper) {
// Contract with called JS functions requires that function is passed in
r1.
ASSERT(fun.is(r1));
@@ -913,7 +988,7 @@
FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
ParameterCount expected(expected_reg);
- InvokeCode(code_reg, expected, actual, flag, post_call_generator);
+ InvokeCode(code_reg, expected, actual, flag, call_wrapper);
}
@@ -2083,11 +2158,12 @@
void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
InvokeJSFlags flags,
- PostCallGenerator* post_call_generator)
{
+ CallWrapper* call_wrapper) {
GetBuiltinEntry(r2, id);
if (flags == CALL_JS) {
+ if (call_wrapper != NULL) call_wrapper->BeforeCall(CallSize(r2));
Call(r2);
- if (post_call_generator != NULL) post_call_generator->Generate();
+ if (call_wrapper != NULL) call_wrapper->AfterCall();
} else {
ASSERT(flags == JUMP_JS);
Jump(r2);
=======================================
--- /branches/bleeding_edge/src/arm/macro-assembler-arm.h Thu Mar 3
04:21:37 2011
+++ /branches/bleeding_edge/src/arm/macro-assembler-arm.h Thu Mar 10
05:58:20 2011
@@ -34,7 +34,7 @@
namespace internal {
// Forward declaration.
-class PostCallGenerator;
+class CallWrapper;
//
----------------------------------------------------------------------------
// Static helper functions
@@ -96,8 +96,11 @@
void Jump(Register target, Condition cond = al);
void Jump(byte* target, RelocInfo::Mode rmode, Condition cond = al);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
+ int CallSize(Register target, Condition cond = al);
void Call(Register target, Condition cond = al);
+ int CallSize(byte* target, RelocInfo::Mode rmode, Condition cond = al);
void Call(byte* target, RelocInfo::Mode rmode, Condition cond = al);
+ int CallSize(Handle<Code> code, RelocInfo::Mode rmode, Condition cond =
al);
void Call(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
void Ret(Condition cond = al);
@@ -343,7 +346,7 @@
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- PostCallGenerator* post_call_generator = NULL);
+ CallWrapper* call_wrapper = NULL);
void InvokeCode(Handle<Code> code,
const ParameterCount& expected,
@@ -356,7 +359,7 @@
void InvokeFunction(Register function,
const ParameterCount& actual,
InvokeFlag flag,
- PostCallGenerator* post_call_generator = NULL);
+ CallWrapper* call_wrapper = NULL);
void InvokeFunction(JSFunction* function,
const ParameterCount& actual,
@@ -748,7 +751,7 @@
// the unresolved list if the name does not resolve.
void InvokeBuiltin(Builtins::JavaScript id,
InvokeJSFlags flags,
- PostCallGenerator* post_call_generator = NULL);
+ CallWrapper* call_wrapper = NULL);
// Store the code object for the given builtin in the target register and
// setup the function in r1.
@@ -911,6 +914,7 @@
private:
void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
+ int CallSize(intptr_t target, RelocInfo::Mode rmode, Condition cond =
al);
void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
// Helper functions for generating invokes.
@@ -920,7 +924,7 @@
Register code_reg,
Label* done,
InvokeFlag flag,
- PostCallGenerator* post_call_generator = NULL);
+ CallWrapper* call_wrapper = NULL);
// Activation support.
void EnterFrame(StackFrame::Type type);
@@ -984,11 +988,15 @@
// Helper class for generating code or data associated with the code
// right after a call instruction. As an example this can be used to
// generate safepoint data after calls for crankshaft.
-class PostCallGenerator {
+class CallWrapper {
public:
- PostCallGenerator() { }
- virtual ~PostCallGenerator() { }
- virtual void Generate() = 0;
+ CallWrapper() { }
+ virtual ~CallWrapper() { }
+ // Called just before emitting a call. Argument is the size of the
generated
+ // call code.
+ virtual void BeforeCall(int call_size) = 0;
+ // Called just after emitting a call, i.e., at the return site for the
call.
+ virtual void AfterCall() = 0;
};
=======================================
--- /branches/bleeding_edge/src/ia32/deoptimizer-ia32.cc Tue Feb 22
08:56:57 2011
+++ /branches/bleeding_edge/src/ia32/deoptimizer-ia32.cc Thu Mar 10
05:58:20 2011
@@ -56,6 +56,7 @@
void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
+ HandleScope scope;
AssertNoAllocation no_allocation;
if (!function->IsOptimized()) return;
@@ -132,6 +133,11 @@
PrintF("[forced deoptimization: ");
function->PrintName();
PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
+#ifdef DEBUG
+ if (FLAG_print_code) {
+ code->PrintLn();
+ }
+#endif
}
}
=======================================
--- /branches/bleeding_edge/src/x64/deoptimizer-x64.cc Mon Mar 7 00:35:19
2011
+++ /branches/bleeding_edge/src/x64/deoptimizer-x64.cc Thu Mar 10 05:58:20
2011
@@ -102,6 +102,7 @@
void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
+ HandleScope scope;
AssertNoAllocation no_allocation;
if (!function->IsOptimized()) return;
@@ -196,6 +197,11 @@
PrintF("[forced deoptimization: ");
function->PrintName();
PrintF(" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
+#ifdef DEBUG
+ if (FLAG_print_code) {
+ code->PrintLn();
+ }
+#endif
}
}
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev