Revision: 6611
Author: [email protected]
Date: Thu Feb 3 02:07:22 2011
Log: Streamline the code for patching optimized code for lazy deopt.
Rewrite the lazy deopt patching code on IA32 to use addresses throughout,
rather than offsets and a base address.
Also, rename a couple of ambiguous Code fields from _start to _offset.
Review URL: http://codereview.chromium.org/6334083
http://code.google.com/p/v8/source/detail?r=6611
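A minimal, self-contained sketch of the "addresses throughout" idea in the log
message above; Address, DumpRange, the byte buffer, and the offsets are
illustrative stand-ins, not the V8 code touched by this patch:

  #include <cstdint>
  #include <cstdio>
  #include <vector>

  typedef uint8_t* Address;

  static void DumpRange(Address start, Address end) {
    std::printf("range [%p, %p)\n",
                static_cast<void*>(start), static_cast<void*>(end));
  }

  int main() {
    std::vector<uint8_t> code(32, 0x90);   // stand-in instruction stream
    Address base = code.data();
    const unsigned offsets[] = {4, 12, 20};

    // Offset-based style: carry offsets and re-add the base at every use.
    unsigned last_offset = 0;
    for (unsigned offset : offsets) {
      DumpRange(base + last_offset, base + offset);
      last_offset = offset;
    }

    // Address-based style: convert once, then pass addresses from then on.
    Address prev = base;
    for (unsigned offset : offsets) {
      Address curr = base + offset;
      DumpRange(prev, curr);
      prev = curr;
    }
    return 0;
  }

Both loops visit the same ranges; the difference is only whether the base
address is re-added at every use or folded in once up front.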
Modified:
/branches/bleeding_edge/src/arm/deoptimizer-arm.cc
/branches/bleeding_edge/src/arm/lithium-codegen-arm.cc
/branches/bleeding_edge/src/deoptimizer.cc
/branches/bleeding_edge/src/disassembler.cc
/branches/bleeding_edge/src/full-codegen.cc
/branches/bleeding_edge/src/ia32/deoptimizer-ia32.cc
/branches/bleeding_edge/src/ia32/lithium-codegen-ia32.cc
/branches/bleeding_edge/src/objects-inl.h
/branches/bleeding_edge/src/objects.cc
/branches/bleeding_edge/src/objects.h
/branches/bleeding_edge/src/runtime.cc
/branches/bleeding_edge/src/safepoint-table.cc
/branches/bleeding_edge/src/x64/deoptimizer-x64.cc
/branches/bleeding_edge/src/x64/lithium-codegen-x64.cc
=======================================
--- /branches/bleeding_edge/src/arm/deoptimizer-arm.cc Wed Feb 2 05:55:29 2011
+++ /branches/bleeding_edge/src/arm/deoptimizer-arm.cc Thu Feb 3 02:07:22 2011
@@ -97,7 +97,7 @@
#ifdef DEBUG
// Destroy the code which is not supposed to be run again.
int instructions =
- (code->safepoint_table_start() - last_pc_offset) / Assembler::kInstrSize;
+ (code->safepoint_table_offset() - last_pc_offset) / Assembler::kInstrSize;
CodePatcher destroyer(code->instruction_start() + last_pc_offset,
instructions);
for (int x = 0; x < instructions; x++) {
=======================================
--- /branches/bleeding_edge/src/arm/lithium-codegen-arm.cc Thu Feb 3 01:10:54 2011
+++ /branches/bleeding_edge/src/arm/lithium-codegen-arm.cc Thu Feb 3 02:07:22 2011
@@ -223,7 +223,7 @@
void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
code->set_stack_slots(StackSlotCount());
- code->set_safepoint_table_start(safepoints_.GetCodeOffset());
+ code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
PopulateDeoptimizationData(code);
}
=======================================
--- /branches/bleeding_edge/src/deoptimizer.cc Wed Feb 2 03:58:24 2011
+++ /branches/bleeding_edge/src/deoptimizer.cc Thu Feb 3 02:07:22 2011
@@ -817,7 +817,7 @@
// call to an unconditional call to the replacement code.
ASSERT(unoptimized_code->kind() == Code::FUNCTION);
Address stack_check_cursor = unoptimized_code->instruction_start() +
- unoptimized_code->stack_check_table_start();
+ unoptimized_code->stack_check_table_offset();
uint32_t table_length = Memory::uint32_at(stack_check_cursor);
stack_check_cursor += kIntSize;
for (uint32_t i = 0; i < table_length; ++i) {
@@ -836,7 +836,7 @@
// stack check calls.
ASSERT(unoptimized_code->kind() == Code::FUNCTION);
Address stack_check_cursor = unoptimized_code->instruction_start() +
- unoptimized_code->stack_check_table_start();
+ unoptimized_code->stack_check_table_offset();
uint32_t table_length = Memory::uint32_at(stack_check_cursor);
stack_check_cursor += kIntSize;
for (uint32_t i = 0; i < table_length; ++i) {
=======================================
--- /branches/bleeding_edge/src/disassembler.cc Thu Jan 6 06:13:40 2011
+++ /branches/bleeding_edge/src/disassembler.cc Thu Feb 3 02:07:22 2011
@@ -313,12 +313,12 @@
// Called by Code::CodePrint.
void Disassembler::Decode(FILE* f, Code* code) {
int decode_size = (code->kind() == Code::OPTIMIZED_FUNCTION)
- ? static_cast<int>(code->safepoint_table_start())
+ ? static_cast<int>(code->safepoint_table_offset())
: code->instruction_size();
// If there might be a stack check table, stop before reaching it.
if (code->kind() == Code::FUNCTION) {
decode_size =
- Min(decode_size, static_cast<int>(code->stack_check_table_start()));
+ Min(decode_size, static_cast<int>(code->stack_check_table_offset()));
}
byte* begin = code->instruction_start();
=======================================
--- /branches/bleeding_edge/src/full-codegen.cc Fri Jan 21 03:36:19 2011
+++ /branches/bleeding_edge/src/full-codegen.cc Thu Feb 3 02:07:22 2011
@@ -304,7 +304,7 @@
cgen.PopulateDeoptimizationData(code);
code->set_has_deoptimization_support(info->HasDeoptimizationSupport());
code->set_allow_osr_at_loop_nesting_level(0);
- code->set_stack_check_table_start(table_offset);
+ code->set_stack_check_table_offset(table_offset);
CodeGenerator::PrintCode(code, info);
info->SetCode(code); // may be an empty handle.
#ifdef ENABLE_GDB_JIT_INTERFACE
=======================================
--- /branches/bleeding_edge/src/ia32/deoptimizer-ia32.cc Wed Feb 2 05:55:29 2011
+++ /branches/bleeding_edge/src/ia32/deoptimizer-ia32.cc Thu Feb 3 02:07:22 2011
@@ -43,6 +43,16 @@
int Deoptimizer::patch_size() {
return Assembler::kCallInstructionLength;
}
+
+
+static void ZapCodeRange(Address start, Address end) {
+#ifdef DEBUG
+ ASSERT(start <= end);
+ int size = end - start;
+ CodePatcher destroyer(start, size);
+ while (size-- > 0) destroyer.masm()->int3();
+#endif
+}
void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
@@ -52,90 +62,61 @@
// Get the optimized code.
Code* code = function->code();
-
- // For each return after a safepoint insert a absolute call to the
- // corresponding deoptimization entry.
- unsigned last_pc_offset = 0;
- SafepointTable table(function->code());
+ Address code_start_address = code->instruction_start();
// We will overwrite the code's relocation info in-place. Relocation info
- // is written backward. The relocation info is the payload of a byte array.
- // Later on we will align this at the start of the byte array and create
- // a trash byte array of the remaining space.
+ // is written backward. The relocation info is the payload of a byte
+ // array. Later on we will slide this to the start of the byte array and
+ // create a filler object in the remaining space.
ByteArray* reloc_info = code->relocation_info();
- Address end_address = reloc_info->address() + reloc_info->Size();
- RelocInfoWriter reloc_info_writer(end_address, code->instruction_start());
-
- for (unsigned i = 0; i < table.length(); i++) {
- unsigned pc_offset = table.GetPcOffset(i);
+ Address reloc_end_address = reloc_info->address() + reloc_info->Size();
+ RelocInfoWriter reloc_info_writer(reloc_end_address, code_start_address);
+
+ // For each return after a safepoint insert a call to the corresponding
+ // deoptimization entry. Since the call is a relative encoding, write new
+ // reloc info. We do not need any of the existing reloc info because the
+ // existing code will not be used again (we zap it in debug builds).
+ SafepointTable table(code);
+ Address prev_address = code_start_address;
+ for (unsigned i = 0; i < table.length(); ++i) {
+ Address curr_address = code_start_address + table.GetPcOffset(i);
+ ZapCodeRange(prev_address, curr_address);
+
SafepointEntry safepoint_entry = table.GetEntry(i);
int deoptimization_index = safepoint_entry.deoptimization_index();
- int gap_code_size = safepoint_entry.gap_code_size();
-#ifdef DEBUG
- // Destroy the code which is not supposed to run again.
- unsigned instructions = pc_offset - last_pc_offset;
- CodePatcher destroyer(code->instruction_start() + last_pc_offset,
- instructions);
- for (unsigned i = 0; i < instructions; i++) {
- destroyer.masm()->int3();
- }
-#endif
- last_pc_offset = pc_offset;
if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
- last_pc_offset += gap_code_size;
- Address call_pc = code->instruction_start() + last_pc_offset;
- CodePatcher patcher(call_pc, patch_size());
- Address entry = GetDeoptimizationEntry(deoptimization_index, LAZY);
- patcher.masm()->call(entry, RelocInfo::NONE);
- last_pc_offset += patch_size();
- RelocInfo rinfo(call_pc + 1, RelocInfo::RUNTIME_ENTRY,
- reinterpret_cast<intptr_t>(entry));
+ // The gap code is needed to get to the state expected at the bailout.
+ curr_address += safepoint_entry.gap_code_size();
+
+ CodePatcher patcher(curr_address, patch_size());
+ Address deopt_entry = GetDeoptimizationEntry(deoptimization_index, LAZY);
+ patcher.masm()->call(deopt_entry, RelocInfo::NONE);
+
+ // We use RUNTIME_ENTRY for deoptimization bailouts.
+ RelocInfo rinfo(curr_address + 1, // 1 after the call opcode.
+ RelocInfo::RUNTIME_ENTRY,
+ reinterpret_cast<intptr_t>(deopt_entry));
reloc_info_writer.Write(&rinfo);
- }
- }
-#ifdef DEBUG
- // Destroy the code which is not supposed to run again.
- unsigned instructions = code->safepoint_table_start() - last_pc_offset;
- CodePatcher destroyer(code->instruction_start() + last_pc_offset,
- instructions);
- for (unsigned i = 0; i < instructions; i++) {
- destroyer.masm()->int3();
- }
-#endif
+
+ curr_address += patch_size();
+ }
+ prev_address = curr_address;
+ }
+ ZapCodeRange(prev_address,
+ code_start_address + code->safepoint_table_offset());
// Move the relocation info to the beginning of the byte array.
- int reloc_size = end_address - reloc_info_writer.pos();
- memmove(code->relocation_start(), reloc_info_writer.pos(), reloc_size);
+ int new_reloc_size = reloc_end_address - reloc_info_writer.pos();
+ memmove(code->relocation_start(), reloc_info_writer.pos(), new_reloc_size);
// The relocation info is in place, update the size.
- reloc_info->set_length(reloc_size);
+ reloc_info->set_length(new_reloc_size);
// Handle the junk part after the new relocation info. We will create
// a non-live object in the extra space at the end of the former reloc info.
- Address junk = reloc_info->address() + reloc_info->Size();
- ASSERT(junk <= end_address);
-
- if (end_address - junk <= ByteArray::kHeaderSize) {
- // We get in here if there is not enough space for a ByteArray.
-
- // Both addresses are kPointerSize alligned.
- CHECK_EQ((end_address - junk) % 4, 0);
- Map* filler_map = Heap::one_pointer_filler_map();
- while (junk < end_address) {
- HeapObject::FromAddress(junk)->set_map(filler_map);
- junk += kPointerSize;
- }
- } else {
- int size = end_address - junk;
- // Since the reloc_end address and junk are both alligned, we shouild,
- // never have junk which is not a multipla of kPointerSize.
- CHECK_EQ(size % kPointerSize, 0);
- CHECK_GT(size, 0);
- HeapObject* junk_object = HeapObject::FromAddress(junk);
- junk_object->set_map(Heap::byte_array_map());
- int length = ByteArray::LengthFor(end_address - junk);
- ByteArray::cast(junk_object)->set_length(length);
- }
+ Address junk_address = reloc_info->address() + reloc_info->Size();
+ ASSERT(junk_address <= reloc_end_address);
+ Heap::CreateFillerObjectAt(junk_address, reloc_end_address - junk_address);
// Add the deoptimizing code to the list.
DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
=======================================
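The ia32/deoptimizer-ia32.cc hunk above is the core of the change. A
self-contained sketch of the same loop shape follows: zap the bytes that are
skipped over, then patch a call at each return site that can lazily
deoptimize. Everything here (Address, ZapCodeRange, PatchCallAt, the fake
safepoint table, and the size constants) is a stand-in for illustration, not
the real V8 API:

  #include <cstdint>
  #include <cstdio>
  #include <vector>

  typedef uint8_t* Address;

  // Stand-in for the debug-only zapping; the real helper emits int3.
  static void ZapCodeRange(Address start, Address end) {
    while (start < end) *start++ = 0xCC;
  }

  // Stand-in for overwriting the return site with a call to the deopt entry.
  static void PatchCallAt(Address pc) {
    *pc = 0xE8;  // call opcode; the real patch writes a full relative call
  }

  int main() {
    std::vector<uint8_t> code(64, 0x90);           // fake instruction stream
    Address code_start = code.data();

    struct Entry { unsigned pc_offset; bool lazy_deopt; };
    const std::vector<Entry> table = {{8, true}, {24, false}, {40, true}};

    const unsigned kGapCodeSize = 2;               // made-up sizes
    const unsigned kPatchSize = 5;

    Address prev = code_start;
    for (const Entry& entry : table) {
      Address curr = code_start + entry.pc_offset;
      ZapCodeRange(prev, curr);                    // destroy skipped code
      if (entry.lazy_deopt) {
        curr += kGapCodeSize;                      // step over the gap code
        PatchCallAt(curr);                         // then patch the return
        curr += kPatchSize;
      }
      prev = curr;
    }
    ZapCodeRange(prev, code_start + code.size());  // zap up to the table
    std::printf("patched %zu bytes of fake code\n", code.size());
    return 0;
  }

Because prev and curr are plain addresses, the zap helper can take a
(start, end) pair directly instead of a base address plus an instruction
count, which is what the old offset-based version had to do.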
--- /branches/bleeding_edge/src/ia32/lithium-codegen-ia32.cc Thu Feb 3 01:10:54 2011
+++ /branches/bleeding_edge/src/ia32/lithium-codegen-ia32.cc Thu Feb 3 02:07:22 2011
@@ -77,7 +77,7 @@
void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
code->set_stack_slots(StackSlotCount());
- code->set_safepoint_table_start(safepoints_.GetCodeOffset());
+ code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
PopulateDeoptimizationData(code);
}
=======================================
--- /branches/bleeding_edge/src/objects-inl.h Wed Feb 2 05:31:52 2011
+++ /branches/bleeding_edge/src/objects-inl.h Thu Feb 3 02:07:22 2011
@@ -2510,29 +2510,29 @@
}
-unsigned Code::safepoint_table_start() {
+unsigned Code::safepoint_table_offset() {
ASSERT(kind() == OPTIMIZED_FUNCTION);
- return READ_UINT32_FIELD(this, kSafepointTableStartOffset);
+ return READ_UINT32_FIELD(this, kSafepointTableOffsetOffset);
}
-void Code::set_safepoint_table_start(unsigned offset) {
+void Code::set_safepoint_table_offset(unsigned offset) {
ASSERT(kind() == OPTIMIZED_FUNCTION);
ASSERT(IsAligned(offset, static_cast<unsigned>(kIntSize)));
- WRITE_UINT32_FIELD(this, kSafepointTableStartOffset, offset);
+ WRITE_UINT32_FIELD(this, kSafepointTableOffsetOffset, offset);
}
-unsigned Code::stack_check_table_start() {
+unsigned Code::stack_check_table_offset() {
ASSERT(kind() == FUNCTION);
- return READ_UINT32_FIELD(this, kStackCheckTableStartOffset);
+ return READ_UINT32_FIELD(this, kStackCheckTableOffsetOffset);
}
-void Code::set_stack_check_table_start(unsigned offset) {
+void Code::set_stack_check_table_offset(unsigned offset) {
ASSERT(kind() == FUNCTION);
ASSERT(IsAligned(offset, static_cast<unsigned>(kIntSize)));
- WRITE_UINT32_FIELD(this, kStackCheckTableStartOffset, offset);
+ WRITE_UINT32_FIELD(this, kStackCheckTableOffsetOffset, offset);
}
=======================================
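A side note on the doubled name kSafepointTableOffsetOffset introduced above:
the first "Offset" is what the field stores (an offset into the instruction
stream), the second is the field's own byte position inside the Code header.
A toy illustration with a hypothetical header struct, not the real
v8::internal::Code layout:

  #include <cstddef>
  #include <cstdint>
  #include <cstdio>

  // Hypothetical stand-in for a code object's header.
  struct ToyCodeHeader {
    uint32_t stack_slots;
    uint32_t safepoint_table_offset;  // stores an offset into the instructions
  };

  // Byte offset *of* the field that itself *stores* an offset.
  const size_t kSafepointTableOffsetOffset =
      offsetof(ToyCodeHeader, safepoint_table_offset);

  int main() {
    std::printf("field lives %zu bytes into the header\n",
                kSafepointTableOffsetOffset);
    return 0;
  }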
--- /branches/bleeding_edge/src/objects.cc Wed Feb 2 05:31:52 2011
+++ /branches/bleeding_edge/src/objects.cc Thu Feb 3 02:07:22 2011
@@ -6001,7 +6001,7 @@
void Code::SetNoStackCheckTable() {
// Indicate the absence of a stack-check table by a table start after the
// end of the instructions. Table start must be aligned, so round up.
- set_stack_check_table_start(RoundUp(instruction_size(), kIntSize));
+ set_stack_check_table_offset(RoundUp(instruction_size(), kIntSize));
}
@@ -6278,7 +6278,7 @@
}
PrintF(out, "\n");
} else if (kind() == FUNCTION) {
- unsigned offset = stack_check_table_start();
+ unsigned offset = stack_check_table_offset();
// If there is no stack check table, the "table start" will at or after
// (due to alignment) the end of the instruction stream.
if (static_cast<int>(offset) < instruction_size()) {
=======================================
--- /branches/bleeding_edge/src/objects.h Wed Feb 2 05:31:52 2011
+++ /branches/bleeding_edge/src/objects.h Thu Feb 3 02:07:22 2011
@@ -3275,13 +3275,13 @@
// [safepoint_table_start]: For kind OPTIMIZED_CODE, the offset in
// the instruction stream where the safepoint table starts.
- inline unsigned safepoint_table_start();
- inline void set_safepoint_table_start(unsigned offset);
+ inline unsigned safepoint_table_offset();
+ inline void set_safepoint_table_offset(unsigned offset);
// [stack_check_table_start]: For kind FUNCTION, the offset in the
// instruction stream where the stack check table starts.
- inline unsigned stack_check_table_start();
- inline void set_stack_check_table_start(unsigned offset);
+ inline unsigned stack_check_table_offset();
+ inline void set_stack_check_table_offset(unsigned offset);
// [check type]: For kind CALL_IC, tells how to check if the
// receiver is valid for the given call.
@@ -3445,8 +3445,8 @@
static const int kAllowOSRAtLoopNestingLevelOffset =
kHasDeoptimizationSupportOffset + 1;
- static const int kSafepointTableStartOffset = kStackSlotsOffset + kIntSize;
- static const int kStackCheckTableStartOffset = kStackSlotsOffset + kIntSize;
+ static const int kSafepointTableOffsetOffset = kStackSlotsOffset + kIntSize;
+ static const int kStackCheckTableOffsetOffset = kStackSlotsOffset + kIntSize;
// Flags layout.
static const int kFlagsICStateShift = 0;
=======================================
--- /branches/bleeding_edge/src/runtime.cc Wed Feb 2 09:44:29 2011
+++ /branches/bleeding_edge/src/runtime.cc Thu Feb 3 02:07:22 2011
@@ -6970,7 +6970,7 @@
// the AST id matching the PC.
Address start = unoptimized->instruction_start();
unsigned target_pc_offset = static_cast<unsigned>(frame->pc() - start);
- Address table_cursor = start + unoptimized->stack_check_table_start();
+ Address table_cursor = start + unoptimized->stack_check_table_offset();
uint32_t table_length = Memory::uint32_at(table_cursor);
table_cursor += kIntSize;
for (unsigned i = 0; i < table_length; ++i) {
=======================================
--- /branches/bleeding_edge/src/safepoint-table.cc Wed Feb 2 05:55:29 2011
+++ /branches/bleeding_edge/src/safepoint-table.cc Thu Feb 3 02:07:22 2011
@@ -58,7 +58,7 @@
SafepointTable::SafepointTable(Code* code) {
ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
code_ = code;
- Address header = code->instruction_start() + code->safepoint_table_start();
+ Address header = code->instruction_start() + code->safepoint_table_offset();
length_ = Memory::uint32_at(header + kLengthOffset);
entry_size_ = Memory::uint32_at(header + kEntrySizeOffset);
pc_and_deoptimization_indexes_ = header + kHeaderSize;
=======================================
--- /branches/bleeding_edge/src/x64/deoptimizer-x64.cc Wed Feb 2 05:55:29 2011
+++ /branches/bleeding_edge/src/x64/deoptimizer-x64.cc Thu Feb 3 02:07:22 2011
@@ -88,8 +88,8 @@
}
#ifdef DEBUG
// Destroy the code which is not supposed to run again.
- CHECK(code->safepoint_table_start() >= last_pc_offset);
- unsigned instructions = code->safepoint_table_start() - last_pc_offset;
+ CHECK(code->safepoint_table_offset() >= last_pc_offset);
+ unsigned instructions = code->safepoint_table_offset() - last_pc_offset;
CodePatcher destroyer(code->instruction_start() + last_pc_offset,
instructions);
for (unsigned i = 0; i < instructions; i++) {
=======================================
--- /branches/bleeding_edge/src/x64/lithium-codegen-x64.cc Wed Feb 2 05:55:29 2011
+++ /branches/bleeding_edge/src/x64/lithium-codegen-x64.cc Thu Feb 3 02:07:22 2011
@@ -53,7 +53,7 @@
void LCodeGen::FinishCode(Handle<Code> code) {
ASSERT(is_done());
code->set_stack_slots(StackSlotCount());
- code->set_safepoint_table_start(safepoints_.GetCodeOffset());
+ code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
PopulateDeoptimizationData(code);
}
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev