Revision: 16425
Author: [email protected]
Date: Thu Aug 29 13:06:04 2013 UTC
Log: Refactor interrupt check patching for OSR.
This is to prepare for speculative concurrent OSR. I'm planning to add
another builtin to patch to, to indicate a concurrent OSR.
[email protected]
BUG=
Review URL: https://codereview.chromium.org/23608004
http://code.google.com/p/v8/source/detail?r=16425
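The net effect on the Deoptimizer interface (collected from the src/deoptimizer.h hunk below) is roughly the following; callers now pass an Isolate* and let the Deoptimizer resolve the interrupt stub and OSR builtin itself:

    // Before: callers looked up both code objects and passed them in.
    static void PatchInterruptCode(Code* unoptimized_code,
                                   Code* interrupt_code,
                                   Code* replacement_code);
    static bool InterruptCodeIsPatched(Code* unoptimized_code,
                                       Address pc_after,
                                       Code* interrupt_code,
                                       Code* replacement_code);

    // After: the lookup moves inside, and the DEBUG-only boolean check
    // becomes an enum query that can grow a third state for concurrent OSR.
    enum InterruptPatchState {
      NOT_PATCHED,
      PATCHED_FOR_OSR
    };
    static void PatchInterruptCode(Isolate* isolate, Code* unoptimized_code);
    static InterruptPatchState GetInterruptPatchState(Isolate* isolate,
                                                      Code* unoptimized_code,
                                                      Address pc_after);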
Modified:
/branches/bleeding_edge/src/arm/deoptimizer-arm.cc
/branches/bleeding_edge/src/compiler.cc
/branches/bleeding_edge/src/deoptimizer.cc
/branches/bleeding_edge/src/deoptimizer.h
/branches/bleeding_edge/src/ia32/deoptimizer-ia32.cc
/branches/bleeding_edge/src/mips/deoptimizer-mips.cc
/branches/bleeding_edge/src/runtime-profiler.cc
/branches/bleeding_edge/src/runtime.cc
/branches/bleeding_edge/src/x64/deoptimizer-x64.cc
=======================================
--- /branches/bleeding_edge/src/arm/deoptimizer-arm.cc Wed Jul 24 11:12:17 2013 UTC
+++ /branches/bleeding_edge/src/arm/deoptimizer-arm.cc Thu Aug 29 13:06:04 2013 UTC
@@ -101,12 +101,7 @@
void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
- Code* interrupt_code,
Code* replacement_code) {
- ASSERT(!InterruptCodeIsPatched(unoptimized_code,
- pc_after,
- interrupt_code,
- replacement_code));
static const int kInstrSize = Assembler::kInstrSize;
// Turn the jump into nops.
CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
@@ -125,12 +120,7 @@
void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
- Code* interrupt_code,
- Code* replacement_code) {
- ASSERT(InterruptCodeIsPatched(unoptimized_code,
- pc_after,
- interrupt_code,
- replacement_code));
+ Code* interrupt_code) {
static const int kInstrSize = Assembler::kInstrSize;
// Restore the original jump.
CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
@@ -150,10 +140,10 @@
#ifdef DEBUG
-bool Deoptimizer::InterruptCodeIsPatched(Code* unoptimized_code,
- Address pc_after,
- Code* interrupt_code,
- Code* replacement_code) {
+Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState(
+ Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc_after) {
static const int kInstrSize = Assembler::kInstrSize;
ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);
@@ -164,17 +154,23 @@
if (Assembler::IsNop(Assembler::instr_at(pc_after - 3 * kInstrSize))) {
ASSERT(Assembler::IsLdrPcImmediateOffset(
Assembler::instr_at(pc_after - 2 * kInstrSize)));
- ASSERT(reinterpret_cast<uint32_t>(replacement_code->entry()) ==
+ Code* osr_builtin =
+ isolate->builtins()->builtin(Builtins::kOnStackReplacement);
+ ASSERT(reinterpret_cast<uint32_t>(osr_builtin->entry()) ==
Memory::uint32_at(interrupt_address_pointer));
- return true;
+ return PATCHED_FOR_OSR;
} else {
+ // Get the interrupt stub code object to match against from cache.
+ Code* interrupt_code = NULL;
+ InterruptStub stub;
+ if (!stub.FindCodeInCache(&interrupt_code, isolate)) UNREACHABLE();
ASSERT(Assembler::IsLdrPcImmediateOffset(
Assembler::instr_at(pc_after - 2 * kInstrSize)));
ASSERT_EQ(kBranchBeforeInterrupt,
Memory::int32_at(pc_after - 3 * kInstrSize));
ASSERT(reinterpret_cast<uint32_t>(interrupt_code->entry()) ==
Memory::uint32_at(interrupt_address_pointer));
- return false;
+ return NOT_PATCHED;
}
}
#endif // DEBUG
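For readers who don't have the ARM back edge layout in their head, the site inspected above looks schematically like this (a sketch derived from the asserts; the exact operands are not spelled out in this diff):

    pc_after - 3 * kInstrSize:  b <over the call>      ; NOT_PATCHED
                                nop                    ; PATCHED_FOR_OSR
    pc_after - 2 * kInstrSize:  ldr ip, [pc, #offset]  ; entry of interrupt stub
                                                       ; or OnStackReplacement
    pc_after - 1 * kInstrSize:  blx ip

GetInterruptPatchState keys off the instruction at pc_after - 3 * kInstrSize: a nop means the branch guarding the interrupt call has been patched away for OSR.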
=======================================
--- /branches/bleeding_edge/src/compiler.cc Thu Aug 29 09:15:13 2013 UTC
+++ /branches/bleeding_edge/src/compiler.cc Thu Aug 29 13:06:04 2013 UTC
@@ -1024,13 +1024,7 @@
// aborted optimization. In either case we want to continue executing
// the unoptimized code without running into OSR. If the unoptimized
// code has been patched for OSR, unpatch it.
- InterruptStub interrupt_stub;
- Handle<Code> interrupt_code = interrupt_stub.GetCode(isolate);
- Handle<Code> replacement_code =
- isolate->builtins()->OnStackReplacement();
- Deoptimizer::RevertInterruptCode(shared->code(),
- *interrupt_code,
- *replacement_code);
+ Deoptimizer::RevertInterruptCode(isolate, shared->code());
}
if (isolate->has_pending_exception()) isolate->clear_pending_exception();
=======================================
--- /branches/bleeding_edge/src/deoptimizer.cc Thu Aug 29 09:15:13 2013 UTC
+++ /branches/bleeding_edge/src/deoptimizer.cc Thu Aug 29 13:06:04 2013 UTC
@@ -2578,9 +2578,17 @@
}
-void Deoptimizer::PatchInterruptCode(Code* unoptimized_code,
- Code* interrupt_code,
- Code* replacement_code) {
+void Deoptimizer::PatchInterruptCode(Isolate* isolate,
+ Code* unoptimized_code) {
+ DisallowHeapAllocation no_gc;
+ // Get the interrupt stub code object to match against. We aren't
+ // prepared to generate it, but we don't expect to have to.
+ Code* interrupt_code = NULL;
+ InterruptStub interrupt_stub;
+ CHECK(interrupt_stub.FindCodeInCache(&interrupt_code, isolate));
+ Code* replacement_code =
+ isolate->builtins()->builtin(Builtins::kOnStackReplacement);
+
// Iterate over the back edge table and patch every interrupt
// call to an unconditional call to the replacement code.
int loop_nesting_level =
unoptimized_code->allow_osr_at_loop_nesting_level();
@@ -2589,9 +2597,11 @@
!back_edges.Done();
back_edges.Next()) {
if (static_cast<int>(back_edges.loop_depth()) == loop_nesting_level) {
+ ASSERT_EQ(NOT_PATCHED, GetInterruptPatchState(isolate,
+ unoptimized_code,
+ back_edges.pc()));
PatchInterruptCodeAt(unoptimized_code,
back_edges.pc(),
- interrupt_code,
replacement_code);
}
}
@@ -2599,14 +2609,17 @@
unoptimized_code->set_back_edges_patched_for_osr(true);
#ifdef DEBUG
Deoptimizer::VerifyInterruptCode(
-      unoptimized_code, interrupt_code, replacement_code,
-      loop_nesting_level);
+      isolate, unoptimized_code, loop_nesting_level);
#endif // DEBUG
}
-void Deoptimizer::RevertInterruptCode(Code* unoptimized_code,
- Code* interrupt_code,
- Code* replacement_code) {
+void Deoptimizer::RevertInterruptCode(Isolate* isolate,
+ Code* unoptimized_code) {
+ InterruptStub interrupt_stub;
+ Code* interrupt_code = *interrupt_stub.GetCode(isolate);
+ DisallowHeapAllocation no_gc;
+
// Iterate over the back edge table and revert the patched interrupt calls.
ASSERT(unoptimized_code->back_edges_patched_for_osr());
int loop_nesting_level =
unoptimized_code->allow_osr_at_loop_nesting_level();
@@ -2615,10 +2628,10 @@
!back_edges.Done();
back_edges.Next()) {
if (static_cast<int>(back_edges.loop_depth()) <= loop_nesting_level) {
- RevertInterruptCodeAt(unoptimized_code,
- back_edges.pc(),
- interrupt_code,
- replacement_code);
+ ASSERT_EQ(PATCHED_FOR_OSR, GetInterruptPatchState(isolate,
+ unoptimized_code,
+ back_edges.pc()));
+      RevertInterruptCodeAt(unoptimized_code, back_edges.pc(),
+                            interrupt_code);
}
}
@@ -2626,16 +2639,14 @@
unoptimized_code->set_allow_osr_at_loop_nesting_level(0);
#ifdef DEBUG
// Assert that none of the back edges are patched anymore.
- Deoptimizer::VerifyInterruptCode(
- unoptimized_code, interrupt_code, replacement_code, -1);
+ Deoptimizer::VerifyInterruptCode(isolate, unoptimized_code, -1);
#endif // DEBUG
}
#ifdef DEBUG
-void Deoptimizer::VerifyInterruptCode(Code* unoptimized_code,
- Code* interrupt_code,
- Code* replacement_code,
+void Deoptimizer::VerifyInterruptCode(Isolate* isolate,
+ Code* unoptimized_code,
int loop_nesting_level) {
for (FullCodeGenerator::BackEdgeTableIterator
back_edges(unoptimized_code);
!back_edges.Done();
@@ -2645,10 +2656,9 @@
// Assert that all back edges for shallower loops (and only those)
// have already been patched.
CHECK_EQ((static_cast<int>(loop_depth) <= loop_nesting_level),
- InterruptCodeIsPatched(unoptimized_code,
- back_edges.pc(),
- interrupt_code,
- replacement_code));
+ GetInterruptPatchState(isolate,
+ unoptimized_code,
+ back_edges.pc()) != NOT_PATCHED);
}
}
#endif // DEBUG
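Note the asymmetry in how the two entry points obtain the interrupt stub. PatchInterruptCode runs entirely under DisallowHeapAllocation, so it uses FindCodeInCache behind a CHECK, which never allocates; RevertInterruptCode calls interrupt_stub.GetCode(isolate), which may lazily compile the stub, and only then enters the no-allocation scope. A condensed sketch of the two idioms:

    // Patch path: must not allocate, so the stub must already exist.
    DisallowHeapAllocation no_gc;
    Code* interrupt_code = NULL;
    InterruptStub interrupt_stub;
    CHECK(interrupt_stub.FindCodeInCache(&interrupt_code, isolate));

    // Revert path: may (re)generate the stub before the no-GC scope starts.
    InterruptStub interrupt_stub;
    Code* interrupt_code = *interrupt_stub.GetCode(isolate);
    DisallowHeapAllocation no_gc;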
=======================================
--- /branches/bleeding_edge/src/deoptimizer.h Thu Aug 22 13:03:40 2013 UTC
+++ /branches/bleeding_edge/src/deoptimizer.h Thu Aug 29 13:06:04 2013 UTC
@@ -144,6 +144,11 @@
DEBUGGER
};
+ enum InterruptPatchState {
+ NOT_PATCHED,
+ PATCHED_FOR_OSR
+ };
+
static const int kBailoutTypesWithCodeEntry = SOFT + 1;
struct JumpTableEntry {
@@ -231,40 +236,34 @@
// Patch all interrupts with allowed loop depth in the unoptimized code to
// unconditionally call replacement_code.
- static void PatchInterruptCode(Code* unoptimized_code,
- Code* interrupt_code,
- Code* replacement_code);
+ static void PatchInterruptCode(Isolate* isolate,
+ Code* unoptimized_code);
// Patch the interrupt at the instruction before pc_after in
// the unoptimized code to unconditionally call replacement_code.
static void PatchInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
- Code* interrupt_code,
Code* replacement_code);
// Change all patched interrupts patched in the unoptimized code
// back to normal interrupts.
- static void RevertInterruptCode(Code* unoptimized_code,
- Code* interrupt_code,
- Code* replacement_code);
+ static void RevertInterruptCode(Isolate* isolate,
+ Code* unoptimized_code);
// Change patched interrupt in the unoptimized code
// back to a normal interrupt.
static void RevertInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
- Code* interrupt_code,
- Code* replacement_code);
+ Code* interrupt_code);
#ifdef DEBUG
- static bool InterruptCodeIsPatched(Code* unoptimized_code,
- Address pc_after,
- Code* interrupt_code,
- Code* replacement_code);
+ static InterruptPatchState GetInterruptPatchState(Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc_after);
// Verify that all back edges of a certain loop depth are patched.
- static void VerifyInterruptCode(Code* unoptimized_code,
- Code* interrupt_code,
- Code* replacement_code,
+ static void VerifyInterruptCode(Isolate* isolate,
+ Code* unoptimized_code,
int loop_nesting_level);
#endif // DEBUG
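Per the log message, the point of replacing the bool with InterruptPatchState is to leave room for a third value once speculative concurrent OSR lands. Purely as illustration of where this is headed (not part of this patch, and the enumerator name is invented):

    enum InterruptPatchState {
      NOT_PATCHED,
      PATCHED_FOR_OSR,
      PATCHED_FOR_CONCURRENT_OSR  // hypothetical follow-up state
    };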
=======================================
--- /branches/bleeding_edge/src/ia32/deoptimizer-ia32.cc Fri Aug 2 09:53:11 2013 UTC
+++ /branches/bleeding_edge/src/ia32/deoptimizer-ia32.cc Thu Aug 29 13:06:04 2013 UTC
@@ -200,12 +200,7 @@
void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
- Code* interrupt_code,
Code* replacement_code) {
- ASSERT(!InterruptCodeIsPatched(unoptimized_code,
- pc_after,
- interrupt_code,
- replacement_code));
// Turn the jump into nops.
Address call_target_address = pc_after - kIntSize;
*(call_target_address - 3) = kNopByteOne;
@@ -221,12 +216,7 @@
void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
- Code* interrupt_code,
- Code* replacement_code) {
- ASSERT(InterruptCodeIsPatched(unoptimized_code,
- pc_after,
- interrupt_code,
- replacement_code));
+ Code* interrupt_code) {
// Restore the original jump.
Address call_target_address = pc_after - kIntSize;
*(call_target_address - 3) = kJnsInstruction;
@@ -241,23 +231,29 @@
#ifdef DEBUG
-bool Deoptimizer::InterruptCodeIsPatched(Code* unoptimized_code,
- Address pc_after,
- Code* interrupt_code,
- Code* replacement_code) {
+Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState(
+ Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc_after) {
Address call_target_address = pc_after - kIntSize;
ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
if (*(call_target_address - 3) == kNopByteOne) {
- ASSERT_EQ(replacement_code->entry(),
- Assembler::target_address_at(call_target_address));
ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
- return true;
+ Code* osr_builtin =
+ isolate->builtins()->builtin(Builtins::kOnStackReplacement);
+ ASSERT_EQ(osr_builtin->entry(),
+ Assembler::target_address_at(call_target_address));
+ return PATCHED_FOR_OSR;
} else {
+ // Get the interrupt stub code object to match against from cache.
+ Code* interrupt_code = NULL;
+ InterruptStub stub;
+ if (!stub.FindCodeInCache(&interrupt_code, isolate)) UNREACHABLE();
ASSERT_EQ(interrupt_code->entry(),
Assembler::target_address_at(call_target_address));
ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
- return false;
+ return NOT_PATCHED;
}
}
#endif // DEBUG
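On ia32 (and identically on x64 below), the patch is byte-level: the two-byte jns that skips the interrupt call is overwritten with two nops, and the call's 4-byte target is redirected. Schematically, from the asserts above:

    call_target_address - 3:  jns <offset>   ; NOT_PATCHED: skip the call
                              nop nop        ; PATCHED_FOR_OSR: always call
    call_target_address - 1:  call opcode
    call_target_address:      32-bit target  ; interrupt stub entry, or
                                             ; OnStackReplacement entry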
=======================================
--- /branches/bleeding_edge/src/mips/deoptimizer-mips.cc Wed Jul 24 11:12:17 2013 UTC
+++ /branches/bleeding_edge/src/mips/deoptimizer-mips.cc Thu Aug 29 13:06:04 2013 UTC
@@ -101,12 +101,7 @@
void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
- Code* interrupt_code,
Code* replacement_code) {
- ASSERT(!InterruptCodeIsPatched(unoptimized_code,
- pc_after,
- interrupt_code,
- replacement_code));
static const int kInstrSize = Assembler::kInstrSize;
// Replace the sltu instruction with load-imm 1 to at, so beq is not taken.
CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
@@ -123,12 +118,7 @@
void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
- Code* interrupt_code,
- Code* replacement_code) {
- ASSERT(InterruptCodeIsPatched(unoptimized_code,
- pc_after,
- interrupt_code,
- replacement_code));
+ Code* interrupt_code) {
static const int kInstrSize = Assembler::kInstrSize;
// Restore the sltu instruction so beq can be taken again.
CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
@@ -143,23 +133,29 @@
#ifdef DEBUG
-bool Deoptimizer::InterruptCodeIsPatched(Code* unoptimized_code,
- Address pc_after,
- Code* interrupt_code,
- Code* replacement_code) {
+Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState(
+ Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc_after) {
static const int kInstrSize = Assembler::kInstrSize;
ASSERT(Assembler::IsBeq(Assembler::instr_at(pc_after - 5 * kInstrSize)));
if (Assembler::IsAddImmediate(
Assembler::instr_at(pc_after - 6 * kInstrSize))) {
+ Code* osr_builtin =
+ isolate->builtins()->builtin(Builtins::kOnStackReplacement);
ASSERT(reinterpret_cast<uint32_t>(
Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
- reinterpret_cast<uint32_t>(replacement_code->entry()));
- return true;
+ reinterpret_cast<uint32_t>(osr_builtin->entry()));
+ return PATCHED_FOR_OSR;
} else {
+ // Get the interrupt stub code object to match against from cache.
+ Code* interrupt_code = NULL;
+ InterruptStub stub;
+ if (!stub.FindCodeInCache(&interrupt_code, isolate)) UNREACHABLE();
ASSERT(reinterpret_cast<uint32_t>(
Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
reinterpret_cast<uint32_t>(interrupt_code->entry()));
- return false;
+ return NOT_PATCHED;
}
}
#endif // DEBUG
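The MIPS scheme flips a conditional instead of nopping a jump: unpatched code computes a stack-limit comparison into at, and a beq skips the interrupt call when no interrupt is pending; the patch replaces that comparison with a load-imm 1 to at so the beq is never taken and the (retargeted) call always runs. A sketch based on the comments and asserts above (the sltu operands are assumptions, not shown in this diff):

    pc_after - 6 * kInstrSize:  sltu at, sp, t0     ; NOT_PATCHED
                                addiu at, zero, 1   ; PATCHED_FOR_OSR
    pc_after - 5 * kInstrSize:  beq at, zero, <ok>  ; taken only when at == 0
    pc_after - 4 * kInstrSize:  <load call target>  ; interrupt stub or
                                                    ; OSR builtin entry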
=======================================
--- /branches/bleeding_edge/src/runtime-profiler.cc Fri Aug 23 13:30:02 2013 UTC
+++ /branches/bleeding_edge/src/runtime-profiler.cc Thu Aug 29 13:06:04 2013 UTC
@@ -177,18 +177,7 @@
PrintF(" for on-stack replacement]\n");
}
- // Get the interrupt stub code object to match against. We aren't
- // prepared to generate it, but we don't expect to have to.
- Code* interrupt_code = NULL;
- InterruptStub interrupt_stub;
-  bool found_code = interrupt_stub.FindCodeInCache(&interrupt_code,
-                                                   isolate_);
- if (found_code) {
- Code* replacement_code =
- isolate_->builtins()->builtin(Builtins::kOnStackReplacement);
- Code* unoptimized_code = shared->code();
- Deoptimizer::PatchInterruptCode(
- unoptimized_code, interrupt_code, replacement_code);
- }
+ Deoptimizer::PatchInterruptCode(isolate_, shared->code());
}
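One subtle behavioral change here: the old caller silently skipped patching when the InterruptStub was not yet in the code cache (the found_code guard), whereas the new Deoptimizer::PatchInterruptCode CHECKs the cache lookup and fails hard on a miss. In sketch form:

    // Before (in the profiler): a cache miss skipped OSR patching.
    if (interrupt_stub.FindCodeInCache(&interrupt_code, isolate_)) {
      Deoptimizer::PatchInterruptCode(unoptimized_code, interrupt_code,
                                      replacement_code);
    }
    // After (inside the Deoptimizer): a cache miss is fatal.
    CHECK(interrupt_stub.FindCodeInCache(&interrupt_code, isolate));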
=======================================
--- /branches/bleeding_edge/src/runtime.cc Tue Aug 27 13:55:00 2013 UTC
+++ /branches/bleeding_edge/src/runtime.cc Thu Aug 29 13:06:04 2013 UTC
@@ -8662,12 +8662,7 @@
function->PrintName();
PrintF("]\n");
}
- InterruptStub interrupt_stub;
- Handle<Code> interrupt_code = interrupt_stub.GetCode(isolate);
-  Handle<Code> replacement_code =
-      isolate->builtins()->OnStackReplacement();
- Deoptimizer::RevertInterruptCode(*unoptimized,
- *interrupt_code,
- *replacement_code);
+ Deoptimizer::RevertInterruptCode(isolate, *unoptimized);
// If the optimization attempt succeeded, return the AST id tagged as a
// smi. This tells the builtin that we need to translate the unoptimized
=======================================
--- /branches/bleeding_edge/src/x64/deoptimizer-x64.cc Tue Aug 6 00:09:01 2013 UTC
+++ /branches/bleeding_edge/src/x64/deoptimizer-x64.cc Thu Aug 29 13:06:04 2013 UTC
@@ -105,12 +105,7 @@
void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
- Code* interrupt_code,
Code* replacement_code) {
- ASSERT(!InterruptCodeIsPatched(unoptimized_code,
- pc_after,
- interrupt_code,
- replacement_code));
// Turn the jump into nops.
Address call_target_address = pc_after - kIntSize;
*(call_target_address - 3) = kNopByteOne;
@@ -126,12 +121,7 @@
void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
- Code* interrupt_code,
- Code* replacement_code) {
- ASSERT(InterruptCodeIsPatched(unoptimized_code,
- pc_after,
- interrupt_code,
- replacement_code));
+ Code* interrupt_code) {
// Restore the original jump.
Address call_target_address = pc_after - kIntSize;
*(call_target_address - 3) = kJnsInstruction;
@@ -146,23 +136,29 @@
#ifdef DEBUG
-bool Deoptimizer::InterruptCodeIsPatched(Code* unoptimized_code,
- Address pc_after,
- Code* interrupt_code,
- Code* replacement_code) {
+Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState(
+ Isolate* isolate,
+ Code* unoptimized_code,
+ Address pc_after) {
Address call_target_address = pc_after - kIntSize;
ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
if (*(call_target_address - 3) == kNopByteOne) {
- ASSERT(replacement_code->entry() ==
- Assembler::target_address_at(call_target_address));
ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
- return true;
+ Code* osr_builtin =
+ isolate->builtins()->builtin(Builtins::kOnStackReplacement);
+ ASSERT_EQ(osr_builtin->entry(),
+ Assembler::target_address_at(call_target_address));
+ return PATCHED_FOR_OSR;
} else {
+ // Get the interrupt stub code object to match against from cache.
+ Code* interrupt_code = NULL;
+ InterruptStub stub;
+ if (!stub.FindCodeInCache(&interrupt_code, isolate)) UNREACHABLE();
ASSERT_EQ(interrupt_code->entry(),
Assembler::target_address_at(call_target_address));
ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
- return false;
+ return NOT_PATCHED;
}
}
#endif // DEBUG