Revision: 15640
Author: [email protected]
Date: Fri Jul 12 00:26:00 2013
Log: Remove special-casing of the EAGER and SOFT deoptimization calling
conventions, so that the calling address is always available to
deoptimization entries.
BUG=
Review URL: https://codereview.chromium.org/18356008
http://code.google.com/p/v8/source/detail?r=15640
Modified:
/branches/bleeding_edge/src/arm/deoptimizer-arm.cc
/branches/bleeding_edge/src/arm/lithium-codegen-arm.cc
/branches/bleeding_edge/src/deoptimizer.cc
/branches/bleeding_edge/src/ia32/deoptimizer-ia32.cc
/branches/bleeding_edge/src/ia32/lithium-codegen-ia32.cc
/branches/bleeding_edge/src/mips/deoptimizer-mips.cc
/branches/bleeding_edge/src/mips/lithium-codegen-mips.cc
/branches/bleeding_edge/src/x64/deoptimizer-x64.cc
/branches/bleeding_edge/src/x64/lithium-codegen-x64.cc
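
Net effect, the same across all four ports: a deoptimization entry is now
always reached through a call-like sequence, so the address of the
deoptimizing site is always available (in lr/ra on arm/mips, as a pushed
return address on ia32/x64), and the per-type EAGER/SOFT/LAZY branches
disappear. Schematically, the tail of DeoptimizeIf collapses to an
unconditional call (arm spelling shown; ia32/x64 use no_condition):

  if (cc == al && frame_is_built_) {
    // Always a call, never a jump: the return address identifies the site.
    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
  }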
=======================================
--- /branches/bleeding_edge/src/arm/deoptimizer-arm.cc Mon Jun 3 08:32:22 2013
+++ /branches/bleeding_edge/src/arm/deoptimizer-arm.cc Fri Jul 12 00:26:00 2013
@@ -35,7 +35,7 @@
namespace v8 {
namespace internal {
-const int Deoptimizer::table_entry_size_ = 16;
+const int Deoptimizer::table_entry_size_ = 12;
int Deoptimizer::patch_size() {
@@ -465,22 +465,12 @@
// Get the bailout id from the stack.
__ ldr(r2, MemOperand(sp, kSavedRegistersAreaSize));
- // Get the address of the location in the code object if possible (r3) (return
+ // Get the address of the location in the code object (r3) (return
// address for lazy deoptimization) and compute the fp-to-sp delta in
// register r4.
- if (type() == EAGER || type() == SOFT) {
- __ mov(r3, Operand::Zero());
- // Correct one word for bailout id.
- __ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
- } else if (type() == OSR) {
- __ mov(r3, lr);
- // Correct one word for bailout id.
- __ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
- } else {
- __ mov(r3, lr);
- // Correct two words for bailout id and return address.
- __ add(r4, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
- }
+ __ mov(r3, lr);
+ // Correct one word for bailout id.
+ __ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
__ sub(r4, fp, r4);
// Allocate a new deoptimizer object.
@@ -521,13 +511,8 @@
__ vstr(d0, r1, dst_offset);
}
- // Remove the bailout id, eventually return address, and the saved registers
- // from the stack.
- if (type() == EAGER || type() == SOFT || type() == OSR) {
- __ add(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
- } else {
- __ add(sp, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
- }
+ // Remove the bailout id and the saved registers from the stack.
+ __ add(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
// Compute a pointer to the unwinding limit in register r2; that is
// the first stack slot not part of the input frame.
@@ -636,18 +621,12 @@
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
- // Create a sequence of deoptimization entries. Note that any
- // registers may be still live.
+ // Create a sequence of deoptimization entries.
+ // Note that registers are still live when jumping to an entry.
Label done;
for (int i = 0; i < count(); i++) {
int start = masm()->pc_offset();
USE(start);
- if (type() == EAGER || type() == SOFT) {
- __ nop();
- } else {
- // Emulate ia32 like call by pushing return address to stack.
- __ push(lr);
- }
__ mov(ip, Operand(i));
__ push(ip);
__ b(&done);
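
For reference, the 16 -> 12 change in table_entry_size_ above falls out of
the entry sequence: the old per-type first instruction (nop for EAGER/SOFT,
push(lr) otherwise) is gone, and its 4 bytes account exactly for the
difference. Assuming the mov of the small entry index fits in one 4-byte
ARM instruction, a schematic entry is now:

  __ mov(ip, Operand(i));  // bailout id
  __ push(ip);
  __ b(&done);             // 3 instructions * 4 bytes = 12 bytes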
=======================================
--- /branches/bleeding_edge/src/arm/lithium-codegen-arm.cc Wed Jul 10 02:02:23 2013
+++ /branches/bleeding_edge/src/arm/lithium-codegen-arm.cc Fri Jul 12 00:26:00 2013
@@ -343,8 +343,7 @@
}
Label table_start;
__ bind(&table_start);
- Label needs_frame_not_call;
- Label needs_frame_is_call;
+ Label needs_frame;
for (int i = 0; i < deopt_jump_table_.length(); i++) {
__ bind(&deopt_jump_table_[i].label);
Address entry = deopt_jump_table_[i].address;
@@ -357,45 +356,24 @@
}
if (deopt_jump_table_[i].needs_frame) {
__ mov(ip, Operand(ExternalReference::ForDeoptEntry(entry)));
- if (type == Deoptimizer::LAZY) {
- if (needs_frame_is_call.is_bound()) {
- __ b(&needs_frame_is_call);
- } else {
- __ bind(&needs_frame_is_call);
- __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- ASSERT(info()->IsStub());
- __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
- __ push(scratch0());
- __ add(fp, sp, Operand(2 * kPointerSize));
- __ mov(lr, Operand(pc), LeaveCC, al);
- __ mov(pc, ip);
- }
+ if (needs_frame.is_bound()) {
+ __ b(&needs_frame);
} else {
- if (needs_frame_not_call.is_bound()) {
- __ b(&needs_frame_not_call);
- } else {
- __ bind(&needs_frame_not_call);
- __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- ASSERT(info()->IsStub());
- __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
- __ push(scratch0());
- __ add(fp, sp, Operand(2 * kPointerSize));
- __ mov(pc, ip);
- }
+ __ bind(&needs_frame);
+ __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ ASSERT(info()->IsStub());
+ __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
+ __ push(scratch0());
+ __ add(fp, sp, Operand(2 * kPointerSize));
+ __ mov(lr, Operand(pc), LeaveCC, al);
+ __ mov(pc, ip);
}
} else {
- if (type == Deoptimizer::LAZY) {
- __ mov(lr, Operand(pc), LeaveCC, al);
- __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
- } else {
- __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
- }
+ __ mov(lr, Operand(pc), LeaveCC, al);
+ __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
}
masm()->CheckConstPool(false, false);
}
@@ -803,13 +781,8 @@
}
ASSERT(info()->IsStub() || frame_is_built_);
- bool needs_lazy_deopt = info()->IsStub();
if (cc == al && frame_is_built_) {
- if (needs_lazy_deopt) {
- __ Call(entry, RelocInfo::RUNTIME_ENTRY);
- } else {
- __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
- }
+ __ Call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
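
The mov lr / mov pc pair kept above is the usual ARM call emulation:
reading pc yields the address of the current instruction plus 8, so lr
ends up pointing at the instruction after the branch. Schematically:

  __ mov(lr, Operand(pc), LeaveCC, al);  // lr = here + 8, i.e. after the next mov
  __ mov(pc, ip);                        // branch to the entry; lr holds the return site

This is what lets the former EAGER/SOFT paths share the LAZY code: every
jump-table exit now looks like a call.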
=======================================
--- /branches/bleeding_edge/src/deoptimizer.cc Thu Jul 11 09:45:58 2013
+++ /branches/bleeding_edge/src/deoptimizer.cc Fri Jul 12 00:26:00 2013
@@ -542,6 +542,7 @@
if (function->IsSmi()) {
function = NULL;
}
+ ASSERT(from != NULL);
if (function != NULL && function->IsOptimized()) {
function->shared()->increment_deopt_count();
if (bailout_type_ == Deoptimizer::SOFT) {
@@ -573,8 +574,6 @@
switch (bailout_type_) {
case Deoptimizer::SOFT:
case Deoptimizer::EAGER:
- ASSERT(from_ == NULL);
- return function->code();
case Deoptimizer::LAZY: {
Code* compiled_code =
isolate_->deoptimizer_data()->FindDeoptimizingCode(from_);
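
With the EAGER/SOFT cases falling through, the switch now resolves the
compiled code the same way for all three bailout types, which is why from
must be valid (hence the new ASSERT above). The merged cases read,
schematically:

  switch (bailout_type_) {
    case Deoptimizer::SOFT:
    case Deoptimizer::EAGER:
    case Deoptimizer::LAZY: {
      // All types locate the code object from the deopt site address.
      Code* compiled_code =
          isolate_->deoptimizer_data()->FindDeoptimizingCode(from_);
      ...
    }
  }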
=======================================
--- /branches/bleeding_edge/src/ia32/deoptimizer-ia32.cc Fri Jun 28 08:34:48 2013
+++ /branches/bleeding_edge/src/ia32/deoptimizer-ia32.cc Fri Jul 12 00:26:00 2013
@@ -566,15 +566,11 @@
// Get the bailout id from the stack.
__ mov(ebx, Operand(esp, kSavedRegistersAreaSize));
- // Get the address of the location in the code object if possible
+ // Get the address of the location in the code object
// and compute the fp-to-sp delta in register edx.
- if (type() == EAGER || type() == SOFT) {
- __ Set(ecx, Immediate(0));
- __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
- } else {
- __ mov(ecx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
- __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 2 * kPointerSize));
- }
+ __ mov(ecx, Operand(esp, kSavedRegistersAreaSize + 1 * kPointerSize));
+ __ lea(edx, Operand(esp, kSavedRegistersAreaSize + 2 * kPointerSize));
+
__ sub(edx, ebp);
__ neg(edx);
@@ -620,12 +616,8 @@
// and check that the generated code never deoptimizes with unbalanced stack.
__ fnclex();
- // Remove the bailout id and the double registers from the stack.
- if (type() == EAGER || type() == SOFT) {
- __ add(esp, Immediate(kDoubleRegsSize + kPointerSize));
- } else {
- __ add(esp, Immediate(kDoubleRegsSize + 2 * kPointerSize));
- }
+ // Remove the bailout id, return address and the double registers.
+ __ add(esp, Immediate(kDoubleRegsSize + 2 * kPointerSize));
// Compute a pointer to the unwinding limit in register ecx; that is
// the first stack slot not part of the input frame.
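
A sketch of the now-uniform stack layout at this point, offsets as in the
patch:

  esp + kSavedRegistersAreaSize + 2 * kPointerSize  <- fp-to-sp base (lea into edx)
  esp + kSavedRegistersAreaSize + 1 * kPointerSize  <- return address (mov into ecx)
  esp + kSavedRegistersAreaSize                     <- bailout id (mov into ebx)
  esp ...                                           <- saved registers and doubles

Previously only the LAZY shape had the return-address word; now every
bailout type does.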
=======================================
--- /branches/bleeding_edge/src/ia32/lithium-codegen-ia32.cc Thu Jul 11 06:07:04 2013
+++ /branches/bleeding_edge/src/ia32/lithium-codegen-ia32.cc Fri Jul 12 00:26:00 2013
@@ -364,8 +364,7 @@
bool LCodeGen::GenerateJumpTable() {
- Label needs_frame_not_call;
- Label needs_frame_is_call;
+ Label needs_frame;
if (jump_table_.length() > 0) {
Comment(";;; -------------------- Jump table --------------------");
}
@@ -381,56 +380,32 @@
}
if (jump_table_[i].needs_frame) {
__ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
- if (type == Deoptimizer::LAZY) {
- if (needs_frame_is_call.is_bound()) {
- __ jmp(&needs_frame_is_call);
- } else {
- __ bind(&needs_frame_is_call);
- __ push(MemOperand(ebp, StandardFrameConstants::kContextOffset));
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- ASSERT(info()->IsStub());
- __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
- // Push a PC inside the function so that the deopt code can find where
- // the deopt comes from. It doesn't have to be the precise return
- // address of a "calling" LAZY deopt, it only has to be somewhere
- // inside the code body.
- Label push_approx_pc;
- __ call(&push_approx_pc);
- __ bind(&push_approx_pc);
- // Push the continuation which was stashed where the ebp should
- // be. Replace it with the saved ebp.
- __ push(MemOperand(esp, 3 * kPointerSize));
- __ mov(MemOperand(esp, 4 * kPointerSize), ebp);
- __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
- __ ret(0); // Call the continuation without clobbering registers.
- }
+ if (needs_frame.is_bound()) {
+ __ jmp(&needs_frame);
} else {
- if (needs_frame_not_call.is_bound()) {
- __ jmp(&needs_frame_not_call);
- } else {
- __ bind(&needs_frame_not_call);
- __ push(MemOperand(ebp, StandardFrameConstants::kContextOffset));
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- ASSERT(info()->IsStub());
- __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
- // Push the continuation which was stashed where the ebp should
- // be. Replace it with the saved ebp.
- __ push(MemOperand(esp, 2 * kPointerSize));
- __ mov(MemOperand(esp, 3 * kPointerSize), ebp);
- __ lea(ebp, MemOperand(esp, 3 * kPointerSize));
- __ ret(0); // Call the continuation without clobbering registers.
- }
+ __ bind(&needs_frame);
+ __ push(MemOperand(ebp, StandardFrameConstants::kContextOffset));
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ ASSERT(info()->IsStub());
+ __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
+ // Push a PC inside the function so that the deopt code can find where
+ // the deopt comes from. It doesn't have to be the precise return
+ // address of a "calling" LAZY deopt, it only has to be somewhere
+ // inside the code body.
+ Label push_approx_pc;
+ __ call(&push_approx_pc);
+ __ bind(&push_approx_pc);
+ // Push the continuation which was stashed where the ebp should
+ // be. Replace it with the saved ebp.
+ __ push(MemOperand(esp, 3 * kPointerSize));
+ __ mov(MemOperand(esp, 4 * kPointerSize), ebp);
+ __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
+ __ ret(0); // Call the continuation without clobbering registers.
}
} else {
- if (type == Deoptimizer::LAZY) {
- __ call(entry, RelocInfo::RUNTIME_ENTRY);
- } else {
- __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
- }
+ __ call(entry, RelocInfo::RUNTIME_ENTRY);
}
}
return !is_aborted();
@@ -1004,11 +979,7 @@
ASSERT(info()->IsStub() || frame_is_built_);
if (cc == no_condition && frame_is_built_) {
- if (bailout_type == Deoptimizer::LAZY) {
- __ call(entry, RelocInfo::RUNTIME_ENTRY);
- } else {
- __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
- }
+ __ call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
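
The push_approx_pc idiom kept above deserves a note: a call whose target is
the very next instruction pushes that instruction's address and then simply
falls through, i.e. it acts as the "push eip" that ia32 otherwise lacks.
Schematically:

  call next    ; pushes the address of "next", then jumps there
  next:        ; execution continues as if nothing happened

As the comment says, the pushed value only has to lie somewhere inside the
code body, so an approximate PC is enough.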
=======================================
--- /branches/bleeding_edge/src/mips/deoptimizer-mips.cc Mon Jun 3 08:32:22 2013
+++ /branches/bleeding_edge/src/mips/deoptimizer-mips.cc Fri Jul 12 00:26:00 2013
@@ -457,22 +457,12 @@
// Get the bailout id from the stack.
__ lw(a2, MemOperand(sp, kSavedRegistersAreaSize));
- // Get the address of the location in the code object if possible (a3) (return
+ // Get the address of the location in the code object (a3) (return
// address for lazy deoptimization) and compute the fp-to-sp delta in
// register t0.
- if (type() == EAGER || type() == SOFT) {
- __ mov(a3, zero_reg);
- // Correct one word for bailout id.
- __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
- } else if (type() == OSR) {
- __ mov(a3, ra);
- // Correct one word for bailout id.
- __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
- } else {
- __ mov(a3, ra);
- // Correct two words for bailout id and return address.
- __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
- }
+ __ mov(a3, ra);
+ // Correct one word for bailout id.
+ __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
__ Subu(t0, fp, t0);
@@ -521,13 +511,8 @@
__ sdc1(f0, MemOperand(a1, dst_offset));
}
- // Remove the bailout id, eventually return address, and the saved registers
- // from the stack.
- if (type() == EAGER || type() == SOFT || type() == OSR) {
- __ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
- } else {
- __ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
- }
+ // Remove the bailout id and the saved registers from the stack.
+ __ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
// Compute a pointer to the unwinding limit in register a2; that is
// the first stack slot not part of the input frame.
@@ -628,25 +613,19 @@
// Maximum size of a table entry generated below.
-const int Deoptimizer::table_entry_size_ = 9 * Assembler::kInstrSize;
+const int Deoptimizer::table_entry_size_ = 6 * Assembler::kInstrSize;
void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
- // Create a sequence of deoptimization entries. Note that any
- // registers may be still live.
+ // Create a sequence of deoptimization entries.
+ // Note that registers are still live when jumping to an entry.
Label table_start;
__ bind(&table_start);
for (int i = 0; i < count(); i++) {
Label start;
__ bind(&start);
- if (type() != EAGER && type() != SOFT) {
- // Emulate ia32 like call by pushing return address to stack.
- __ addiu(sp, sp, -2 * kPointerSize);
- __ sw(ra, MemOperand(sp, 1 * kPointerSize));
- } else {
- __ addiu(sp, sp, -1 * kPointerSize);
- }
+ __ addiu(sp, sp, -1 * kPointerSize);
// Jump over the remaining deopt entries (including this one).
// This code is always reached by calling Jump, which puts the target (label
// start) into t9.
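
As on arm, dropping the per-type prologue is the visible part of the shrink
from 9 to 6 instructions per table entry (table_entry_size_ above), i.e.
24 bytes with MIPS's fixed 4-byte instructions. The uniform entry now just
reserves the bailout-id slot (kPointerSize == 4 assumed for 32-bit mips):

  __ addiu(sp, sp, -1 * kPointerSize);  // one word for the bailout id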
=======================================
--- /branches/bleeding_edge/src/mips/lithium-codegen-mips.cc Wed Jul 10 08:26:38 2013
+++ /branches/bleeding_edge/src/mips/lithium-codegen-mips.cc Fri Jul 12 00:26:00 2013
@@ -332,8 +332,7 @@
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
Label table_start;
__ bind(&table_start);
- Label needs_frame_not_call;
- Label needs_frame_is_call;
+ Label needs_frame;
for (int i = 0; i < deopt_jump_table_.length(); i++) {
__ bind(&deopt_jump_table_[i].label);
Address entry = deopt_jump_table_[i].address;
@@ -346,43 +345,22 @@
}
__ li(t9, Operand(ExternalReference::ForDeoptEntry(entry)));
if (deopt_jump_table_[i].needs_frame) {
- if (type == Deoptimizer::LAZY) {
- if (needs_frame_is_call.is_bound()) {
- __ Branch(&needs_frame_is_call);
- } else {
- __ bind(&needs_frame_is_call);
- __ MultiPush(cp.bit() | fp.bit() | ra.bit());
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- ASSERT(info()->IsStub());
- __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
- __ push(scratch0());
- __ Addu(fp, sp, Operand(2 * kPointerSize));
- __ Call(t9);
- }
+ if (needs_frame.is_bound()) {
+ __ Branch(&needs_frame);
} else {
- if (needs_frame_not_call.is_bound()) {
- __ Branch(&needs_frame_not_call);
- } else {
- __ bind(&needs_frame_not_call);
- __ MultiPush(cp.bit() | fp.bit() | ra.bit());
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- ASSERT(info()->IsStub());
- __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
- __ push(scratch0());
- __ Addu(fp, sp, Operand(2 * kPointerSize));
- __ Jump(t9);
- }
+ __ bind(&needs_frame);
+ __ MultiPush(cp.bit() | fp.bit() | ra.bit());
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ ASSERT(info()->IsStub());
+ __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
+ __ push(scratch0());
+ __ Addu(fp, sp, Operand(2 * kPointerSize));
+ __ Call(t9);
}
} else {
- if (type == Deoptimizer::LAZY) {
- __ Call(t9);
- } else {
- __ Jump(t9);
- }
+ __ Call(t9);
}
}
__ RecordComment("]");
@@ -780,13 +758,8 @@
}
ASSERT(info()->IsStub() || frame_is_built_);
- bool needs_lazy_deopt = info()->IsStub();
if (cc == al && frame_is_built_) {
- if (needs_lazy_deopt) {
- __ Call(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2);
- } else {
- __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2);
- }
+ __ Call(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2);
} else {
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
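
Unlike arm, mips needs no mov-from-pc emulation here: Call(t9) bottoms out
in a jalr, which writes the return address into ra by itself. Schematically:

  jalr t9      ; ra = address after the delay slot
  nop          ; branch delay slot

So ra plays the role that lr plays in the arm code above.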
=======================================
--- /branches/bleeding_edge/src/x64/deoptimizer-x64.cc Fri Jun 28 08:34:48 2013
+++ /branches/bleeding_edge/src/x64/deoptimizer-x64.cc Fri Jul 12 00:26:00 2013
@@ -451,16 +451,11 @@
// Get the bailout id from the stack.
__ movq(arg_reg_3, Operand(rsp, kSavedRegistersAreaSize));
- // Get the address of the location in the code object if possible
+ // Get the address of the location in the code object
// and compute the fp-to-sp delta in register arg5.
- if (type() == EAGER || type() == SOFT) {
- __ Set(arg_reg_4, 0);
- __ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 1 * kPointerSize));
- } else {
- __ movq(arg_reg_4,
- Operand(rsp, kSavedRegistersAreaSize + 1 * kPointerSize));
- __ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 2 * kPointerSize));
- }
+ __ movq(arg_reg_4,
+ Operand(rsp, kSavedRegistersAreaSize + 1 * kPointerSize));
+ __ lea(arg5, Operand(rsp, kSavedRegistersAreaSize + 2 * kPointerSize));
__ subq(arg5, rbp);
__ neg(arg5);
@@ -503,12 +498,8 @@
__ pop(Operand(rbx, dst_offset));
}
- // Remove the bailout id from the stack.
- if (type() == EAGER || type() == SOFT) {
- __ addq(rsp, Immediate(kPointerSize));
- } else {
- __ addq(rsp, Immediate(2 * kPointerSize));
- }
+ // Remove the bailout id and return address from the stack.
+ __ addq(rsp, Immediate(2 * kPointerSize));
// Compute a pointer to the unwinding limit in register rcx; that is
// the first stack slot not part of the input frame.
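
The unified argument setup on x64, schematically (register names as in the
patch):

  arg_reg_3 = bailout id      ; from [rsp + kSavedRegistersAreaSize]
  arg_reg_4 = return address  ; one word above the bailout id
  arg5      = fp-to-sp delta  ; rbp - (rsp + area + 2 words), via lea/sub/neg

Since arg_reg_4 is no longer zeroed for EAGER/SOFT, the "from" address
reaching the deoptimizer is always valid, matching the new ASSERT in
deoptimizer.cc.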
=======================================
--- /branches/bleeding_edge/src/x64/lithium-codegen-x64.cc Wed Jul 10 02:02:23 2013
+++ /branches/bleeding_edge/src/x64/lithium-codegen-x64.cc Fri Jul 12 00:26:00 2013
@@ -281,8 +281,7 @@
bool LCodeGen::GenerateJumpTable() {
- Label needs_frame_not_call;
- Label needs_frame_is_call;
+ Label needs_frame;
if (jump_table_.length() > 0) {
Comment(";;; -------------------- Jump table --------------------");
}
@@ -298,47 +297,24 @@
}
if (jump_table_[i].needs_frame) {
__ movq(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
- if (type == Deoptimizer::LAZY) {
- if (needs_frame_is_call.is_bound()) {
- __ jmp(&needs_frame_is_call);
- } else {
- __ bind(&needs_frame_is_call);
- __ push(rbp);
- __ movq(rbp, rsp);
- __ push(rsi);
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- ASSERT(info()->IsStub());
- __ Move(rsi, Smi::FromInt(StackFrame::STUB));
- __ push(rsi);
- __ movq(rsi, MemOperand(rsp, kPointerSize));
- __ call(kScratchRegister);
- }
+ if (needs_frame.is_bound()) {
+ __ jmp(&needs_frame);
} else {
- if (needs_frame_not_call.is_bound()) {
- __ jmp(&needs_frame_not_call);
- } else {
- __ bind(&needs_frame_not_call);
- __ push(rbp);
- __ movq(rbp, rsp);
- __ push(rsi);
- // This variant of deopt can only be used with stubs. Since we don't
- // have a function pointer to install in the stack frame that we're
- // building, install a special marker there instead.
- ASSERT(info()->IsStub());
- __ Move(rsi, Smi::FromInt(StackFrame::STUB));
- __ push(rsi);
- __ movq(rsi, MemOperand(rsp, kPointerSize));
- __ jmp(kScratchRegister);
- }
+ __ bind(&needs_frame);
+ __ push(rbp);
+ __ movq(rbp, rsp);
+ __ push(rsi);
+ // This variant of deopt can only be used with stubs. Since we don't
+ // have a function pointer to install in the stack frame that we're
+ // building, install a special marker there instead.
+ ASSERT(info()->IsStub());
+ __ Move(rsi, Smi::FromInt(StackFrame::STUB));
+ __ push(rsi);
+ __ movq(rsi, MemOperand(rsp, kPointerSize));
+ __ call(kScratchRegister);
}
} else {
- if (type == Deoptimizer::LAZY) {
- __ call(entry, RelocInfo::RUNTIME_ENTRY);
- } else {
- __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
- }
+ __ call(entry, RelocInfo::RUNTIME_ENTRY);
}
}
return !is_aborted();
@@ -689,13 +665,8 @@
}
ASSERT(info()->IsStub() || frame_is_built_);
- bool needs_lazy_deopt = info()->IsStub();
if (cc == no_condition && frame_is_built_) {
- if (needs_lazy_deopt) {
- __ call(entry, RelocInfo::RUNTIME_ENTRY);
- } else {
- __ jmp(entry, RelocInfo::RUNTIME_ENTRY);
- }
+ __ call(entry, RelocInfo::RUNTIME_ENTRY);
} else {
// We often have several deopts to the same entry, reuse the last
// jump entry if this is the case.
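
For reference, the frame built under needs_frame is a standard x64 frame
with a stub marker in the function slot (schematic):

  [rbp]       saved rbp                (push rbp; mov rbp, rsp)
  [rbp - 8]   context (rsi)
  [rbp - 16]  Smi(StackFrame::STUB)    ; in place of a JSFunction

The marker is needed because, as the comment says, a stub has no function
pointer to install.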