Revision: 13158
Author: [email protected]
Date: Fri Dec 7 00:55:06 2012
Log: Use count-based profiling exclusively.
Review URL: https://codereview.chromium.org/11437016
http://code.google.com/p/v8/source/detail?r=13158
Modified:
/branches/bleeding_edge/src/arm/deoptimizer-arm.cc
/branches/bleeding_edge/src/arm/full-codegen-arm.cc
/branches/bleeding_edge/src/execution.cc
/branches/bleeding_edge/src/execution.h
/branches/bleeding_edge/src/flag-definitions.h
/branches/bleeding_edge/src/full-codegen.cc
/branches/bleeding_edge/src/full-codegen.h
/branches/bleeding_edge/src/ia32/deoptimizer-ia32.cc
/branches/bleeding_edge/src/ia32/full-codegen-ia32.cc
/branches/bleeding_edge/src/mips/deoptimizer-mips.cc
/branches/bleeding_edge/src/mips/full-codegen-mips.cc
/branches/bleeding_edge/src/platform-cygwin.cc
/branches/bleeding_edge/src/platform-freebsd.cc
/branches/bleeding_edge/src/platform-linux.cc
/branches/bleeding_edge/src/platform-macos.cc
/branches/bleeding_edge/src/platform-openbsd.cc
/branches/bleeding_edge/src/platform-solaris.cc
/branches/bleeding_edge/src/platform-win32.cc
/branches/bleeding_edge/src/runtime-profiler.cc
/branches/bleeding_edge/src/runtime-profiler.h
/branches/bleeding_edge/src/runtime.cc
/branches/bleeding_edge/src/x64/deoptimizer-x64.cc
/branches/bleeding_edge/src/x64/full-codegen-x64.cc
=======================================
--- /branches/bleeding_edge/src/arm/deoptimizer-arm.cc Wed Dec 5 08:16:32 2012
+++ /branches/bleeding_edge/src/arm/deoptimizer-arm.cc Fri Dec 7 00:55:06 2012
@@ -114,7 +114,6 @@
}
-static const int32_t kBranchBeforeStackCheck = 0x2a000001;
static const int32_t kBranchBeforeInterrupt = 0x5a000004;
@@ -123,24 +122,21 @@
Code* check_code,
Code* replacement_code) {
const int kInstrSize = Assembler::kInstrSize;
- // The call of the stack guard check has the following form:
- // e1 5d 00 0c cmp sp, <limit>
- // 2a 00 00 01 bcs ok
+ // The back edge bookkeeping code matches the pattern:
+ //
+ // <decrement profiling counter>
+ // 2a 00 00 01 bpl ok
// e5 9f c? ?? ldr ip, [pc, <stack guard address>]
// e1 2f ff 3c blx ip
ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);
ASSERT(Assembler::IsLdrPcImmediateOffset(
Assembler::instr_at(pc_after - 2 * kInstrSize)));
- if (FLAG_count_based_interrupts) {
- ASSERT_EQ(kBranchBeforeInterrupt,
- Memory::int32_at(pc_after - 3 * kInstrSize));
- } else {
- ASSERT_EQ(kBranchBeforeStackCheck,
- Memory::int32_at(pc_after - 3 * kInstrSize));
- }
+ ASSERT_EQ(kBranchBeforeInterrupt,
+ Memory::int32_at(pc_after - 3 * kInstrSize));
// We patch the code to the following form:
- // e1 5d 00 0c cmp sp, <limit>
+ //
+ // <decrement profiling counter>
// e1 a0 00 00 mov r0, r0 (NOP)
// e5 9f c? ?? ldr ip, [pc, <on-stack replacement address>]
// e1 2f ff 3c blx ip
@@ -177,15 +173,9 @@
// Replace NOP with conditional jump.
CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
- if (FLAG_count_based_interrupts) {
- patcher.masm()->b(+16, pl);
- ASSERT_EQ(kBranchBeforeInterrupt,
- Memory::int32_at(pc_after - 3 * kInstrSize));
- } else {
- patcher.masm()->b(+4, cs);
- ASSERT_EQ(kBranchBeforeStackCheck,
- Memory::int32_at(pc_after - 3 * kInstrSize));
- }
+ patcher.masm()->b(+16, pl);
+ ASSERT_EQ(kBranchBeforeInterrupt,
+ Memory::int32_at(pc_after - 3 * kInstrSize));
// Replace the stack check address in the constant pool
// with the entry address of the replacement code.
=======================================
--- /branches/bleeding_edge/src/arm/full-codegen-arm.cc Wed Dec 5 07:49:22 2012
+++ /branches/bleeding_edge/src/arm/full-codegen-arm.cc Fri Dec 7 00:55:06 2012
@@ -347,42 +347,31 @@
}
-void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
- Label* back_edge_target) {
- Comment cmnt(masm_, "[ Stack check");
+void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
+ Label* back_edge_target) {
+ Comment cmnt(masm_, "[ Back edge bookkeeping");
// Block literal pools whilst emitting stack check code.
Assembler::BlockConstPoolScope block_const_pool(masm_);
Label ok;
- if (FLAG_count_based_interrupts) {
- int weight = 1;
- if (FLAG_weighted_back_edges) {
- ASSERT(back_edge_target->is_bound());
- int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceUnit));
- }
- EmitProfilingCounterDecrement(weight);
- __ b(pl, &ok);
- InterruptStub stub;
- __ CallStub(&stub);
- } else {
- __ LoadRoot(ip, Heap::kStackLimitRootIndex);
- __ cmp(sp, Operand(ip));
- __ b(hs, &ok);
- PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize);
- StackCheckStub stub;
- __ CallStub(&stub);
+ int weight = 1;
+ if (FLAG_weighted_back_edges) {
+ ASSERT(back_edge_target->is_bound());
+ int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kBackEdgeDistanceUnit));
}
+ EmitProfilingCounterDecrement(weight);
+ __ b(pl, &ok);
+ InterruptStub stub;
+ __ CallStub(&stub);
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
// the deoptimization input data found in the optimized code.
- RecordStackCheck(stmt->OsrEntryId());
+ RecordBackEdge(stmt->OsrEntryId());
- if (FLAG_count_based_interrupts) {
- EmitProfilingCounterReset();
- }
+ EmitProfilingCounterReset();
__ bind(&ok);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
@@ -1251,7 +1240,7 @@
__ add(r0, r0, Operand(Smi::FromInt(1)));
__ push(r0);
- EmitStackCheck(stmt, &loop);
+ EmitBackEdgeBookkeeping(stmt, &loop);
__ b(&loop);
// Remove the pointers stored on the stack.
=======================================
--- /branches/bleeding_edge/src/execution.cc Tue Dec 4 01:51:52 2012
+++ /branches/bleeding_edge/src/execution.cc Fri Dec 7 00:55:06 2012
@@ -428,25 +428,6 @@
thread_local_.interrupt_flags_ |= TERMINATE;
set_interrupt_limits(access);
}
-
-
-bool StackGuard::IsRuntimeProfilerTick() {
- ExecutionAccess access(isolate_);
- return (thread_local_.interrupt_flags_ & RUNTIME_PROFILER_TICK) != 0;
-}
-
-
-void StackGuard::RequestRuntimeProfilerTick() {
- // Ignore calls if we're not optimizing or if we can't get the lock.
- if (FLAG_opt && ExecutionAccess::TryLock(isolate_)) {
- thread_local_.interrupt_flags_ |= RUNTIME_PROFILER_TICK;
- if (thread_local_.postpone_interrupts_nesting_ == 0) {
- thread_local_.jslimit_ = thread_local_.climit_ = kInterruptLimit;
- isolate_->heap()->SetStackLimits();
- }
- ExecutionAccess::Unlock(isolate_);
- }
-}
void StackGuard::RequestCodeReadyEvent() {
@@ -946,13 +927,9 @@
}
isolate->counters()->stack_interrupts()->Increment();
- // If FLAG_count_based_interrupts, every interrupt is a profiler interrupt.
- if (FLAG_count_based_interrupts ||
- stack_guard->IsRuntimeProfilerTick()) {
- isolate->counters()->runtime_profiler_ticks()->Increment();
- stack_guard->Continue(RUNTIME_PROFILER_TICK);
- isolate->runtime_profiler()->OptimizeNow();
- }
+ isolate->counters()->runtime_profiler_ticks()->Increment();
+ stack_guard->Continue(RUNTIME_PROFILER_TICK);
+ isolate->runtime_profiler()->OptimizeNow();
#ifdef ENABLE_DEBUGGER_SUPPORT
if (stack_guard->IsDebugBreak() || stack_guard->IsDebugCommand()) {
DebugBreakHelper();
=======================================
--- /branches/bleeding_edge/src/execution.h Thu Jul 19 11:58:23 2012
+++ /branches/bleeding_edge/src/execution.h Fri Dec 7 00:55:06 2012
@@ -194,8 +194,6 @@
void Interrupt();
bool IsTerminateExecution();
void TerminateExecution();
- bool IsRuntimeProfilerTick();
- void RequestRuntimeProfilerTick();
bool IsCodeReadyEvent();
void RequestCodeReadyEvent();
#ifdef ENABLE_DEBUGGER_SUPPORT
=======================================
--- /branches/bleeding_edge/src/flag-definitions.h Wed Dec 5 08:22:14 2012
+++ /branches/bleeding_edge/src/flag-definitions.h Fri Dec 7 00:55:06 2012
@@ -247,8 +247,6 @@
DEFINE_bool(direct_self_opt, false,
"call recompile stub directly when self-optimizing")
DEFINE_bool(retry_self_opt, false, "re-try self-optimization if it failed")
-DEFINE_bool(count_based_interrupts, false,
- "trigger profiler ticks based on counting instead of timing")
DEFINE_bool(interrupt_at_exit, false,
"insert an interrupt check at function exit")
DEFINE_bool(weighted_back_edges, false,
@@ -264,7 +262,6 @@
DEFINE_implication(experimental_profiler, self_optimization)
// Not implying direct_self_opt here because it seems to be a bad idea.
DEFINE_implication(experimental_profiler, retry_self_opt)
-DEFINE_implication(experimental_profiler, count_based_interrupts)
DEFINE_implication(experimental_profiler, interrupt_at_exit)
DEFINE_implication(experimental_profiler, weighted_back_edges)
=======================================
--- /branches/bleeding_edge/src/full-codegen.cc Wed Dec 5 03:04:10 2012
+++ /branches/bleeding_edge/src/full-codegen.cc Fri Dec 7 00:55:06 2012
@@ -471,9 +471,8 @@
}
-void FullCodeGenerator::RecordStackCheck(BailoutId ast_id) {
- // The pc offset does not need to be encoded and packed together with a
- // state.
+void FullCodeGenerator::RecordBackEdge(BailoutId ast_id) {
+ // The pc offset does not need to be encoded and packed together with a state.
ASSERT(masm_->pc_offset() > 0);
BailoutEntry entry = { ast_id, static_cast<unsigned>(masm_->pc_offset()) };
stack_checks_.Add(entry, zone());
@@ -1269,7 +1268,7 @@
// Check stack before looping.
PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
__ bind(&stack_check);
- EmitStackCheck(stmt, &body);
+ EmitBackEdgeBookkeeping(stmt, &body);
__ jmp(&body);
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
@@ -1298,7 +1297,7 @@
SetStatementPosition(stmt);
// Check stack before looping.
- EmitStackCheck(stmt, &body);
+ EmitBackEdgeBookkeeping(stmt, &body);
__ bind(&test);
VisitForControl(stmt->cond(),
@@ -1344,7 +1343,7 @@
SetStatementPosition(stmt);
// Check stack before looping.
- EmitStackCheck(stmt, &body);
+ EmitBackEdgeBookkeeping(stmt, &body);
__ bind(&test);
if (stmt->cond() != NULL) {
=======================================
--- /branches/bleeding_edge/src/full-codegen.h Wed Dec 5 03:04:10 2012
+++ /branches/bleeding_edge/src/full-codegen.h Fri Dec 7 00:55:06 2012
@@ -451,14 +451,13 @@
// neither a with nor a catch context.
void EmitDebugCheckDeclarationContext(Variable* variable);
- // Platform-specific code for checking the stack limit at the back edge of
- // a loop.
// This is meant to be called at loop back edges, |back_edge_target| is
// the jump target of the back edge and is used to approximate the amount
// of code inside the loop.
- void EmitStackCheck(IterationStatement* stmt, Label* back_edge_target);
- // Record the OSR AST id corresponding to a stack check in the code.
- void RecordStackCheck(BailoutId osr_ast_id);
+ void EmitBackEdgeBookkeeping(IterationStatement* stmt,
+ Label* back_edge_target);
+ // Record the OSR AST id corresponding to a back edge in the code.
+ void RecordBackEdge(BailoutId osr_ast_id);
// Emit a table of stack check ids and pcs into the code stream. Return
// the offset of the start of the table.
unsigned EmitStackCheckTable();
@@ -817,6 +816,8 @@
int module_index_;
const ExpressionContext* context_;
ZoneList<BailoutEntry> bailout_entries_;
+ // TODO(svenpanne) Rename this to something like back_edges_ and rename
+ // related functions accordingly.
ZoneList<BailoutEntry> stack_checks_;
ZoneList<TypeFeedbackCellEntry> type_feedback_cells_;
int ic_total_count_;
=======================================
--- /branches/bleeding_edge/src/ia32/deoptimizer-ia32.cc Wed Dec 5 08:16:32 2012
+++ /branches/bleeding_edge/src/ia32/deoptimizer-ia32.cc Fri Dec 7 00:55:06 2012
@@ -210,8 +210,6 @@
static const byte kJnsInstruction = 0x79;
static const byte kJnsOffset = 0x13;
-static const byte kJaeInstruction = 0x73;
-static const byte kJaeOffset = 0x07;
static const byte kCallInstruction = 0xe8;
static const byte kNopByteOne = 0x66;
static const byte kNopByteTwo = 0x90;
@@ -224,31 +222,26 @@
Address call_target_address = pc_after - kIntSize;
ASSERT_EQ(check_code->entry(),
Assembler::target_address_at(call_target_address));
- // The stack check code matches the pattern:
+ // The back edge bookkeeping code matches the pattern:
//
- // cmp esp, <limit>
- // jae ok
+ // sub <profiling_counter>, <delta>
+ // jns ok
// call <stack guard>
// test eax, <loop nesting depth>
// ok: ...
//
// We will patch away the branch so the code is:
//
- // cmp esp, <limit> ;; Not changed
+ // sub <profiling_counter>, <delta> ;; Not changed
// nop
// nop
// call <on-stack replacment>
// test eax, <loop nesting depth>
// ok:
- if (FLAG_count_based_interrupts) {
- ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
- ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
- } else {
- ASSERT_EQ(kJaeInstruction, *(call_target_address - 3));
- ASSERT_EQ(kJaeOffset, *(call_target_address - 2));
- }
- ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
+ ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
+ ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
+ ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
*(call_target_address - 3) = kNopByteOne;
*(call_target_address - 2) = kNopByteTwo;
Assembler::set_target_address_at(call_target_address,
@@ -272,13 +265,8 @@
ASSERT_EQ(kNopByteOne, *(call_target_address - 3));
ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
- if (FLAG_count_based_interrupts) {
- *(call_target_address - 3) = kJnsInstruction;
- *(call_target_address - 2) = kJnsOffset;
- } else {
- *(call_target_address - 3) = kJaeInstruction;
- *(call_target_address - 2) = kJaeOffset;
- }
+ *(call_target_address - 3) = kJnsInstruction;
+ *(call_target_address - 2) = kJnsOffset;
Assembler::set_target_address_at(call_target_address,
check_code->entry());
=======================================
--- /branches/bleeding_edge/src/ia32/full-codegen-ia32.cc Wed Dec 5 07:49:22 2012
+++ /branches/bleeding_edge/src/ia32/full-codegen-ia32.cc Fri Dec 7 00:55:06 2012
@@ -329,39 +329,27 @@
}
-void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
- Label* back_edge_target) {
- Comment cmnt(masm_, "[ Stack check");
+void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
+ Label* back_edge_target) {
+ Comment cmnt(masm_, "[ Back edge bookkeeping");
Label ok;
- if (FLAG_count_based_interrupts) {
- int weight = 1;
- if (FLAG_weighted_back_edges) {
- ASSERT(back_edge_target->is_bound());
- int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceUnit));
- }
- EmitProfilingCounterDecrement(weight);
- __ j(positive, &ok, Label::kNear);
- InterruptStub stub;
- __ CallStub(&stub);
- } else {
- // Count based interrupts happen often enough when they are enabled
- // that the additional stack checks are not necessary (they would
- // only check for interrupts).
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above_equal, &ok, Label::kNear);
- StackCheckStub stub;
- __ CallStub(&stub);
+ int weight = 1;
+ if (FLAG_weighted_back_edges) {
+ ASSERT(back_edge_target->is_bound());
+ int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kBackEdgeDistanceUnit));
}
+ EmitProfilingCounterDecrement(weight);
+ __ j(positive, &ok, Label::kNear);
+ InterruptStub stub;
+ __ CallStub(&stub);
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
// the deoptimization input data found in the optimized code.
- RecordStackCheck(stmt->OsrEntryId());
+ RecordBackEdge(stmt->OsrEntryId());
// Loop stack checks can be patched to perform on-stack replacement. In
// order to decide whether or not to perform OSR we embed the loop depth
@@ -370,9 +358,7 @@
ASSERT(loop_depth() > 0);
__ test(eax, Immediate(Min(loop_depth(), Code::kMaxLoopNestingMarker)));
- if (FLAG_count_based_interrupts) {
- EmitProfilingCounterReset();
- }
+ EmitProfilingCounterReset();
__ bind(&ok);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
@@ -1200,7 +1186,7 @@
__ bind(loop_statement.continue_label());
__ add(Operand(esp, 0 * kPointerSize), Immediate(Smi::FromInt(1)));
- EmitStackCheck(stmt, &loop);
+ EmitBackEdgeBookkeeping(stmt, &loop);
__ jmp(&loop);
// Remove the pointers stored on the stack.
=======================================
--- /branches/bleeding_edge/src/mips/deoptimizer-mips.cc Mon Oct 22 02:48:56 2012
+++ /branches/bleeding_edge/src/mips/deoptimizer-mips.cc Fri Dec 7 00:55:06 2012
@@ -120,7 +120,7 @@
Code* check_code,
Code* replacement_code) {
const int kInstrSize = Assembler::kInstrSize;
- // This structure comes from FullCodeGenerator::EmitStackCheck.
+ // This structure comes from FullCodeGenerator::EmitBackEdgeBookkeeping.
// The call of the stack guard check has the following form:
// sltu at, sp, t0 / slt at, a3, zero_reg (in case of count based interrupts)
// beq at, zero_reg, ok
@@ -170,11 +170,7 @@
// Restore the sltu instruction so beq can be taken again.
CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
- if (FLAG_count_based_interrupts) {
- patcher.masm()->slt(at, a3, zero_reg);
- } else {
- patcher.masm()->sltu(at, sp, t0);
- }
+ patcher.masm()->slt(at, a3, zero_reg);
// Replace the on-stack replacement address in the load-immediate (lui/ori
// pair) with the entry address of the normal stack-check code.
=======================================
--- /branches/bleeding_edge/src/mips/full-codegen-mips.cc Wed Nov 28 23:38:00 2012
+++ /branches/bleeding_edge/src/mips/full-codegen-mips.cc Fri Dec 7 00:55:06 2012
@@ -346,45 +346,34 @@
}
-void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
- Label* back_edge_target) {
+void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
+ Label* back_edge_target) {
// The generated code is used in Deoptimizer::PatchStackCheckCodeAt so we need
// to make sure it is constant. Branch may emit a skip-or-jump sequence
// instead of the normal Branch. It seems that the "skip" part of that
// sequence is about as long as this Branch would be so it is safe to ignore
// that.
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
- Comment cmnt(masm_, "[ Stack check");
+ Comment cmnt(masm_, "[ Back edge bookkeeping");
Label ok;
- if (FLAG_count_based_interrupts) {
- int weight = 1;
- if (FLAG_weighted_back_edges) {
- ASSERT(back_edge_target->is_bound());
- int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceUnit));
- }
- EmitProfilingCounterDecrement(weight);
- __ slt(at, a3, zero_reg);
- __ beq(at, zero_reg, &ok);
- // CallStub will emit a li t9 first, so it is safe to use the delay slot.
- InterruptStub stub;
- __ CallStub(&stub);
- } else {
- __ LoadRoot(t0, Heap::kStackLimitRootIndex);
- __ sltu(at, sp, t0);
- __ beq(at, zero_reg, &ok);
- // CallStub will emit a li t9 first, so it is safe to use the delay slot.
- StackCheckStub stub;
- __ CallStub(&stub);
+ int weight = 1;
+ if (FLAG_weighted_back_edges) {
+ ASSERT(back_edge_target->is_bound());
+ int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kBackEdgeDistanceUnit));
}
+ EmitProfilingCounterDecrement(weight);
+ __ slt(at, a3, zero_reg);
+ __ beq(at, zero_reg, &ok);
+ // CallStub will emit a li t9 first, so it is safe to use the delay slot.
+ InterruptStub stub;
+ __ CallStub(&stub);
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
// the deoptimization input data found in the optimized code.
- RecordStackCheck(stmt->OsrEntryId());
- if (FLAG_count_based_interrupts) {
- EmitProfilingCounterReset();
- }
+ RecordBackEdge(stmt->OsrEntryId());
+ EmitProfilingCounterReset();
__ bind(&ok);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
@@ -1259,7 +1248,7 @@
__ Addu(a0, a0, Operand(Smi::FromInt(1)));
__ push(a0);
- EmitStackCheck(stmt, &loop);
+ EmitBackEdgeBookkeeping(stmt, &loop);
__ Branch(&loop);
// Remove the pointers stored on the stack.
=======================================
--- /branches/bleeding_edge/src/platform-cygwin.cc Fri Nov 30 02:26:21 2012
+++ /branches/bleeding_edge/src/platform-cygwin.cc Fri Dec 7 00:55:06 2012
@@ -655,24 +655,13 @@
SamplerRegistry::State state;
while ((state = SamplerRegistry::GetState()) !=
SamplerRegistry::HAS_NO_SAMPLERS) {
- bool cpu_profiling_enabled =
- (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
- bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
// When CPU profiling is enabled both JavaScript and C++ code is
// profiled. We must not suspend.
- if (!cpu_profiling_enabled) {
+ if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) {
+ SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
+ } else {
if (rate_limiter_.SuspendIfNecessary()) continue;
}
- if (cpu_profiling_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
- return;
- }
- }
- if (runtime_profiler_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
- return;
- }
- }
OS::Sleep(interval_);
}
}
@@ -684,11 +673,6 @@
reinterpret_cast<SamplerThread*>(raw_sampler_thread);
sampler_thread->SampleContext(sampler);
}
-
- static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
- if (!sampler->isolate()->IsInitialized()) return;
- sampler->isolate()->runtime_profiler()->NotifyTick();
- }
void SampleContext(Sampler* sampler) {
HANDLE profiled_thread = sampler->platform_data()->profiled_thread();
=======================================
--- /branches/bleeding_edge/src/platform-freebsd.cc Fri Nov 30 02:26:21 2012
+++ /branches/bleeding_edge/src/platform-freebsd.cc Fri Dec 7 00:55:06 2012
@@ -712,11 +712,6 @@
class SignalSender : public Thread {
public:
- enum SleepInterval {
- HALF_INTERVAL,
- FULL_INTERVAL
- };
-
static const int kSignalSenderStackSize = 64 * KB;
explicit SignalSender(int interval)
@@ -767,38 +762,14 @@
SamplerRegistry::State state;
while ((state = SamplerRegistry::GetState()) !=
SamplerRegistry::HAS_NO_SAMPLERS) {
- bool cpu_profiling_enabled =
- (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
- bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
// When CPU profiling is enabled both JavaScript and C++ code is
// profiled. We must not suspend.
- if (!cpu_profiling_enabled) {
+ if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) {
+ SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
+ } else {
if (rate_limiter_.SuspendIfNecessary()) continue;
}
- if (cpu_profiling_enabled && runtime_profiler_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
- return;
- }
- Sleep(HALF_INTERVAL);
- if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
- return;
- }
- Sleep(HALF_INTERVAL);
- } else {
- if (cpu_profiling_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
- this)) {
- return;
- }
- }
- if (runtime_profiler_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile,
- NULL)) {
- return;
- }
- }
- Sleep(FULL_INTERVAL);
- }
+ Sleep(); // TODO(svenpanne) Figure out if OS:Sleep(interval_) is enough.
}
}
@@ -807,22 +778,16 @@
SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
}
-
- static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
- if (!sampler->isolate()->IsInitialized()) return;
- sampler->isolate()->runtime_profiler()->NotifyTick();
- }
void SendProfilingSignal(pthread_t tid) {
if (!signal_handler_installed_) return;
pthread_kill(tid, SIGPROF);
}
- void Sleep(SleepInterval full_or_half) {
+ void Sleep() {
// Convert ms to us and subtract 100 us to compensate delays
// occuring during signal delivery.
useconds_t interval = interval_ * 1000 - 100;
- if (full_or_half == HALF_INTERVAL) interval /= 2;
int result = usleep(interval);
#ifdef DEBUG
if (result != 0 && errno != EINTR) {
=======================================
--- /branches/bleeding_edge/src/platform-linux.cc Fri Nov 30 02:26:21 2012
+++ /branches/bleeding_edge/src/platform-linux.cc Fri Dec 7 00:55:06 2012
@@ -1091,11 +1091,6 @@
class SignalSender : public Thread {
public:
- enum SleepInterval {
- HALF_INTERVAL,
- FULL_INTERVAL
- };
-
static const int kSignalSenderStackSize = 64 * KB;
explicit SignalSender(int interval)
@@ -1151,43 +1146,16 @@
SamplerRegistry::State state;
while ((state = SamplerRegistry::GetState()) !=
SamplerRegistry::HAS_NO_SAMPLERS) {
- bool cpu_profiling_enabled =
- (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
- bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
- if (cpu_profiling_enabled && !signal_handler_installed_) {
- InstallSignalHandler();
- } else if (!cpu_profiling_enabled && signal_handler_installed_) {
- RestoreSignalHandler();
- }
// When CPU profiling is enabled both JavaScript and C++ code is
// profiled. We must not suspend.
- if (!cpu_profiling_enabled) {
- if (rate_limiter_.SuspendIfNecessary()) continue;
- }
- if (cpu_profiling_enabled && runtime_profiler_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
- return;
- }
- Sleep(HALF_INTERVAL);
- if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
- return;
- }
- Sleep(HALF_INTERVAL);
+ if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) {
+ if (!signal_handler_installed_) InstallSignalHandler();
+ SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
} else {
- if (cpu_profiling_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
- this)) {
- return;
- }
- }
- if (runtime_profiler_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile,
- NULL)) {
- return;
- }
- }
- Sleep(FULL_INTERVAL);
+ if (signal_handler_installed_) RestoreSignalHandler();
+ if (rate_limiter_.SuspendIfNecessary()) continue;
}
+ Sleep(); // TODO(svenpanne) Figure out if OS:Sleep(interval_) is enough.
}
}
@@ -1196,11 +1164,6 @@
SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
}
-
- static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
- if (!sampler->isolate()->IsInitialized()) return;
- sampler->isolate()->runtime_profiler()->NotifyTick();
- }
void SendProfilingSignal(int tid) {
if (!signal_handler_installed_) return;
@@ -1212,11 +1175,10 @@
#endif
}
- void Sleep(SleepInterval full_or_half) {
+ void Sleep() {
// Convert ms to us and subtract 100 us to compensate delays
// occuring during signal delivery.
useconds_t interval = interval_ * 1000 - 100;
- if (full_or_half == HALF_INTERVAL) interval /= 2;
#if defined(ANDROID)
usleep(interval);
#else
=======================================
--- /branches/bleeding_edge/src/platform-macos.cc Fri Nov 30 02:26:21 2012
+++ /branches/bleeding_edge/src/platform-macos.cc Fri Dec 7 00:55:06 2012
@@ -787,24 +787,13 @@
SamplerRegistry::State state;
while ((state = SamplerRegistry::GetState()) !=
SamplerRegistry::HAS_NO_SAMPLERS) {
- bool cpu_profiling_enabled =
- (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
- bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
// When CPU profiling is enabled both JavaScript and C++ code is
// profiled. We must not suspend.
- if (!cpu_profiling_enabled) {
+ if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) {
+ SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
+ } else {
if (rate_limiter_.SuspendIfNecessary()) continue;
}
- if (cpu_profiling_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
- return;
- }
- }
- if (runtime_profiler_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
- return;
- }
- }
OS::Sleep(interval_);
}
}
@@ -816,11 +805,6 @@
reinterpret_cast<SamplerThread*>(raw_sampler_thread);
sampler_thread->SampleContext(sampler);
}
-
- static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
- if (!sampler->isolate()->IsInitialized()) return;
- sampler->isolate()->runtime_profiler()->NotifyTick();
- }
void SampleContext(Sampler* sampler) {
thread_act_t profiled_thread =
sampler->platform_data()->profiled_thread();
=======================================
--- /branches/bleeding_edge/src/platform-openbsd.cc Fri Nov 30 02:26:21 2012
+++ /branches/bleeding_edge/src/platform-openbsd.cc Fri Dec 7 00:55:06 2012
@@ -784,11 +784,6 @@
class SignalSender : public Thread {
public:
- enum SleepInterval {
- HALF_INTERVAL,
- FULL_INTERVAL
- };
-
static const int kSignalSenderStackSize = 64 * KB;
explicit SignalSender(int interval)
@@ -844,43 +839,16 @@
SamplerRegistry::State state;
while ((state = SamplerRegistry::GetState()) !=
SamplerRegistry::HAS_NO_SAMPLERS) {
- bool cpu_profiling_enabled =
- (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
- bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
- if (cpu_profiling_enabled && !signal_handler_installed_) {
- InstallSignalHandler();
- } else if (!cpu_profiling_enabled && signal_handler_installed_) {
- RestoreSignalHandler();
- }
// When CPU profiling is enabled both JavaScript and C++ code is
// profiled. We must not suspend.
- if (!cpu_profiling_enabled) {
- if (rate_limiter_.SuspendIfNecessary()) continue;
- }
- if (cpu_profiling_enabled && runtime_profiler_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
- return;
- }
- Sleep(HALF_INTERVAL);
- if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
- return;
- }
- Sleep(HALF_INTERVAL);
+ if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) {
+ if (!signal_handler_installed_) InstallSignalHandler();
+ SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
} else {
- if (cpu_profiling_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
- this)) {
- return;
- }
- }
- if (runtime_profiler_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile,
- NULL)) {
- return;
- }
- }
- Sleep(FULL_INTERVAL);
+ if (signal_handler_installed_) RestoreSignalHandler();
+ if (rate_limiter_.SuspendIfNecessary()) continue;
}
+ Sleep(); // TODO(svenpanne) Figure out if OS:Sleep(interval_) is enough.
}
}
@@ -889,22 +857,16 @@
SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
}
-
- static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
- if (!sampler->isolate()->IsInitialized()) return;
- sampler->isolate()->runtime_profiler()->NotifyTick();
- }
void SendProfilingSignal(pthread_t tid) {
if (!signal_handler_installed_) return;
pthread_kill(tid, SIGPROF);
}
- void Sleep(SleepInterval full_or_half) {
+ void Sleep() {
// Convert ms to us and subtract 100 us to compensate delays
// occuring during signal delivery.
useconds_t interval = interval_ * 1000 - 100;
- if (full_or_half == HALF_INTERVAL) interval /= 2;
int result = usleep(interval);
#ifdef DEBUG
if (result != 0 && errno != EINTR) {
=======================================
--- /branches/bleeding_edge/src/platform-solaris.cc Fri Nov 30 02:26:21 2012
+++ /branches/bleeding_edge/src/platform-solaris.cc Fri Dec 7 00:55:06 2012
@@ -701,11 +701,6 @@
class SignalSender : public Thread {
public:
- enum SleepInterval {
- HALF_INTERVAL,
- FULL_INTERVAL
- };
-
static const int kSignalSenderStackSize = 64 * KB;
explicit SignalSender(int interval)
@@ -760,44 +755,16 @@
SamplerRegistry::State state;
while ((state = SamplerRegistry::GetState()) !=
SamplerRegistry::HAS_NO_SAMPLERS) {
- bool cpu_profiling_enabled =
- (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
- bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
- if (cpu_profiling_enabled && !signal_handler_installed_) {
- InstallSignalHandler();
- } else if (!cpu_profiling_enabled && signal_handler_installed_) {
- RestoreSignalHandler();
- }
-
// When CPU profiling is enabled both JavaScript and C++ code is
// profiled. We must not suspend.
- if (!cpu_profiling_enabled) {
+ if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) {
+ if (!signal_handler_installed_) InstallSignalHandler();
+ SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
+ } else {
+ if (signal_handler_installed_) RestoreSignalHandler();
if (rate_limiter_.SuspendIfNecessary()) continue;
}
- if (cpu_profiling_enabled && runtime_profiler_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
- return;
- }
- Sleep(HALF_INTERVAL);
-      if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
- return;
- }
- Sleep(HALF_INTERVAL);
- } else {
- if (cpu_profiling_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
- this)) {
- return;
- }
- }
- if (runtime_profiler_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile,
- NULL)) {
- return;
- }
- }
- Sleep(FULL_INTERVAL);
- }
+      Sleep();  // TODO(svenpanne) Figure out if OS:Sleep(interval_) is enough.
}
}
@@ -806,22 +773,16 @@
SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
}
-
- static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
- if (!sampler->isolate()->IsInitialized()) return;
- sampler->isolate()->runtime_profiler()->NotifyTick();
- }
void SendProfilingSignal(pthread_t tid) {
if (!signal_handler_installed_) return;
pthread_kill(tid, SIGPROF);
}
- void Sleep(SleepInterval full_or_half) {
+ void Sleep() {
// Convert ms to us and subtract 100 us to compensate delays
// occuring during signal delivery.
useconds_t interval = interval_ * 1000 - 100;
- if (full_or_half == HALF_INTERVAL) interval /= 2;
int result = usleep(interval);
#ifdef DEBUG
if (result != 0 && errno != EINTR) {
=======================================
--- /branches/bleeding_edge/src/platform-win32.cc Fri Nov 30 02:26:21 2012
+++ /branches/bleeding_edge/src/platform-win32.cc Fri Dec 7 00:55:06 2012
@@ -2010,24 +2010,13 @@
SamplerRegistry::State state;
while ((state = SamplerRegistry::GetState()) !=
SamplerRegistry::HAS_NO_SAMPLERS) {
- bool cpu_profiling_enabled =
- (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
- bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
// When CPU profiling is enabled both JavaScript and C++ code is
// profiled. We must not suspend.
- if (!cpu_profiling_enabled) {
+ if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) {
+ SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
+ } else {
if (rate_limiter_.SuspendIfNecessary()) continue;
}
- if (cpu_profiling_enabled) {
- if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
- return;
- }
- }
- if (runtime_profiler_enabled) {
-      if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
- return;
- }
- }
OS::Sleep(interval_);
}
}
@@ -2039,11 +2028,6 @@
reinterpret_cast<SamplerThread*>(raw_sampler_thread);
sampler_thread->SampleContext(sampler);
}
-
- static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
- if (!sampler->isolate()->IsInitialized()) return;
- sampler->isolate()->runtime_profiler()->NotifyTick();
- }
void SampleContext(Sampler* sampler) {
HANDLE profiled_thread = sampler->platform_data()->profiled_thread();
=======================================
--- /branches/bleeding_edge/src/runtime-profiler.cc Wed Nov 21 23:58:59 2012
+++ /branches/bleeding_edge/src/runtime-profiler.cc Fri Dec 7 00:55:06 2012
@@ -196,16 +196,9 @@
// Get the stack check stub code object to match against. We aren't
// prepared to generate it, but we don't expect to have to.
- bool found_code = false;
Code* stack_check_code = NULL;
- if (FLAG_count_based_interrupts) {
- InterruptStub interrupt_stub;
-    found_code = interrupt_stub.FindCodeInCache(&stack_check_code, isolate_);
- } else // NOLINT
- { // NOLINT
- StackCheckStub check_stub;
- found_code = check_stub.FindCodeInCache(&stack_check_code, isolate_);
- }
+ InterruptStub interrupt_stub;
+  bool found_code = interrupt_stub.FindCodeInCache(&stack_check_code, isolate_);
if (found_code) {
Code* replacement_code =
isolate_->builtins()->builtin(Builtins::kOnStackReplacement);
@@ -377,12 +370,6 @@
}
}
}
-
-
-void RuntimeProfiler::NotifyTick() {
- if (FLAG_count_based_interrupts) return;
- isolate_->stack_guard()->RequestRuntimeProfilerTick();
-}
void RuntimeProfiler::SetUp() {
=======================================
--- /branches/bleeding_edge/src/runtime-profiler.h Thu Apr 5 07:10:39 2012
+++ /branches/bleeding_edge/src/runtime-profiler.h Fri Dec 7 00:55:06 2012
@@ -52,8 +52,6 @@
void OptimizeNow();
- void NotifyTick();
-
void SetUp();
void Reset();
void TearDown();
=======================================
--- /branches/bleeding_edge/src/runtime.cc Wed Dec 5 07:49:22 2012
+++ /branches/bleeding_edge/src/runtime.cc Fri Dec 7 00:55:06 2012
@@ -8175,15 +8175,8 @@
function->PrintName();
PrintF("]\n");
}
- Handle<Code> check_code;
- if (FLAG_count_based_interrupts) {
- InterruptStub interrupt_stub;
- check_code = interrupt_stub.GetCode();
- } else // NOLINT
- { // NOLINT
- StackCheckStub check_stub;
- check_code = check_stub.GetCode();
- }
+ InterruptStub interrupt_stub;
+ Handle<Code> check_code = interrupt_stub.GetCode();
Handle<Code> replacement_code =
isolate->builtins()->OnStackReplacement();
Deoptimizer::RevertStackCheckCode(*unoptimized,
*check_code,
=======================================
--- /branches/bleeding_edge/src/x64/deoptimizer-x64.cc Wed Dec 5 08:16:32 2012
+++ /branches/bleeding_edge/src/x64/deoptimizer-x64.cc Fri Dec 7 00:55:06 2012
@@ -116,8 +116,6 @@
static const byte kJnsInstruction = 0x79;
static const byte kJnsOffset = 0x1f;
-static const byte kJaeInstruction = 0x73;
-static const byte kJaeOffset = 0x07;
static const byte kCallInstruction = 0xe8;
static const byte kNopByteOne = 0x66;
static const byte kNopByteTwo = 0x90;
@@ -129,31 +127,26 @@
Address call_target_address = pc_after - kIntSize;
ASSERT_EQ(check_code->entry(),
Assembler::target_address_at(call_target_address));
- // The stack check code matches the pattern:
+ // The back edge bookkeeping code matches the pattern:
//
- // cmp rsp, <limit>
- // jae ok
+ // add <profiling_counter>, <-delta>
+ // jns ok
// call <stack guard>
// test rax, <loop nesting depth>
// ok: ...
//
// We will patch away the branch so the code is:
//
- // cmp rsp, <limit> ;; Not changed
+ // add <profiling_counter>, <-delta> ;; Not changed
// nop
// nop
// call <on-stack replacment>
// test rax, <loop nesting depth>
// ok:
//
- if (FLAG_count_based_interrupts) {
- ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
- ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
- } else {
- ASSERT_EQ(kJaeInstruction, *(call_target_address - 3));
- ASSERT_EQ(kJaeOffset, *(call_target_address - 2));
- }
- ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
+ ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
+ ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
+ ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
*(call_target_address - 3) = kNopByteOne;
*(call_target_address - 2) = kNopByteTwo;
Assembler::set_target_address_at(call_target_address,
@@ -176,13 +169,8 @@
ASSERT_EQ(kNopByteOne, *(call_target_address - 3));
ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
- if (FLAG_count_based_interrupts) {
- *(call_target_address - 3) = kJnsInstruction;
- *(call_target_address - 2) = kJnsOffset;
- } else {
- *(call_target_address - 3) = kJaeInstruction;
- *(call_target_address - 2) = kJaeOffset;
- }
+ *(call_target_address - 3) = kJnsInstruction;
+ *(call_target_address - 2) = kJnsOffset;
Assembler::set_target_address_at(call_target_address,
check_code->entry());
=======================================
--- /branches/bleeding_edge/src/x64/full-codegen-x64.cc Wed Dec 5 07:49:22 2012
+++ /branches/bleeding_edge/src/x64/full-codegen-x64.cc Fri Dec 7 00:55:06 2012
@@ -325,34 +325,27 @@
}
-void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
- Label* back_edge_target) {
- Comment cmnt(masm_, "[ Stack check");
+void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
+ Label* back_edge_target) {
+ Comment cmnt(masm_, "[ Back edge bookkeeping");
Label ok;
- if (FLAG_count_based_interrupts) {
- int weight = 1;
- if (FLAG_weighted_back_edges) {
- ASSERT(back_edge_target->is_bound());
- int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceUnit));
- }
- EmitProfilingCounterDecrement(weight);
- __ j(positive, &ok, Label::kNear);
- InterruptStub stub;
- __ CallStub(&stub);
- } else {
- __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- __ j(above_equal, &ok, Label::kNear);
- StackCheckStub stub;
- __ CallStub(&stub);
+ int weight = 1;
+ if (FLAG_weighted_back_edges) {
+ ASSERT(back_edge_target->is_bound());
+ int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kBackEdgeDistanceUnit));
}
+ EmitProfilingCounterDecrement(weight);
+ __ j(positive, &ok, Label::kNear);
+ InterruptStub stub;
+ __ CallStub(&stub);
  // Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
// the deoptimization input data found in the optimized code.
- RecordStackCheck(stmt->OsrEntryId());
+ RecordBackEdge(stmt->OsrEntryId());
// Loop stack checks can be patched to perform on-stack replacement. In
// order to decide whether or not to perform OSR we embed the loop depth
@@ -361,9 +354,7 @@
ASSERT(loop_depth() > 0);
__ testl(rax, Immediate(Min(loop_depth(), Code::kMaxLoopNestingMarker)));
- if (FLAG_count_based_interrupts) {
- EmitProfilingCounterReset();
- }
+ EmitProfilingCounterReset();
__ bind(&ok);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
@@ -1221,7 +1212,7 @@
__ bind(loop_statement.continue_label());
__ SmiAddConstant(Operand(rsp, 0 * kPointerSize), Smi::FromInt(1));
- EmitStackCheck(stmt, &loop);
+ EmitBackEdgeBookkeeping(stmt, &loop);
__ jmp(&loop);
// Remove the pointers stored on the stack.
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev