Revision: 11368
Author: [email protected]
Date: Wed Apr 18 04:52:42 2012
Log: Merged r11143, r11162, r11174, r11208, r11213, r11222 into 3.9
branch.
Reset function info counters after context disposal.
Move profiler_ticks to Code object, don't walk the stack when patching ICs.
Reset the optimization_disabled flag in function info after context disposal.
Check code kind when resetting profiler ticks.
Make progress in incremental marking if scavenge is delaying mark-sweep.
Reset function info counters after context disposal in incremental marking step.
Original CLs:
https://chromiumcodereview.appspot.com/9836091
https://chromiumcodereview.appspot.com/9866030
https://chromiumcodereview.appspot.com/9873022
https://chromiumcodereview.appspot.com/9956060
https://chromiumcodereview.appspot.com/9965054
https://chromiumcodereview.appspot.com/9903019
BUG=121147,117767,v8:1902
[email protected]
Review URL: https://chromiumcodereview.appspot.com/10041025
http://code.google.com/p/v8/source/detail?r=11368
Modified:
/branches/3.9/src/compiler.cc
/branches/3.9/src/factory.cc
/branches/3.9/src/full-codegen.cc
/branches/3.9/src/heap.cc
/branches/3.9/src/heap.h
/branches/3.9/src/ic.cc
/branches/3.9/src/incremental-marking.cc
/branches/3.9/src/incremental-marking.h
/branches/3.9/src/mark-compact.cc
/branches/3.9/src/objects-inl.h
/branches/3.9/src/objects-printer.cc
/branches/3.9/src/objects.cc
/branches/3.9/src/objects.h
/branches/3.9/src/runtime-profiler.cc
/branches/3.9/src/runtime.cc
/branches/3.9/src/spaces.cc
/branches/3.9/src/version.cc
/branches/3.9/test/cctest/test-heap.cc
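
The same check-and-reset pattern appears at several points in this patch: the
heap keeps a global_ic_age counter that context-disposal notifications bump,
every SharedFunctionInfo remembers the ic_age it was last reset at, and each
code path that touches a function (compilation cache hit, closure creation,
the GC marking visitors) compares the two and clears the profiling counters
on a mismatch. A minimal standalone sketch of the idea, with simplified
stand-in types rather than the real V8 classes:

  // Simplified model of the ic_age bookkeeping in this patch.
  struct Heap {
    int global_ic_age;
    Heap() : global_ic_age(0) {}
    void NotifyContextDisposed() { ++global_ic_age; }
  };

  struct SharedFunctionInfo {
    int ic_age;
    int opt_count;
    int profiler_ticks;
    bool optimization_disabled;
    SharedFunctionInfo()
        : ic_age(0), opt_count(0), profiler_ticks(0),
          optimization_disabled(false) {}

    // Rough counterpart of SharedFunctionInfo::ResetForNewContext below:
    // stale feedback from a disposed context should not keep the function
    // from being profiled and optimized afresh.
    void ResetForNewContext(int new_ic_age) {
      ic_age = new_ic_age;
      profiler_ticks = 0;
      opt_count = 0;
      optimization_disabled = false;
    }
  };

  void TouchFunction(Heap* heap, SharedFunctionInfo* shared) {
    if (shared->ic_age != heap->global_ic_age) {
      shared->ResetForNewContext(heap->global_ic_age);
    }
  }
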
=======================================
--- /branches/3.9/src/compiler.cc Mon Mar 19 04:01:52 2012
+++ /branches/3.9/src/compiler.cc Wed Apr 18 04:52:42 2012
@@ -531,6 +531,10 @@
if (extension == NULL && !result.is_null()) {
compilation_cache->PutScript(source, result);
}
+ } else {
+ if (result->ic_age() != HEAP->global_ic_age()) {
+ result->ResetForNewContext(HEAP->global_ic_age());
+ }
}
if (result.is_null()) isolate->ReportPendingMessages();
@@ -586,6 +590,10 @@
compilation_cache->PutEval(
source, context, is_global, result, scope_position);
}
+ } else {
+ if (result->ic_age() != HEAP->global_ic_age()) {
+ result->ResetForNewContext(HEAP->global_ic_age());
+ }
}
return result;
=======================================
--- /branches/3.9/src/factory.cc Mon Mar 19 04:01:52 2012
+++ /branches/3.9/src/factory.cc Wed Apr 18 04:52:42 2012
@@ -537,6 +537,10 @@
: isolate()->strict_mode_function_map(),
pretenure);
+ if (function_info->ic_age() != isolate()->heap()->global_ic_age()) {
+ function_info->ResetForNewContext(isolate()->heap()->global_ic_age());
+ }
+
result->set_context(*context);
if (!function_info->bound()) {
int number_of_literals = function_info->num_literals();
=======================================
--- /branches/3.9/src/full-codegen.cc Mon Mar 19 04:01:52 2012
+++ /branches/3.9/src/full-codegen.cc Wed Apr 18 04:52:42 2012
@@ -327,6 +327,7 @@
code->set_compiled_optimizable(info->IsOptimizable());
#endif // ENABLE_DEBUGGER_SUPPORT
code->set_allow_osr_at_loop_nesting_level(0);
+ code->set_profiler_ticks(0);
code->set_stack_check_table_offset(table_offset);
CodeGenerator::PrintCode(code, info);
info->SetCode(code); // May be an empty handle.
=======================================
--- /branches/3.9/src/heap.cc Wed Mar 28 00:54:11 2012
+++ /branches/3.9/src/heap.cc Wed Apr 18 04:52:42 2012
@@ -145,7 +145,6 @@
number_idle_notifications_(0),
last_idle_notification_gc_count_(0),
last_idle_notification_gc_count_init_(false),
- idle_notification_will_schedule_next_gc_(false),
mark_sweeps_since_idle_round_started_(0),
ms_count_at_last_idle_notification_(0),
gc_count_at_last_idle_gc_(0),
@@ -504,11 +503,17 @@
!incremental_marking()->IsStopped() &&
!incremental_marking()->should_hurry() &&
FLAG_incremental_marking_steps) {
- if (FLAG_trace_incremental_marking) {
- PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
- }
- collector = SCAVENGER;
- collector_reason = "incremental marking delaying mark-sweep";
+ // Make progress in incremental marking.
+ const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
+ incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
+ IncrementalMarking::NO_GC_VIA_STACK_GUARD);
+ if (!incremental_marking()->IsComplete()) {
+ if (FLAG_trace_incremental_marking) {
+ PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
+ }
+ collector = SCAVENGER;
+ collector_reason = "incremental marking delaying mark-sweep";
+ }
}
bool next_gc_likely_to_collect_more = false;
@@ -1953,7 +1958,7 @@
if (!maybe_info->To(&info)) return maybe_info;
}
info->set_ic_total_count(0);
- info->set_ic_with_typeinfo_count(0);
+ info->set_ic_with_type_info_count(0);
info->set_type_feedback_cells(TypeFeedbackCells::cast(empty_fixed_array()),
SKIP_WRITE_BARRIER);
return info;
@@ -2897,9 +2902,9 @@
share->set_inferred_name(empty_string(), SKIP_WRITE_BARRIER);
share->set_initial_map(undefined_value(), SKIP_WRITE_BARRIER);
share->set_this_property_assignments(undefined_value(),
SKIP_WRITE_BARRIER);
- share->set_deopt_counter(FLAG_deopt_every_n_times);
- share->set_profiler_ticks(0);
share->set_ast_node_count(0);
+ share->set_deopt_counter(FLAG_deopt_every_n_times);
+ share->set_ic_age(0);
// Set integer fields (smi or int, depending on the architecture).
share->set_length(0);
@@ -4817,10 +4822,8 @@
void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
- // This flag prevents incremental marking from requesting GC via stack guard
- idle_notification_will_schedule_next_gc_ = true;
- incremental_marking()->Step(step_size);
- idle_notification_will_schedule_next_gc_ = false;
+ incremental_marking()->Step(step_size,
+ IncrementalMarking::NO_GC_VIA_STACK_GUARD);
if (incremental_marking()->IsComplete()) {
bool uncommit = false;
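
Two behavioral notes on the heap.cc hunks above. First, when a scavenge is
requested while incremental marking is active, the collector no longer just
downgrades the pending mark-sweep: it first pushes incremental marking
forward by a fixed 1 MB step and only keeps delaying the mark-sweep if
marking is still incomplete afterwards. A rough standalone model of that
control flow (Step/IsComplete are stand-ins for the real IncrementalMarking
interface):

  enum Collector { SCAVENGER, MARK_COMPACTOR };

  struct Marking {
    bool complete;
    Marking() : complete(false) {}
    void Step(long bytes) { /* mark up to `bytes` worth of objects */ }
    bool IsComplete() const { return complete; }
  };

  Collector SelectCollector(Marking* marking, bool marking_active) {
    Collector collector = MARK_COMPACTOR;
    if (marking_active) {
      const long kStepSizeWhenDelayedByScavenge = 1 * 1024 * 1024;  // 1 MB
      marking->Step(kStepSizeWhenDelayedByScavenge);
      if (!marking->IsComplete()) {
        // Marking still has work queued: scavenge now, mark-sweep later.
        collector = SCAVENGER;
      }
    }
    return collector;
  }

Second, AdvanceIdleIncrementalMarking now expresses "do not request a GC via
the stack guard" as an explicit Step() argument instead of toggling the
idle_notification_will_schedule_next_gc_ flag around the call, which is why
that flag disappears from heap.h below.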
=======================================
--- /branches/3.9/src/heap.h Fri Mar 23 08:11:57 2012
+++ /branches/3.9/src/heap.h Wed Apr 18 04:52:42 2012
@@ -1569,10 +1569,6 @@
// The roots that have an index less than this are always in old space.
static const int kOldSpaceRoots = 0x20;
- bool idle_notification_will_schedule_next_gc() {
- return idle_notification_will_schedule_next_gc_;
- }
-
uint32_t HashSeed() {
uint32_t seed = static_cast<uint32_t>(hash_seed()->value());
ASSERT(FLAG_randomize_hashes || seed == 0);
@@ -2033,7 +2029,6 @@
unsigned int last_idle_notification_gc_count_;
bool last_idle_notification_gc_count_init_;
- bool idle_notification_will_schedule_next_gc_;
int mark_sweeps_since_idle_round_started_;
int ms_count_at_last_idle_notification_;
unsigned int gc_count_at_last_idle_gc_;
=======================================
--- /branches/3.9/src/ic.cc Wed Mar 28 06:05:23 2012
+++ /branches/3.9/src/ic.cc Wed Apr 18 04:52:42 2012
@@ -294,60 +294,46 @@
type, HandleVector(&name, 1));
return isolate()->Throw(*error);
}
+
+
+static int ComputeTypeInfoCountDelta(IC::State old_state, IC::State new_state) {
+ bool was_uninitialized =
+ old_state == UNINITIALIZED || old_state == PREMONOMORPHIC;
+ bool is_uninitialized =
+ new_state == UNINITIALIZED || new_state == PREMONOMORPHIC;
+ return (was_uninitialized && !is_uninitialized) ? 1 :
+ (!was_uninitialized && is_uninitialized) ? -1 : 0;
+}
void IC::PostPatching(Address address, Code* target, Code* old_target) {
- if (FLAG_type_info_threshold > 0) {
- if (old_target->is_inline_cache_stub() &&
- target->is_inline_cache_stub()) {
- State old_state = old_target->ic_state();
- State new_state = target->ic_state();
- bool was_uninitialized =
- old_state == UNINITIALIZED || old_state == PREMONOMORPHIC;
- bool is_uninitialized =
- new_state == UNINITIALIZED || new_state == PREMONOMORPHIC;
- int delta = 0;
- if (was_uninitialized && !is_uninitialized) {
- delta = 1;
- } else if (!was_uninitialized && is_uninitialized) {
- delta = -1;
- }
- if (delta != 0) {
- Code* host = target->GetHeap()->isolate()->
- inner_pointer_to_code_cache()->GetCacheEntry(address)->code;
- // Not all Code objects have TypeFeedbackInfo.
- if (host->type_feedback_info()->IsTypeFeedbackInfo()) {
- TypeFeedbackInfo* info =
- TypeFeedbackInfo::cast(host->type_feedback_info());
- info->set_ic_with_typeinfo_count(
- info->ic_with_typeinfo_count() + delta);
- }
- }
+ if (FLAG_type_info_threshold == 0 && !FLAG_watch_ic_patching) {
+ return;
+ }
+ Code* host = target->GetHeap()->isolate()->
+ inner_pointer_to_code_cache()->GetCacheEntry(address)->code;
+ if (host->kind() != Code::FUNCTION) return;
+
+ if (FLAG_type_info_threshold > 0 &&
+ old_target->is_inline_cache_stub() &&
+ target->is_inline_cache_stub()) {
+ int delta = ComputeTypeInfoCountDelta(old_target->ic_state(),
+ target->ic_state());
+ // Not all Code objects have TypeFeedbackInfo.
+ if (delta != 0 && host->type_feedback_info()->IsTypeFeedbackInfo()) {
+ TypeFeedbackInfo* info =
+ TypeFeedbackInfo::cast(host->type_feedback_info());
+ info->set_ic_with_type_info_count(
+ info->ic_with_type_info_count() + delta);
}
}
if (FLAG_watch_ic_patching) {
+ host->set_profiler_ticks(0);
Isolate::Current()->runtime_profiler()->NotifyICChanged();
- // We do not want to optimize until the ICs have settled down,
- // so when they are patched, we postpone optimization for the
- // current function and the functions above it on the stack that
- // might want to inline this one.
- StackFrameIterator it;
- if (it.done()) return;
- it.Advance();
- static const int kStackFramesToMark = Compiler::kMaxInliningLevels - 1;
- for (int i = 0; i < kStackFramesToMark; ++i) {
- if (it.done()) return;
- StackFrame* raw_frame = it.frame();
- if (raw_frame->is_java_script()) {
- JSFunction* function =
- JSFunction::cast(JavaScriptFrame::cast(raw_frame)->function());
- if (function->IsOptimized()) continue;
- SharedFunctionInfo* shared = function->shared();
- shared->set_profiler_ticks(0);
- }
- it.Advance();
- }
- }
+ }
+ // TODO(2029): When an optimized function is patched, it would
+ // be nice to propagate the corresponding type information to its
+ // unoptimized version for the benefit of later inlining.
}
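
ComputeTypeInfoCountDelta, factored out above, just classifies each IC state
as "uninitialized" (UNINITIALIZED or PREMONOMORPHIC) or not and reports the
transition: uninitialized to initialized contributes +1 to
ic_with_type_info_count, the reverse contributes -1, everything else 0. A
standalone copy of the logic with a reduced state set and a few worked cases:

  enum State { UNINITIALIZED, PREMONOMORPHIC, MONOMORPHIC, MEGAMORPHIC };

  static int ComputeTypeInfoCountDelta(State old_state, State new_state) {
    bool was_uninitialized =
        old_state == UNINITIALIZED || old_state == PREMONOMORPHIC;
    bool is_uninitialized =
        new_state == UNINITIALIZED || new_state == PREMONOMORPHIC;
    return (was_uninitialized && !is_uninitialized) ? 1 :
           (!was_uninitialized && is_uninitialized) ? -1 : 0;
  }

  // ComputeTypeInfoCountDelta(UNINITIALIZED, MONOMORPHIC)    == +1
  // ComputeTypeInfoCountDelta(MEGAMORPHIC, UNINITIALIZED)    == -1
  // ComputeTypeInfoCountDelta(PREMONOMORPHIC, UNINITIALIZED) ==  0

The other half of the hunk is the "don't walk the stack" part of the change:
instead of iterating stack frames to zero profiler_ticks on the shared
function infos of potential inliners, PostPatching now resets the single
profiler_ticks counter on the host Code object it already looked up, which
is only meaningful (and only stored) for Code::FUNCTION kinds.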
=======================================
--- /branches/3.9/src/incremental-marking.cc Fri Mar 23 08:11:57 2012
+++ /branches/3.9/src/incremental-marking.cc Wed Apr 18 04:52:42 2012
@@ -204,6 +204,12 @@
RecordCodeEntrySlot(entry_address, Code::cast(target));
MarkObject(target);
}
+
+ void VisitSharedFunctionInfo(SharedFunctionInfo* shared) {
+ if (shared->ic_age() != heap_->global_ic_age()) {
+ shared->ResetForNewContext(heap_->global_ic_age());
+ }
+ }
void VisitPointer(Object** p) {
Object* obj = *p;
@@ -743,7 +749,7 @@
}
-void IncrementalMarking::MarkingComplete() {
+void IncrementalMarking::MarkingComplete(CompletionAction action) {
state_ = COMPLETE;
// We will set the stack guard to request a GC now. This will mean the rest
// of the GC gets performed as soon as possible (we can't do a GC here in a
@@ -754,13 +760,14 @@
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Complete (normal).\n");
}
- if (!heap_->idle_notification_will_schedule_next_gc()) {
+ if (action == GC_VIA_STACK_GUARD) {
heap_->isolate()->stack_guard()->RequestGC();
}
}
-void IncrementalMarking::Step(intptr_t allocated_bytes) {
+void IncrementalMarking::Step(intptr_t allocated_bytes,
+ CompletionAction action) {
if (heap_->gc_state() != Heap::NOT_IN_GC ||
!FLAG_incremental_marking ||
!FLAG_incremental_marking_steps ||
@@ -833,7 +840,7 @@
Marking::MarkBlack(obj_mark_bit);
MemoryChunk::IncrementLiveBytesFromGC(obj->address(), size);
}
- if (marking_deque_.IsEmpty()) MarkingComplete();
+ if (marking_deque_.IsEmpty()) MarkingComplete(action);
}
allocated_ = 0;
=======================================
--- /branches/3.9/src/incremental-marking.h Mon Jan 16 03:42:08 2012
+++ /branches/3.9/src/incremental-marking.h Wed Apr 18 04:52:42 2012
@@ -46,6 +46,11 @@
COMPLETE
};
+ enum CompletionAction {
+ GC_VIA_STACK_GUARD,
+ NO_GC_VIA_STACK_GUARD
+ };
+
explicit IncrementalMarking(Heap* heap);
void TearDown();
@@ -82,7 +87,7 @@
void Abort();
- void MarkingComplete();
+ void MarkingComplete(CompletionAction action);
// It's hard to know how much work the incremental marker should do to make
// progress in the face of the mutator creating new work for it. We start
@@ -102,10 +107,11 @@
static const intptr_t kMaxAllocationMarkingFactor = 1000;
void OldSpaceStep(intptr_t allocated) {
- Step(allocated * kFastMarking / kInitialAllocationMarkingFactor);
+ Step(allocated * kFastMarking / kInitialAllocationMarkingFactor,
+ GC_VIA_STACK_GUARD);
}
- void Step(intptr_t allocated);
+ void Step(intptr_t allocated, CompletionAction action);
inline void RestartIfNotMarking() {
if (state_ == COMPLETE) {
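
The CompletionAction parameter introduced here replaces the stateful
idle_notification_will_schedule_next_gc_ flag: callers of Step() now say
explicitly whether finishing the marking may request a GC through the stack
guard, so nothing has to remember to set and clear a flag around the call. A
reduced sketch of the plumbing (stand-ins for the real deque and stack-guard
machinery):

  enum CompletionAction { GC_VIA_STACK_GUARD, NO_GC_VIA_STACK_GUARD };

  struct IncrementalMarking {
    bool deque_empty;
    IncrementalMarking() : deque_empty(false) {}

    void Step(long allocated, CompletionAction action) {
      // ... do `allocated` bytes worth of marking work ...
      if (deque_empty) MarkingComplete(action);
    }

    void MarkingComplete(CompletionAction action) {
      // Only allocation-driven steps may schedule the final GC themselves;
      // idle-notification and test-driven steps pass NO_GC_VIA_STACK_GUARD.
      if (action == GC_VIA_STACK_GUARD) RequestGC();
    }

    void RequestGC() { /* stack_guard()->RequestGC() in the real code */ }
  };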
=======================================
--- /branches/3.9/src/mark-compact.cc Fri Mar 23 08:11:57 2012
+++ /branches/3.9/src/mark-compact.cc Wed Apr 18 04:52:42 2012
@@ -1406,6 +1406,10 @@
if (shared->IsInobjectSlackTrackingInProgress())
shared->DetachInitialMap();
+ if (shared->ic_age() != heap->global_ic_age()) {
+ shared->ResetForNewContext(heap->global_ic_age());
+ }
+
if (!known_flush_code_candidate) {
known_flush_code_candidate = IsFlushable(heap, shared);
if (known_flush_code_candidate) {
=======================================
--- /branches/3.9/src/objects-inl.h Fri Mar 23 08:11:57 2012
+++ /branches/3.9/src/objects-inl.h Wed Apr 18 04:52:42 2012
@@ -3086,6 +3086,19 @@
ASSERT(level >= 0 && level <= kMaxLoopNestingMarker);
WRITE_BYTE_FIELD(this, kAllowOSRAtLoopNestingLevelOffset, level);
}
+
+
+int Code::profiler_ticks() {
+ ASSERT(kind() == FUNCTION);
+ return READ_BYTE_FIELD(this, kProfilerTicksOffset);
+}
+
+
+void Code::set_profiler_ticks(int ticks) {
+ ASSERT(kind() == FUNCTION);
+ ASSERT(ticks < 256);
+ WRITE_BYTE_FIELD(this, kProfilerTicksOffset, ticks);
+}
unsigned Code::stack_slots() {
@@ -3507,8 +3520,8 @@
ACCESSORS(SharedFunctionInfo, inferred_name, String, kInferredNameOffset)
ACCESSORS(SharedFunctionInfo, this_property_assignments, Object,
kThisPropertyAssignmentsOffset)
-
-SMI_ACCESSORS(SharedFunctionInfo, profiler_ticks, kProfilerTicksOffset)
+SMI_ACCESSORS(SharedFunctionInfo, ic_age, kICAgeOffset)
+
BOOL_ACCESSORS(FunctionTemplateInfo, flag, hidden_prototype,
kHiddenPrototypeBit)
@@ -4814,7 +4827,7 @@
SMI_ACCESSORS(TypeFeedbackInfo, ic_total_count, kIcTotalCountOffset)
-SMI_ACCESSORS(TypeFeedbackInfo, ic_with_typeinfo_count,
+SMI_ACCESSORS(TypeFeedbackInfo, ic_with_type_info_count,
kIcWithTypeinfoCountOffset)
ACCESSORS(TypeFeedbackInfo, type_feedback_cells, TypeFeedbackCells,
kTypeFeedbackCellsOffset)
=======================================
--- /branches/3.9/src/objects-printer.cc Mon Mar 12 01:18:42 2012
+++ /branches/3.9/src/objects-printer.cc Wed Apr 18 04:52:42 2012
@@ -559,8 +559,8 @@
void TypeFeedbackInfo::TypeFeedbackInfoPrint(FILE* out) {
HeapObject::PrintHeader(out, "TypeFeedbackInfo");
- PrintF(out, "\n - ic_total_count: %d, ic_with_typeinfo_count: %d",
- ic_total_count(), ic_with_typeinfo_count());
+ PrintF(out, "\n - ic_total_count: %d, ic_with_type_info_count: %d",
+ ic_total_count(), ic_with_type_info_count());
PrintF(out, "\n - type_feedback_cells: ");
type_feedback_cells()->FixedArrayPrint(out);
}
=======================================
--- /branches/3.9/src/objects.cc Mon Mar 26 02:46:02 2012
+++ /branches/3.9/src/objects.cc Wed Apr 18 04:52:42 2012
@@ -1390,9 +1390,11 @@
case EXTERNAL_FLOAT_ARRAY_TYPE:
case EXTERNAL_DOUBLE_ARRAY_TYPE:
break;
- case SHARED_FUNCTION_INFO_TYPE:
- SharedFunctionInfo::BodyDescriptor::IterateBody(this, v);
+ case SHARED_FUNCTION_INFO_TYPE: {
+ SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(this);
+ shared->SharedFunctionInfoIterateBody(v);
break;
+ }
#define MAKE_STRUCT_CASE(NAME, Name, name) \
case NAME##_TYPE:
@@ -7867,6 +7869,22 @@
// The map survived the gc, so there may be objects referencing it.
set_live_objects_may_exist(true);
}
+
+
+void SharedFunctionInfo::ResetForNewContext(int new_ic_age) {
+ code()->ClearInlineCaches();
+ set_ic_age(new_ic_age);
+ if (code()->kind() == Code::FUNCTION) {
+ code()->set_profiler_ticks(0);
+ if (optimization_disabled() &&
+ opt_count() >= Compiler::kDefaultMaxOptCount) {
+ // Re-enable optimizations if they were disabled due to opt_count limit.
+ set_optimization_disabled(false);
+ code()->set_optimizable(true);
+ }
+ set_opt_count(0);
+ }
+}
static void GetMinInobjectSlack(Map* map, void* data) {
@@ -7910,6 +7928,12 @@
set_expected_nof_properties(expected_nof_properties() - slack);
}
}
+
+
+void SharedFunctionInfo::SharedFunctionInfoIterateBody(ObjectVisitor* v) {
+ v->VisitSharedFunctionInfo(this);
+ SharedFunctionInfo::BodyDescriptor::IterateBody(this, v);
+}
#define DECLARE_TAG(ignore1, name, ignore2) name,
@@ -7968,7 +7992,6 @@
VisitPointer(&target);
CHECK_EQ(target, old_target);  // VisitPointer doesn't change Code* *target.
}
-
void ObjectVisitor::VisitEmbeddedPointer(RelocInfo* rinfo) {
ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
@@ -8114,6 +8137,21 @@
}
return NULL;
}
+
+
+void Code::ClearInlineCaches() {
+ int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
+ RelocInfo::ModeMask(RelocInfo::CONSTRUCT_CALL) |
+ RelocInfo::ModeMask(RelocInfo::CODE_TARGET_WITH_ID) |
+ RelocInfo::ModeMask(RelocInfo::CODE_TARGET_CONTEXT);
+ for (RelocIterator it(this, mask); !it.done(); it.next()) {
+ RelocInfo* info = it.rinfo();
+ Code* target(Code::GetCodeFromTargetAddress(info->target_address()));
+ if (target->is_inline_cache_stub()) {
+ IC::Clear(info->pc());
+ }
+ }
+}
#ifdef ENABLE_DISASSEMBLER
=======================================
--- /branches/3.9/src/objects.h Fri Mar 23 08:11:57 2012
+++ /branches/3.9/src/objects.h Wed Apr 18 04:52:42 2012
@@ -4255,6 +4255,11 @@
inline void set_allow_osr_at_loop_nesting_level(int level);
inline int allow_osr_at_loop_nesting_level();
+ // [profiler_ticks]: For FUNCTION kind, tells for how many profiler ticks
+ // the code object was seen on the stack with no IC patching going on.
+ inline int profiler_ticks();
+ inline void set_profiler_ticks(int ticks);
+
// [stack_slots]: For kind OPTIMIZED_FUNCTION, the number of stack slots
// reserved in the code prologue.
inline unsigned stack_slots();
@@ -4423,6 +4428,7 @@
#ifdef DEBUG
void CodeVerify();
#endif
+ void ClearInlineCaches();
// Max loop nesting marker used to postpone OSR. We don't take loop
// nesting that is deeper than 5 levels into account.
@@ -4473,6 +4479,7 @@
static const int kBinaryOpReturnTypeOffset = kBinaryOpTypeOffset + 1;
static const int kAllowOSRAtLoopNestingLevelOffset = kFullCodeFlags + 1;
+ static const int kProfilerTicksOffset = kAllowOSRAtLoopNestingLevelOffset + 1;
static const int kSafepointTableOffsetOffset = kStackSlotsOffset + kIntSize;
static const int kStackCheckTableOffsetOffset = kStackSlotsOffset + kIntSize;
@@ -5323,16 +5330,18 @@
inline int compiler_hints();
inline void set_compiler_hints(int value);
+ inline int ast_node_count();
+ inline void set_ast_node_count(int count);
+
// A counter used to determine when to stress the deoptimizer with a
// deopt.
inline int deopt_counter();
inline void set_deopt_counter(int counter);
- inline int profiler_ticks();
- inline void set_profiler_ticks(int ticks);
-
- inline int ast_node_count();
- inline void set_ast_node_count(int count);
+ // Inline cache age is used to infer whether the function survived a context
+ // disposal or not. In the former case we reset the opt_count.
+ inline int ic_age();
+ inline void set_ic_age(int age);
// Add information on assignments of the form this.x = ...;
void SetThisPropertyAssignmentsInfo(
@@ -5478,6 +5487,8 @@
void SharedFunctionInfoVerify();
#endif
+ void ResetForNewContext(int new_ic_age);
+
// Helpers to compile the shared code. Returns true on success, false on
// failure (e.g., stack overflow during compilation).
static bool EnsureCompiled(Handle<SharedFunctionInfo> shared,
@@ -5485,6 +5496,8 @@
static bool CompileLazy(Handle<SharedFunctionInfo> shared,
ClearExceptionFlag flag);
+ void SharedFunctionInfoIterateBody(ObjectVisitor* v);
+
// Casting.
static inline SharedFunctionInfo* cast(Object* obj);
@@ -5508,12 +5521,13 @@
kInferredNameOffset + kPointerSize;
static const int kThisPropertyAssignmentsOffset =
kInitialMapOffset + kPointerSize;
- static const int kProfilerTicksOffset =
- kThisPropertyAssignmentsOffset + kPointerSize;
+ // ic_age is a Smi field. It could be grouped with another Smi field into a
+ // PSEUDO_SMI_ACCESSORS pair (on x64), if one becomes available.
+ static const int kICAgeOffset = kThisPropertyAssignmentsOffset + kPointerSize;
#if V8_HOST_ARCH_32_BIT
// Smi fields.
static const int kLengthOffset =
- kProfilerTicksOffset + kPointerSize;
+ kICAgeOffset + kPointerSize;
static const int kFormalParameterCountOffset = kLengthOffset + kPointerSize;
static const int kExpectedNofPropertiesOffset =
kFormalParameterCountOffset + kPointerSize;
@@ -5532,8 +5546,9 @@
static const int kOptCountOffset =
kThisPropertyAssignmentsCountOffset + kPointerSize;
static const int kAstNodeCountOffset = kOptCountOffset + kPointerSize;
- static const int kDeoptCounterOffset =
- kAstNodeCountOffset + kPointerSize;
+ static const int kDeoptCounterOffset = kAstNodeCountOffset + kPointerSize;
+
+
// Total size.
static const int kSize = kDeoptCounterOffset + kPointerSize;
#else
@@ -5547,7 +5562,7 @@
// word is not set and thus this word cannot be treated as pointer
// to HeapObject during old space traversal.
static const int kLengthOffset =
- kProfilerTicksOffset + kPointerSize;
+ kICAgeOffset + kPointerSize;
static const int kFormalParameterCountOffset =
kLengthOffset + kIntSize;
@@ -6562,8 +6577,8 @@
inline int ic_total_count();
inline void set_ic_total_count(int count);
- inline int ic_with_typeinfo_count();
- inline void set_ic_with_typeinfo_count(int count);
+ inline int ic_with_type_info_count();
+ inline void set_ic_with_type_info_count(int count);
DECL_ACCESSORS(type_feedback_cells, TypeFeedbackCells)
@@ -8530,6 +8545,8 @@
// Visit pointer embedded into a code object.
virtual void VisitEmbeddedPointer(RelocInfo* rinfo);
+
+ virtual void VisitSharedFunctionInfo(SharedFunctionInfo* shared) {}
// Visits a contiguous array of external references (references to the C++
// heap) in the half-open range [start, end). Any or all of the values
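
The VisitSharedFunctionInfo hook added to ObjectVisitor defaults to a no-op,
and SharedFunctionInfoIterateBody (see objects.cc above) invokes it before
iterating the object's pointer fields, so the incremental-marking visitor can
piggyback the ic_age check on ordinary marking without touching any other
visitor. A reduced sketch of that wiring, with stand-ins for the real
classes:

  struct SharedFunctionInfo;

  struct ObjectVisitor {
    virtual ~ObjectVisitor() {}
    // Default is a no-op so existing visitors are unaffected.
    virtual void VisitSharedFunctionInfo(SharedFunctionInfo* shared) {}
  };

  struct SharedFunctionInfo {
    int ic_age;
    SharedFunctionInfo() : ic_age(0) {}
    void ResetForNewContext(int new_ic_age) { ic_age = new_ic_age; }

    // Counterpart of SharedFunctionInfoIterateBody: let the visitor see
    // the object itself before its body is iterated.
    void IterateBody(ObjectVisitor* v) {
      v->VisitSharedFunctionInfo(this);
      // ... BodyDescriptor::IterateBody(this, v) in the real code ...
    }
  };

  struct IncrementalMarkingVisitor : ObjectVisitor {
    int global_ic_age;
    explicit IncrementalMarkingVisitor(int age) : global_ic_age(age) {}
    virtual void VisitSharedFunctionInfo(SharedFunctionInfo* shared) {
      if (shared->ic_age != global_ic_age) {
        shared->ResetForNewContext(global_ic_age);
      }
    }
  };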
=======================================
--- /branches/3.9/src/runtime-profiler.cc Wed Mar 28 00:54:11 2012
+++ /branches/3.9/src/runtime-profiler.cc Wed Apr 18 04:52:42 2012
@@ -65,6 +65,12 @@
// Number of times a function has to be seen on the stack before it is
// optimized.
static const int kProfilerTicksBeforeOptimization = 2;
+// If a function does not have enough type info (according to
+// FLAG_type_info_threshold), but has seen a huge number of ticks,
+// optimize it as it is.
+static const int kTicksWhenNotEnoughTypeInfo = 100;
+// We only have one byte to store the number of ticks.
+STATIC_ASSERT(kTicksWhenNotEnoughTypeInfo < 256);
// Maximum size in bytes of generated code for a function to be optimized
// the very first time it is seen on the stack.
@@ -103,20 +109,20 @@
static void GetICCounts(JSFunction* function,
- int* ic_with_typeinfo_count,
+ int* ic_with_type_info_count,
int* ic_total_count,
int* percentage) {
*ic_total_count = 0;
- *ic_with_typeinfo_count = 0;
+ *ic_with_type_info_count = 0;
Object* raw_info =
function->shared()->code()->type_feedback_info();
if (raw_info->IsTypeFeedbackInfo()) {
TypeFeedbackInfo* info = TypeFeedbackInfo::cast(raw_info);
- *ic_with_typeinfo_count = info->ic_with_typeinfo_count();
+ *ic_with_type_info_count = info->ic_with_type_info_count();
*ic_total_count = info->ic_total_count();
}
*percentage = *ic_total_count > 0
- ? 100 * *ic_with_typeinfo_count / *ic_total_count
+ ? 100 * *ic_with_type_info_count / *ic_total_count
: 100;
}
@@ -259,13 +265,14 @@
}
}
- if (function->IsMarkedForLazyRecompilation() &&
- function->shared()->code()->kind() == Code::FUNCTION) {
- Code* unoptimized = function->shared()->code();
- int nesting = unoptimized->allow_osr_at_loop_nesting_level();
+ Code* shared_code = function->shared()->code();
+ if (shared_code->kind() != Code::FUNCTION) continue;
+
+ if (function->IsMarkedForLazyRecompilation()) {
+ int nesting = shared_code->allow_osr_at_loop_nesting_level();
if (nesting == 0) AttemptOnStackReplacement(function);
int new_nesting = Min(nesting + 1, Code::kMaxLoopNestingMarker);
- unoptimized->set_allow_osr_at_loop_nesting_level(new_nesting);
+ shared_code->set_allow_osr_at_loop_nesting_level(new_nesting);
}
// Do not record non-optimizable functions.
@@ -283,7 +290,7 @@
}
if (FLAG_watch_ic_patching) {
- int ticks = function->shared()->profiler_ticks();
+ int ticks = shared_code->profiler_ticks();
if (ticks >= kProfilerTicksBeforeOptimization) {
int typeinfo, total, percentage;
@@ -292,12 +299,10 @@
// If this particular function hasn't had any ICs patched for enough
// ticks, optimize it now.
Optimize(function, "hot and stable");
- } else if (ticks >= 100) {
- // If this function does not have enough type info, but has
- // seen a huge number of ticks, optimize it as it is.
+ } else if (ticks >= kTicksWhenNotEnoughTypeInfo) {
Optimize(function, "not much type info but very hot");
} else {
- function->shared()->set_profiler_ticks(ticks + 1);
+ shared_code->set_profiler_ticks(ticks + 1);
if (FLAG_trace_opt_verbose) {
PrintF("[not yet optimizing ");
function->PrintName();
@@ -306,7 +311,7 @@
}
}
} else if (!any_ic_changed_ &&
- function->shared()->code()->instruction_size() < kMaxSizeEarlyOpt) {
+ shared_code->instruction_size() < kMaxSizeEarlyOpt) {
// If no IC was patched since the last tick and this function is very
// small, optimistically optimize it now.
Optimize(function, "small function");
@@ -319,7 +324,7 @@
// then type info might already be stable and we can optimize now.
Optimize(function, "stable on startup");
} else {
- function->shared()->set_profiler_ticks(ticks + 1);
+ shared_code->set_profiler_ticks(ticks + 1);
}
} else { // !FLAG_watch_ic_patching
samples[sample_count++] = function;
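
The decision logic that results from these runtime-profiler.cc changes, with
the tick counter now read from and written to the unoptimized Code object: a
function seen on the stack for kProfilerTicksBeforeOptimization ticks is
optimized if its type-info percentage (from GetICCounts) clears
FLAG_type_info_threshold, a function that accumulates
kTicksWhenNotEnoughTypeInfo ticks is optimized regardless, and otherwise the
counter is bumped. The new STATIC_ASSERT ties the 100-tick ceiling to the
one-byte profiler_ticks field (note the ASSERT(ticks < 256) in
objects-inl.h). A standalone model of just that decision, with the rest of
the profiler elided:

  enum Decision { OPTIMIZE_HOT_AND_STABLE, OPTIMIZE_VERY_HOT, WAIT };

  static const int kProfilerTicksBeforeOptimization = 2;
  static const int kTicksWhenNotEnoughTypeInfo = 100;

  Decision Decide(int* profiler_ticks,       // the byte field on Code
                  int type_info_percentage,  // computed by GetICCounts
                  int type_info_threshold) { // FLAG_type_info_threshold
    if (*profiler_ticks >= kProfilerTicksBeforeOptimization) {
      if (type_info_percentage >= type_info_threshold) {
        return OPTIMIZE_HOT_AND_STABLE;  // enough stable type feedback
      }
      if (*profiler_ticks >= kTicksWhenNotEnoughTypeInfo) {
        return OPTIMIZE_VERY_HOT;        // very hot; optimize as it is
      }
    }
    ++*profiler_ticks;  // one-byte field; see ASSERT(ticks < 256)
    return WAIT;
  }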
=======================================
--- /branches/3.9/src/runtime.cc Tue Mar 27 09:45:16 2012
+++ /branches/3.9/src/runtime.cc Wed Apr 18 04:52:42 2012
@@ -8043,8 +8043,6 @@
ASSERT(args.length() == 1);
Handle<JSFunction> function = args.at<JSFunction>(0);
- function->shared()->set_profiler_ticks(0);
-
// If the function is not compiled ignore the lazy
// recompilation. This can happen if the debugger is activated and
// the function is returned to the not compiled state.
@@ -8067,6 +8065,7 @@
function->ReplaceCode(function->shared()->code());
return function->code();
}
+ function->shared()->code()->set_profiler_ticks(0);
if (JSFunction::CompileOptimized(function,
AstNode::kNoNumber,
CLEAR_EXCEPTION)) {
=======================================
--- /branches/3.9/src/spaces.cc Tue Mar 20 06:01:16 2012
+++ /branches/3.9/src/spaces.cc Wed Apr 18 04:52:42 2012
@@ -1198,13 +1198,15 @@
allocation_info_.limit + inline_allocation_limit_step_,
high);
int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
- heap()->incremental_marking()->Step(bytes_allocated);
+ heap()->incremental_marking()->Step(
+ bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
top_on_previous_step_ = new_top;
return AllocateRaw(size_in_bytes);
} else if (AddFreshPage()) {
// Switched to new page. Try allocating again.
int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
- heap()->incremental_marking()->Step(bytes_allocated);
+ heap()->incremental_marking()->Step(
+ bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
top_on_previous_step_ = to_space_.page_low();
return AllocateRaw(size_in_bytes);
} else {
=======================================
--- /branches/3.9/src/version.cc Tue Apr 17 09:09:59 2012
+++ /branches/3.9/src/version.cc Wed Apr 18 04:52:42 2012
@@ -35,7 +35,7 @@
#define MAJOR_VERSION 3
#define MINOR_VERSION 9
#define BUILD_NUMBER 24
-#define PATCH_LEVEL 14
+#define PATCH_LEVEL 15
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
#define IS_CANDIDATE_VERSION 0
=======================================
--- /branches/3.9/test/cctest/test-heap.cc Fri Mar 9 02:52:05 2012
+++ /branches/3.9/test/cctest/test-heap.cc Wed Apr 18 04:52:42 2012
@@ -1521,16 +1521,12 @@
while (!Marking::IsBlack(Marking::MarkBitFrom(f->code())) &&
!marking->IsStopped()) {
- marking->Step(MB);
+ // Discard any pending GC requests otherwise we will get GC when we enter
+ // code below.
+ marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD);
}
CHECK(marking->IsMarking());
-
- // Discard any pending GC requests otherwise we will get GC when we enter
- // code below.
- if (ISOLATE->stack_guard()->IsGCRequest()) {
- ISOLATE->stack_guard()->Continue(GC_REQUEST);
- }
{
v8::HandleScope scope;
@@ -1597,3 +1593,90 @@
HEAP->CollectAllGarbage(Heap::kNoGCFlags);
CHECK(map->GetPrototypeTransition(*prototype)->IsMap());
}
+
+
+TEST(ResetSharedFunctionInfoCountersDuringIncrementalMarking) {
+ i::FLAG_allow_natives_syntax = true;
+#ifdef DEBUG
+ i::FLAG_verify_heap = true;
+#endif
+ InitializeVM();
+ if (!i::V8::UseCrankshaft()) return;
+ v8::HandleScope outer_scope;
+
+ {
+ v8::HandleScope scope;
+ CompileRun(
+ "function f () {"
+ " var s = 0;"
+ " for (var i = 0; i < 100; i++) s += i;"
+ " return s;"
+ "}"
+ "f(); f();"
+ "%OptimizeFunctionOnNextCall(f);"
+ "f();");
+ }
+ Handle<JSFunction> f =
+ v8::Utils::OpenHandle(
+ *v8::Handle<v8::Function>::Cast(
+ v8::Context::GetCurrent()->Global()->Get(v8_str("f"))));
+ CHECK(f->IsOptimized());
+
+ IncrementalMarking* marking = HEAP->incremental_marking();
+ marking->Abort();
+ marking->Start();
+
+ // The following two calls will increment HEAP->global_ic_age().
+ const int kLongIdlePauseInMs = 1000;
+ v8::V8::ContextDisposedNotification();
+ v8::V8::IdleNotification(kLongIdlePauseInMs);
+
+ while (!marking->IsStopped() && !marking->IsComplete()) {
+ marking->Step(1 * MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD);
+ }
+
+ CHECK_EQ(HEAP->global_ic_age(), f->shared()->ic_age());
+ CHECK_EQ(0, f->shared()->opt_count());
+ CHECK_EQ(0, f->shared()->code()->profiler_ticks());
+}
+
+
+TEST(ResetSharedFunctionInfoCountersDuringMarkSweep) {
+ i::FLAG_allow_natives_syntax = true;
+#ifdef DEBUG
+ i::FLAG_verify_heap = true;
+#endif
+ InitializeVM();
+ if (!i::V8::UseCrankshaft()) return;
+ v8::HandleScope outer_scope;
+
+ {
+ v8::HandleScope scope;
+ CompileRun(
+ "function f () {"
+ " var s = 0;"
+ " for (var i = 0; i < 100; i++) s += i;"
+ " return s;"
+ "}"
+ "f(); f();"
+ "%OptimizeFunctionOnNextCall(f);"
+ "f();");
+ }
+ Handle<JSFunction> f =
+ v8::Utils::OpenHandle(
+ *v8::Handle<v8::Function>::Cast(
+ v8::Context::GetCurrent()->Global()->Get(v8_str("f"))));
+ CHECK(f->IsOptimized());
+
+ HEAP->incremental_marking()->Abort();
+
+ // The following two calls will increment HEAP->global_ic_age().
+ // Since incremental marking is off, IdleNotification will do full GC.
+ const int kLongIdlePauseInMs = 1000;
+ v8::V8::ContextDisposedNotification();
+ v8::V8::IdleNotification(kLongIdlePauseInMs);
+
+ CHECK_EQ(HEAP->global_ic_age(), f->shared()->ic_age());
+ CHECK_EQ(0, f->shared()->opt_count());
+ CHECK_EQ(0, f->shared()->code()->profiler_ticks());
+}