Revision: 18367
Author: [email protected]
Date: Wed Dec 18 21:23:56 2013 UTC
Log: Use an allocation site scratchpad to speed up allocation site
processing during gc.
BUG=
[email protected]
Review URL: https://codereview.chromium.org/99133017
http://code.google.com/p/v8/source/detail?r=18367
Modified:
/branches/bleeding_edge/src/heap-inl.h
/branches/bleeding_edge/src/heap.cc
/branches/bleeding_edge/src/heap.h
/branches/bleeding_edge/src/mark-compact.cc
/branches/bleeding_edge/src/objects-inl.h
/branches/bleeding_edge/src/objects.h
=======================================
--- /branches/bleeding_edge/src/heap-inl.h Wed Dec 18 20:08:54 2013 UTC
+++ /branches/bleeding_edge/src/heap-inl.h Wed Dec 18 21:23:56 2013 UTC
@@ -490,7 +490,15 @@
object, true);
if (memento != NULL) {
ASSERT(memento->IsValid());
- memento->GetAllocationSite()->IncrementMementoFoundCount();
+ bool add_to_scratchpad =
+ memento->GetAllocationSite()->IncrementMementoFoundCount();
+ Heap* heap = object->GetIsolate()->heap();
+ if (add_to_scratchpad && heap->allocation_sites_scratchpad_length <
+ kAllocationSiteScratchpadSize) {
+ heap->allocation_sites_scratchpad[
+ heap->allocation_sites_scratchpad_length++] =
+ memento->GetAllocationSite();
+ }
}
}
}
=======================================
--- /branches/bleeding_edge/src/heap.cc Wed Dec 18 10:40:26 2013 UTC
+++ /branches/bleeding_edge/src/heap.cc Wed Dec 18 21:23:56 2013 UTC
@@ -148,6 +148,7 @@
#ifdef VERIFY_HEAP
no_weak_object_verification_scope_depth_(0),
#endif
+ allocation_sites_scratchpad_length(0),
promotion_queue_(this),
configured_(false),
chunks_queued_for_free_(NULL),
@@ -503,25 +504,45 @@
}
-void Heap::GarbageCollectionEpilogue() {
+void Heap::ProcessPretenuringFeedback() {
if (FLAG_allocation_site_pretenuring) {
int tenure_decisions = 0;
int dont_tenure_decisions = 0;
int allocation_mementos_found = 0;
+ int allocation_sites = 0;
+ int active_allocation_sites = 0;
- Object* cur = allocation_sites_list();
- while (cur->IsAllocationSite()) {
- AllocationSite* casted = AllocationSite::cast(cur);
- allocation_mementos_found += casted->memento_found_count()->value();
- if (casted->DigestPretenuringFeedback()) {
- if (casted->GetPretenureMode() == TENURED) {
+ // If the scratchpad overflowed, we have to iterate over the allocation
+ sites list.
+ bool use_scratchpad =
+ allocation_sites_scratchpad_length < kAllocationSiteScratchpadSize;
+
+ int i = 0;
+ Object* list_element = allocation_sites_list();
+ while (use_scratchpad ?
+ i < allocation_sites_scratchpad_length :
+ list_element->IsAllocationSite()) {
+ AllocationSite* site = use_scratchpad ?
+ allocation_sites_scratchpad[i] :
AllocationSite::cast(list_element);
+ allocation_mementos_found += site->memento_found_count()->value();
+ if (site->memento_found_count()->value() > 0) {
+ active_allocation_sites++;
+ }
+ if (site->DigestPretenuringFeedback()) {
+ if (site->GetPretenureMode() == TENURED) {
tenure_decisions++;
} else {
dont_tenure_decisions++;
}
}
- cur = casted->weak_next();
+ allocation_sites++;
+ if (use_scratchpad) {
+ i++;
+ } else {
+ list_element = site->weak_next();
+ }
}
+ allocation_sites_scratchpad_length = 0;
// TODO(mvstanton): Pretenure decisions are only made once for an
allocation
// site. Find a sane way to decide about revisiting the decision later.
@@ -530,14 +551,21 @@
(allocation_mementos_found > 0 ||
tenure_decisions > 0 ||
dont_tenure_decisions > 0)) {
- PrintF("GC: (#mementos, #tenure decisions, #donttenure decisions) "
- "(%d, %d, %d)\n",
+ PrintF("GC: (mode, #visited allocation sites, #active allocation
sites, "
+ "#mementos, #tenure decisions, #donttenure decisions) "
+ "(%s, %d, %d, %d, %d, %d)\n",
+ use_scratchpad ? "use scratchpad" : "use list",
+ allocation_sites,
+ active_allocation_sites,
allocation_mementos_found,
tenure_decisions,
dont_tenure_decisions);
}
}
+}
+
+void Heap::GarbageCollectionEpilogue() {
store_buffer()->GCEpilogue();
// In release mode, we only zap the from space under heap verification.
@@ -1564,6 +1592,8 @@
IncrementYoungSurvivorsCounter(static_cast<int>(
(PromotedSpaceSizeOfObjects() - survived_watermark) +
new_space_.Size()));
+ ProcessPretenuringFeedback();
+
LOG(isolate_, ResourceEvent("scavenge", "end"));
gc_state_ = NOT_IN_GC;
=======================================
--- /branches/bleeding_edge/src/heap.h Wed Dec 18 10:40:26 2013 UTC
+++ /branches/bleeding_edge/src/heap.h Wed Dec 18 21:23:56 2013 UTC
@@ -2057,6 +2057,11 @@
void GarbageCollectionPrologue();
void GarbageCollectionEpilogue();
+ // Pretenuring decisions are made based on feedback collected during new
+ // space evacuation. Note that between feedback collection and calling
this
+ method objects in old space must not move.
+ void ProcessPretenuringFeedback();
+
// Checks whether a global GC is necessary
GarbageCollector SelectGarbageCollector(AllocationSpace space,
const char** reason);
@@ -2383,6 +2388,11 @@
int no_weak_object_verification_scope_depth_;
#endif
+
+ static const int kAllocationSiteScratchpadSize = 256;
+ int allocation_sites_scratchpad_length;
+ AllocationSite*
allocation_sites_scratchpad[kAllocationSiteScratchpadSize];
+
static const int kMaxMarkSweepsInIdleRound = 7;
static const int kIdleScavengeThreshold = 5;
=======================================
--- /branches/bleeding_edge/src/mark-compact.cc Tue Dec 10 12:11:45 2013 UTC
+++ /branches/bleeding_edge/src/mark-compact.cc Wed Dec 18 21:23:56 2013 UTC
@@ -3360,6 +3360,13 @@
code_slots_filtering_required = MarkInvalidatedCode();
EvacuateNewSpace();
}
+
+ // We have to traverse our allocation sites scratchpad which contains raw
+ // pointers before we move objects. During new space evacuation we
+ // gathered pretenuring statistics. The found allocation sites may not be
+ // valid after compacting old space.
+ heap()->ProcessPretenuringFeedback();
+
{ GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_EVACUATE_PAGES);
EvacuatePages();
=======================================
--- /branches/bleeding_edge/src/objects-inl.h Wed Dec 18 17:53:50 2013 UTC
+++ /branches/bleeding_edge/src/objects-inl.h Wed Dec 18 21:23:56 2013 UTC
@@ -1370,9 +1370,12 @@
}
-inline void AllocationSite::IncrementMementoFoundCount() {
+inline bool AllocationSite::IncrementMementoFoundCount() {
+ if (IsZombie()) return false;
+
int value = memento_found_count()->value();
set_memento_found_count(Smi::FromInt(value + 1));
+ return value == 0;
}
=======================================
--- /branches/bleeding_edge/src/objects.h Wed Dec 18 20:08:54 2013 UTC
+++ /branches/bleeding_edge/src/objects.h Wed Dec 18 21:23:56 2013 UTC
@@ -8136,7 +8136,9 @@
class UnusedBits: public BitField<int, 15, 14> {};
class DoNotInlineBit: public BitField<bool, 29, 1> {};
- inline void IncrementMementoFoundCount();
+ // Increments the mementos found counter and returns true when the first
+ // memento was found for a given allocation site.
+ inline bool IncrementMementoFoundCount();
inline void IncrementMementoCreateCount();
--
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev
---
You received this message because you are subscribed to the Google Groups "v8-dev" group.
To unsubscribe from this group and stop receiving emails from it, send an email
to [email protected].
For more options, visit https://groups.google.com/groups/opt_out.