Revision: 18104
Author: [email protected]
Date: Wed Nov 27 14:03:40 2013 UTC
Log: The goal is to discover the appropriate heap space for objects
created in full
code. By the time we optimize the code, we'll be able to decide on new or
old
space based on the number of surviving objects after one or more gcs.
The mechanism is a "memento" placed behind objects in the heap. It's
currently
done for array and object literals, with plans to use mementos for
constructed
objects as well (in a later CL).
The feature is behind the flag allocation_site_pretenuring, currently off.
[email protected]
Review URL: https://codereview.chromium.org/40063002
http://code.google.com/p/v8/source/detail?r=18104
Modified:
/branches/bleeding_edge/src/allocation-site-scopes.cc
/branches/bleeding_edge/src/allocation-site-scopes.h
/branches/bleeding_edge/src/arm/full-codegen-arm.cc
/branches/bleeding_edge/src/heap-inl.h
/branches/bleeding_edge/src/heap.cc
/branches/bleeding_edge/src/heap.h
/branches/bleeding_edge/src/hydrogen.cc
/branches/bleeding_edge/src/hydrogen.h
/branches/bleeding_edge/src/ia32/full-codegen-ia32.cc
/branches/bleeding_edge/src/mark-compact.cc
/branches/bleeding_edge/src/mark-compact.h
/branches/bleeding_edge/src/mips/full-codegen-mips.cc
/branches/bleeding_edge/src/objects-inl.h
/branches/bleeding_edge/src/objects.cc
/branches/bleeding_edge/src/objects.h
/branches/bleeding_edge/src/x64/full-codegen-x64.cc
/branches/bleeding_edge/test/cctest/test-heap.cc
=======================================
--- /branches/bleeding_edge/src/allocation-site-scopes.cc Mon Nov 25
12:41:27 2013 UTC
+++ /branches/bleeding_edge/src/allocation-site-scopes.cc Wed Nov 27
14:03:40 2013 UTC
@@ -81,5 +81,22 @@
}
}
}
+
+
+bool AllocationSiteUsageContext::ShouldCreateMemento(Handle<JSObject>
object) {
+ if (activated_ &&
AllocationSite::CanTrack(object->map()->instance_type())) {
+ if (FLAG_allocation_site_pretenuring ||
+ AllocationSite::GetMode(object->GetElementsKind()) ==
+ TRACK_ALLOCATION_SITE) {
+ if (FLAG_trace_creation_allocation_sites) {
+ PrintF("*** Creating Memento for %s %p\n",
+ object->IsJSArray() ? "JSArray" : "JSObject",
+ static_cast<void*>(*object));
+ }
+ return true;
+ }
+ }
+ return false;
+}
} } // namespace v8::internal
=======================================
--- /branches/bleeding_edge/src/allocation-site-scopes.h Mon Nov 25
12:41:27 2013 UTC
+++ /branches/bleeding_edge/src/allocation-site-scopes.h Wed Nov 27
14:03:40 2013 UTC
@@ -41,16 +41,14 @@
// boilerplate with AllocationSite and AllocationMemento support.
class AllocationSiteContext {
public:
- AllocationSiteContext(Isolate* isolate, bool activated) {
+ explicit AllocationSiteContext(Isolate* isolate) {
isolate_ = isolate;
- activated_ = activated;
};
Handle<AllocationSite> top() { return top_; }
Handle<AllocationSite> current() { return current_; }
- // If activated, then recursively create mementos
- bool activated() const { return activated_; }
+ bool ShouldCreateMemento(Handle<JSObject> object) { return false; }
Isolate* isolate() { return isolate_; }
@@ -68,7 +66,6 @@
Isolate* isolate_;
Handle<AllocationSite> top_;
Handle<AllocationSite> current_;
- bool activated_;
};
@@ -77,7 +74,7 @@
class AllocationSiteCreationContext : public AllocationSiteContext {
public:
explicit AllocationSiteCreationContext(Isolate* isolate)
- : AllocationSiteContext(isolate, true) { }
+ : AllocationSiteContext(isolate) { }
Handle<AllocationSite> EnterNewScope();
void ExitScope(Handle<AllocationSite> site, Handle<JSObject> object);
@@ -90,8 +87,9 @@
public:
AllocationSiteUsageContext(Isolate* isolate, Handle<AllocationSite> site,
bool activated)
- : AllocationSiteContext(isolate, activated),
- top_site_(site) { }
+ : AllocationSiteContext(isolate),
+ top_site_(site),
+ activated_(activated) { }
inline Handle<AllocationSite> EnterNewScope() {
if (top().is_null()) {
@@ -112,9 +110,12 @@
// recursive walk of a nested literal.
ASSERT(object.is_null() || *object == scope_site->transition_info());
}
+
+ bool ShouldCreateMemento(Handle<JSObject> object);
private:
Handle<AllocationSite> top_site_;
+ bool activated_;
};
=======================================
--- /branches/bleeding_edge/src/arm/full-codegen-arm.cc Mon Nov 25 12:41:27
2013 UTC
+++ /branches/bleeding_edge/src/arm/full-codegen-arm.cc Wed Nov 27 14:03:40
2013 UTC
@@ -1783,6 +1783,14 @@
bool has_fast_elements =
IsFastObjectElementsKind(constant_elements_kind);
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
+
+ AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
+ ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
+ if (has_constant_fast_elements && !FLAG_allocation_site_pretenuring) {
+ // If the only customer of allocation sites is transitioning, then
+ // we can turn it off if we don't have anywhere else to transition to.
+ allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
+ }
__ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
@@ -1792,7 +1800,7 @@
isolate()->heap()->fixed_cow_array_map()) {
FastCloneShallowArrayStub stub(
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
- DONT_TRACK_ALLOCATION_SITE,
+ allocation_site_mode,
length);
__ CallStub(&stub);
__ IncrementCounter(
@@ -1807,12 +1815,9 @@
FLAG_smi_only_arrays);
FastCloneShallowArrayStub::Mode mode =
FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
- AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
- ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
if (has_fast_elements) {
mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
- allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
}
FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
=======================================
--- /branches/bleeding_edge/src/heap-inl.h Thu Nov 21 08:06:02 2013 UTC
+++ /branches/bleeding_edge/src/heap-inl.h Wed Nov 27 14:03:40 2013 UTC
@@ -481,6 +481,18 @@
void Heap::ScavengePointer(HeapObject** p) {
ScavengeObject(p, *p);
}
+
+
+void Heap::UpdateAllocationSiteFeedback(HeapObject* object) {
+ if (FLAG_allocation_site_pretenuring && object->IsJSObject()) {
+ AllocationMemento* memento = AllocationMemento::FindForJSObject(
+ JSObject::cast(object), true);
+ if (memento != NULL) {
+ ASSERT(memento->IsValid());
+ memento->GetAllocationSite()->IncrementMementoFoundCount();
+ }
+ }
+}
void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
@@ -501,12 +513,7 @@
return;
}
- if (FLAG_trace_track_allocation_sites && object->IsJSObject()) {
- if (AllocationMemento::FindForJSObject(JSObject::cast(object), true) !=
- NULL) {
- object->GetIsolate()->heap()->allocation_mementos_found_++;
- }
- }
+ UpdateAllocationSiteFeedback(object);
// AllocationMementos are unrooted and shouldn't survive a scavenge
ASSERT(object->map() != object->GetHeap()->allocation_memento_map());
=======================================
--- /branches/bleeding_edge/src/heap.cc Tue Nov 26 14:21:46 2013 UTC
+++ /branches/bleeding_edge/src/heap.cc Wed Nov 27 14:03:40 2013 UTC
@@ -87,7 +87,6 @@
contexts_disposed_(0),
global_ic_age_(0),
flush_monomorphic_ics_(false),
- allocation_mementos_found_(0),
scan_on_scavenge_pages_(0),
new_space_(this),
old_pointer_space_(NULL),
@@ -506,6 +505,40 @@
void Heap::GarbageCollectionEpilogue() {
+ if (FLAG_allocation_site_pretenuring) {
+ int tenure_decisions = 0;
+ int dont_tenure_decisions = 0;
+ int allocation_mementos_found = 0;
+
+ Object* cur = allocation_sites_list();
+ while (cur->IsAllocationSite()) {
+ AllocationSite* casted = AllocationSite::cast(cur);
+ allocation_mementos_found += casted->memento_found_count()->value();
+ if (casted->DigestPretenuringFeedback()) {
+ if (casted->GetPretenureMode() == TENURED) {
+ tenure_decisions++;
+ } else {
+ dont_tenure_decisions++;
+ }
+ }
+ cur = casted->weak_next();
+ }
+
+ // TODO(mvstanton): Pretenure decisions are only made once for an
allocation
+ // site. Find a sane way to decide about revisiting the decision later.
+
+ if (FLAG_trace_track_allocation_sites &&
+ (allocation_mementos_found > 0 ||
+ tenure_decisions > 0 ||
+ dont_tenure_decisions > 0)) {
+ PrintF("GC: (#mementos, #tenure decisions, #donttenure decisions) "
+ "(%d, %d, %d)\n",
+ allocation_mementos_found,
+ tenure_decisions,
+ dont_tenure_decisions);
+ }
+ }
+
store_buffer()->GCEpilogue();
// In release mode, we only zap the from space under heap verification.
@@ -1393,8 +1426,6 @@
void Heap::Scavenge() {
RelocationLock relocation_lock(this);
- allocation_mementos_found_ = 0;
-
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
#endif
@@ -1542,11 +1573,6 @@
gc_state_ = NOT_IN_GC;
scavenges_since_last_idle_round_++;
-
- if (FLAG_trace_track_allocation_sites && allocation_mementos_found_ > 0)
{
- PrintF("AllocationMementos found during scavenge = %d\n",
- allocation_mementos_found_);
- }
}
@@ -4357,6 +4383,17 @@
#endif
return new_code;
}
+
+
+void Heap::InitializeAllocationMemento(AllocationMemento* memento,
+ AllocationSite* allocation_site) {
+ memento->set_map_no_write_barrier(allocation_memento_map());
+ ASSERT(allocation_site->map() == allocation_site_map());
+ memento->set_allocation_site(allocation_site, SKIP_WRITE_BARRIER);
+ if (FLAG_allocation_site_pretenuring) {
+ allocation_site->IncrementMementoCreateCount();
+ }
+}
MaybeObject* Heap::AllocateWithAllocationSite(Map* map, AllocationSpace
space,
@@ -4375,9 +4412,7 @@
HeapObject::cast(result)->set_map_no_write_barrier(map);
AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
reinterpret_cast<Address>(result) + map->instance_size());
- alloc_memento->set_map_no_write_barrier(allocation_memento_map());
- ASSERT(allocation_site->map() == allocation_site_map());
- alloc_memento->set_allocation_site(*allocation_site, SKIP_WRITE_BARRIER);
+ InitializeAllocationMemento(alloc_memento, *allocation_site);
return result;
}
@@ -4810,8 +4845,7 @@
int object_size = map->instance_size();
Object* clone;
- ASSERT(site == NULL || (AllocationSite::CanTrack(map->instance_type()) &&
- map->instance_type() == JS_ARRAY_TYPE));
+ ASSERT(site == NULL || AllocationSite::CanTrack(map->instance_type()));
WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
@@ -4850,9 +4884,7 @@
if (site != NULL) {
AllocationMemento* alloc_memento =
reinterpret_cast<AllocationMemento*>(
reinterpret_cast<Address>(clone) + object_size);
- alloc_memento->set_map_no_write_barrier(allocation_memento_map());
- ASSERT(site->map() == allocation_site_map());
- alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
+ InitializeAllocationMemento(alloc_memento, site);
HeapProfiler* profiler = isolate()->heap_profiler();
if (profiler->is_tracking_allocations()) {
profiler->UpdateObjectSizeEvent(HeapObject::cast(clone)->address(),
=======================================
--- /branches/bleeding_edge/src/heap.h Thu Nov 21 08:38:51 2013 UTC
+++ /branches/bleeding_edge/src/heap.h Wed Nov 27 14:03:40 2013 UTC
@@ -1456,6 +1456,11 @@
static inline void ScavengePointer(HeapObject** p);
static inline void ScavengeObject(HeapObject** p, HeapObject* object);
+ // An object may have an AllocationSite associated with it through a
trailing
+ // AllocationMemento. Its feedback should be updated when objects are
found
+ // in the heap.
+ static inline void UpdateAllocationSiteFeedback(HeapObject* object);
+
// Support for partial snapshots. After calling this we have a linear
// space to write objects in each space.
void ReserveSpace(int *sizes, Address* addresses);
@@ -1892,9 +1897,6 @@
bool flush_monomorphic_ics_;
- // AllocationMementos found in new space.
- int allocation_mementos_found_;
-
int scan_on_scavenge_pages_;
NewSpace new_space_;
@@ -2110,6 +2112,8 @@
void InitializeJSObjectFromMap(JSObject* obj,
FixedArray* properties,
Map* map);
+ void InitializeAllocationMemento(AllocationMemento* memento,
+ AllocationSite* allocation_site);
bool CreateInitialMaps();
bool CreateInitialObjects();
=======================================
--- /branches/bleeding_edge/src/hydrogen.cc Mon Nov 25 14:41:46 2013 UTC
+++ /branches/bleeding_edge/src/hydrogen.cc Wed Nov 27 14:03:40 2013 UTC
@@ -2240,6 +2240,23 @@
BuildCreateAllocationMemento(array,
JSArray::kSize,
allocation_site_payload);
+ if (FLAG_allocation_site_pretenuring) {
+ // TODO(mvstanton): move this code into BuildCreateAllocationMemento
when
+ // constructed arrays also pay attention to pretenuring.
+ HObjectAccess access =
+ HObjectAccess::ForAllocationSiteOffset(
+ AllocationSite::kMementoCreateCountOffset);
+ HValue* create_info = Add<HLoadNamedField>(allocation_site_payload,
+ access);
+ HInstruction* new_create_info = HAdd::New(zone(), context(),
+ create_info,
+ graph()->GetConstant1());
+ new_create_info->ClearFlag(HValue::kCanOverflow);
+ HStoreNamedField* store =
Add<HStoreNamedField>(allocation_site_payload,
+ access,
new_create_info);
+ // No write barrier needed to store a smi.
+ store->SkipWriteBarrier();
+ }
}
int elements_location = JSArray::kSize;
@@ -9332,8 +9349,26 @@
? HType::JSArray() : HType::JSObject();
HValue* object_size_constant = Add<HConstant>(
boilerplate_object->map()->instance_size());
+
+ // We should pull pre-tenure mode from the allocation site.
+ // For now, just see what it says, and remark on it if it says
+ // we should pretenure. That means the rudimentary counting in the
garbage
+ // collector is having an effect.
+ PretenureFlag pretenure_flag = isolate()->heap()->GetPretenureMode();
+ if (FLAG_allocation_site_pretenuring) {
+ pretenure_flag = site_context->current()->GetPretenureMode()
+ ? TENURED
+ : NOT_TENURED;
+ if (FLAG_trace_track_allocation_sites) {
+ PrintF("Hydrogen: AllocationSite %p boilerplate %p %s\n",
+ static_cast<void*>(*(site_context->current())),
+ static_cast<void*>(*boilerplate_object),
+ pretenure_flag == TENURED ? "tenured" : "not tenured");
+ }
+ }
+
HInstruction* object = Add<HAllocate>(object_size_constant, type,
- isolate()->heap()->GetPretenureMode(), instance_type);
+ pretenure_flag, instance_type);
BuildEmitObjectHeader(boilerplate_object, object);
@@ -9347,10 +9382,10 @@
HValue* object_elements_size = Add<HConstant>(elements_size);
if (boilerplate_object->HasFastDoubleElements()) {
object_elements = Add<HAllocate>(object_elements_size,
HType::JSObject(),
- isolate()->heap()->GetPretenureMode(), FIXED_DOUBLE_ARRAY_TYPE);
+ pretenure_flag, FIXED_DOUBLE_ARRAY_TYPE);
} else {
object_elements = Add<HAllocate>(object_elements_size,
HType::JSObject(),
- isolate()->heap()->GetPretenureMode(), FIXED_ARRAY_TYPE);
+ pretenure_flag, FIXED_ARRAY_TYPE);
}
}
BuildInitElementsInObjectHeader(boilerplate_object, object,
object_elements);
@@ -9363,7 +9398,8 @@
// Copy in-object properties.
if (boilerplate_object->map()->NumberOfFields() != 0) {
- BuildEmitInObjectProperties(boilerplate_object, object, site_context);
+ BuildEmitInObjectProperties(boilerplate_object, object, site_context,
+ pretenure_flag);
}
return object;
}
@@ -9416,7 +9452,8 @@
void HOptimizedGraphBuilder::BuildEmitInObjectProperties(
Handle<JSObject> boilerplate_object,
HInstruction* object,
- AllocationSiteUsageContext* site_context) {
+ AllocationSiteUsageContext* site_context,
+ PretenureFlag pretenure_flag) {
Handle<DescriptorArray> descriptors(
boilerplate_object->map()->instance_descriptors());
int limit = boilerplate_object->map()->NumberOfOwnDescriptors();
@@ -9452,15 +9489,13 @@
if (representation.IsDouble()) {
// Allocate a HeapNumber box and store the value into it.
HValue* heap_number_constant = Add<HConstant>(HeapNumber::kSize);
- // TODO(mvstanton): This heap number alloc does not have a
corresponding
+ // This heap number alloc does not have a corresponding
// AllocationSite. That is okay because
// 1) it's a child object of another object with a valid
allocation site
// 2) we can just use the mode of the parent object for pretenuring
- // The todo is replace GetPretenureMode() with
- // site_context->top()->GetPretenureMode().
HInstruction* double_box =
Add<HAllocate>(heap_number_constant, HType::HeapNumber(),
- isolate()->heap()->GetPretenureMode(), HEAP_NUMBER_TYPE);
+ pretenure_flag, HEAP_NUMBER_TYPE);
AddStoreMapConstant(double_box,
isolate()->factory()->heap_number_map());
Add<HStoreNamedField>(double_box,
HObjectAccess::ForHeapNumberValue(),
=======================================
--- /branches/bleeding_edge/src/hydrogen.h Mon Nov 25 14:41:46 2013 UTC
+++ /branches/bleeding_edge/src/hydrogen.h Wed Nov 27 14:03:40 2013 UTC
@@ -2422,7 +2422,8 @@
void BuildEmitInObjectProperties(Handle<JSObject> boilerplate_object,
HInstruction* object,
- AllocationSiteUsageContext*
site_context);
+ AllocationSiteUsageContext*
site_context,
+ PretenureFlag pretenure_flag);
void BuildEmitElements(Handle<JSObject> boilerplate_object,
Handle<FixedArrayBase> elements,
=======================================
--- /branches/bleeding_edge/src/ia32/full-codegen-ia32.cc Mon Nov 25
12:41:27 2013 UTC
+++ /branches/bleeding_edge/src/ia32/full-codegen-ia32.cc Wed Nov 27
14:03:40 2013 UTC
@@ -1719,6 +1719,14 @@
IsFastObjectElementsKind(constant_elements_kind);
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
+
+ AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
+ ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
+ if (has_constant_fast_elements && !FLAG_allocation_site_pretenuring) {
+ // If the only customer of allocation sites is transitioning, then
+ // we can turn it off if we don't have anywhere else to transition to.
+ allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
+ }
Heap* heap = isolate()->heap();
if (has_constant_fast_elements &&
@@ -1732,7 +1740,7 @@
__ mov(ecx, Immediate(constant_elements));
FastCloneShallowArrayStub stub(
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
- DONT_TRACK_ALLOCATION_SITE,
+ allocation_site_mode,
length);
__ CallStub(&stub);
} else if (expr->depth() > 1 || Serializer::enabled() ||
@@ -1748,14 +1756,11 @@
FLAG_smi_only_arrays);
FastCloneShallowArrayStub::Mode mode =
FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
- AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
- ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
// If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
// change, so it's possible to specialize the stub in advance.
if (has_constant_fast_elements) {
mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
- allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
}
__ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
=======================================
--- /branches/bleeding_edge/src/mark-compact.cc Fri Nov 22 13:10:31 2013 UTC
+++ /branches/bleeding_edge/src/mark-compact.cc Wed Nov 27 14:03:40 2013 UTC
@@ -406,8 +406,6 @@
ASSERT(state_ == PREPARE_GC);
ASSERT(encountered_weak_collections_ == Smi::FromInt(0));
- heap()->allocation_mementos_found_ = 0;
-
MarkLiveObjects();
ASSERT(heap_->incremental_marking()->IsStopped());
@@ -449,11 +447,6 @@
marking_parity_ = EVEN_MARKING_PARITY;
}
- if (FLAG_trace_track_allocation_sites &&
- heap()->allocation_mementos_found_ > 0) {
- PrintF("AllocationMementos found during mark-sweep = %d\n",
- heap()->allocation_mementos_found_);
- }
tracer_ = NULL;
}
@@ -1889,6 +1882,14 @@
virtual Object* RetainAs(Object* object) {
if (Marking::MarkBitFrom(HeapObject::cast(object)).Get()) {
return object;
+ } else if (object->IsAllocationSite() &&
+ !(AllocationSite::cast(object)->IsZombie())) {
+ // "dead" AllocationSites need to live long enough for a traversal
of new
+ // space. These sites get a one-time reprieve.
+ AllocationSite* site = AllocationSite::cast(object);
+ site->MarkZombie();
+ site->GetHeap()->mark_compact_collector()->MarkAllocationSite(site);
+ return object;
} else {
return NULL;
}
@@ -2000,12 +2001,7 @@
int size = object->Size();
survivors_size += size;
- if (FLAG_trace_track_allocation_sites && object->IsJSObject()) {
- if (AllocationMemento::FindForJSObject(JSObject::cast(object),
true)
- != NULL) {
- heap()->allocation_mementos_found_++;
- }
- }
+ Heap::UpdateAllocationSiteFeedback(object);
offset++;
current_cell >>= 1;
@@ -2096,6 +2092,12 @@
string_table->IteratePrefix(visitor);
ProcessMarkingDeque();
}
+
+
+void MarkCompactCollector::MarkAllocationSite(AllocationSite* site) {
+ MarkBit mark_bit = Marking::MarkBitFrom(site);
+ SetMark(site, mark_bit);
+}
void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
=======================================
--- /branches/bleeding_edge/src/mark-compact.h Fri Oct 4 07:25:24 2013 UTC
+++ /branches/bleeding_edge/src/mark-compact.h Wed Nov 27 14:03:40 2013 UTC
@@ -739,6 +739,10 @@
// marking its contents.
void MarkWeakObjectToCodeTable();
+ // Special case for processing weak references in a full collection. We
need
+ // to artificially keep AllocationSites alive for a time.
+ void MarkAllocationSite(AllocationSite* site);
+
private:
MarkCompactCollector();
~MarkCompactCollector();
=======================================
--- /branches/bleeding_edge/src/mips/full-codegen-mips.cc Mon Nov 25
21:18:59 2013 UTC
+++ /branches/bleeding_edge/src/mips/full-codegen-mips.cc Wed Nov 27
14:03:40 2013 UTC
@@ -1795,6 +1795,14 @@
IsFastObjectElementsKind(constant_elements_kind);
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
+
+ AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
+ ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
+ if (has_constant_fast_elements && !FLAG_allocation_site_pretenuring) {
+ // If the only customer of allocation sites is transitioning, then
+ // we can turn it off if we don't have anywhere else to transition to.
+ allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
+ }
__ mov(a0, result_register());
__ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
@@ -1805,7 +1813,7 @@
isolate()->heap()->fixed_cow_array_map()) {
FastCloneShallowArrayStub stub(
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
- DONT_TRACK_ALLOCATION_SITE,
+ allocation_site_mode,
length);
__ CallStub(&stub);
__ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(),
@@ -1820,12 +1828,9 @@
FLAG_smi_only_arrays);
FastCloneShallowArrayStub::Mode mode =
FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
- AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
- ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
if (has_fast_elements) {
mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
- allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
}
FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
=======================================
--- /branches/bleeding_edge/src/objects-inl.h Fri Nov 22 11:35:39 2013 UTC
+++ /branches/bleeding_edge/src/objects-inl.h Wed Nov 27 14:03:40 2013 UTC
@@ -1320,6 +1320,16 @@
set_dependent_code(DependentCode::cast(GetHeap()->empty_fixed_array()),
SKIP_WRITE_BARRIER);
}
+
+
+void AllocationSite::MarkZombie() {
+ ASSERT(!IsZombie());
+ set_pretenure_decision(Smi::FromInt(kZombie));
+ // Clear all non-smi fields
+ set_transition_info(Smi::FromInt(0));
+ set_dependent_code(DependentCode::cast(GetHeap()->empty_fixed_array()),
+ SKIP_WRITE_BARRIER);
+}
// Heuristic: We only need to create allocation site info if the
boilerplate
@@ -1348,6 +1358,9 @@
inline bool AllocationSite::CanTrack(InstanceType type) {
+ if (FLAG_allocation_site_pretenuring) {
+ return type == JS_ARRAY_TYPE || type == JS_OBJECT_TYPE;
+ }
return type == JS_ARRAY_TYPE;
}
@@ -1365,6 +1378,45 @@
UNREACHABLE();
return DependentCode::kAllocationSiteTransitionChangedGroup;
}
+
+
+inline void AllocationSite::IncrementMementoFoundCount() {
+ int value = memento_found_count()->value();
+ set_memento_found_count(Smi::FromInt(value + 1));
+}
+
+
+inline void AllocationSite::IncrementMementoCreateCount() {
+ ASSERT(FLAG_allocation_site_pretenuring);
+ int value = memento_create_count()->value();
+ set_memento_create_count(Smi::FromInt(value + 1));
+}
+
+
+inline bool AllocationSite::DigestPretenuringFeedback() {
+ bool decision_made = false;
+ if (!PretenuringDecisionMade()) {
+ int create_count = memento_create_count()->value();
+ if (create_count >= kPretenureMinimumCreated) {
+ int found_count = memento_found_count()->value();
+ double ratio = static_cast<double>(found_count) / create_count;
+ if (FLAG_trace_track_allocation_sites) {
+ PrintF("AllocationSite: %p (created, found, ratio) (%d, %d, %f)\n",
+ static_cast<void*>(this), create_count, found_count, ratio);
+ }
+ int result = ratio >= kPretenureRatio ? kTenure : kDontTenure;
+ set_pretenure_decision(Smi::FromInt(result));
+ decision_made = true;
+ // TODO(mvstanton): if the decision represents a change, any
dependent
+ // code registered for pretenuring changes should be deopted.
+ }
+ }
+
+ // Clear feedback calculation fields until the next gc.
+ set_memento_found_count(Smi::FromInt(0));
+ set_memento_create_count(Smi::FromInt(0));
+ return decision_made;
+}
void JSObject::EnsureCanContainHeapObjectElements(Handle<JSObject> object)
{
=======================================
--- /branches/bleeding_edge/src/objects.cc Mon Nov 25 13:27:36 2013 UTC
+++ /branches/bleeding_edge/src/objects.cc Wed Nov 27 14:03:40 2013 UTC
@@ -5718,10 +5718,7 @@
Handle<JSObject> copy;
if (copying) {
Handle<AllocationSite> site_to_pass;
- if (site_context()->activated() &&
- AllocationSite::CanTrack(object->map()->instance_type()) &&
- AllocationSite::GetMode(object->GetElementsKind()) ==
- TRACK_ALLOCATION_SITE) {
+ if (site_context()->ShouldCreateMemento(object)) {
site_to_pass = site_context()->current();
}
CALL_AND_RETRY_OR_DIE(isolate,
@@ -9181,9 +9178,10 @@
AllocationMemento* AllocationMemento::FindForJSObject(JSObject* object,
bool in_GC) {
// Currently, AllocationMemento objects are only allocated immediately
- // after JSArrays in NewSpace, and detecting whether a JSArray has one
- // involves carefully checking the object immediately after the JSArray
- // (if there is one) to see if it's an AllocationMemento.
+ // after JSArrays and some JSObjects in NewSpace. Detecting whether a
+ // memento is present involves carefully checking the object immediately
+ // after the current object (if there is one) to see if it's an
+ // AllocationMemento.
if (FLAG_track_allocation_sites &&
object->GetHeap()->InNewSpace(object)) {
Address ptr_end = (reinterpret_cast<Address>(object) - kHeapObjectTag)
+
object->Size();
@@ -9201,7 +9199,9 @@
object->GetHeap()->allocation_memento_map()) {
AllocationMemento* memento = AllocationMemento::cast(
reinterpret_cast<Object*>(ptr_end + kHeapObjectTag));
- return memento;
+ if (memento->IsValid()) {
+ return memento;
+ }
}
}
}
@@ -12787,6 +12787,9 @@
CALL_HEAP_FUNCTION_VOID(object->GetIsolate(),
object->TransitionElementsKind(to_kind));
}
+
+
+const double AllocationSite::kPretenureRatio = 0.60;
bool AllocationSite::IsNestedSite() {
=======================================
--- /branches/bleeding_edge/src/objects.h Mon Nov 25 12:41:27 2013 UTC
+++ /branches/bleeding_edge/src/objects.h Wed Nov 27 14:03:40 2013 UTC
@@ -8120,6 +8120,16 @@
class AllocationSite: public Struct {
public:
static const uint32_t kMaximumArrayBytesToPretransition = 8 * 1024;
+ static const double kPretenureRatio;
+ static const int kPretenureMinimumCreated = 100;
+
+ // Values for pretenure decision field.
+ enum {
+ kUndecided = 0,
+ kDontTenure = 1,
+ kTenure = 2,
+ kZombie = 3
+ };
DECL_ACCESSORS(transition_info, Object)
// nested_site threads a list of sites that represent nested literals
@@ -8128,16 +8138,14 @@
DECL_ACCESSORS(nested_site, Object)
DECL_ACCESSORS(memento_found_count, Smi)
DECL_ACCESSORS(memento_create_count, Smi)
+ // TODO(mvstanton): we don't need a whole integer to record pretenure
+ // decision. Consider sharing space with memento_found_count.
DECL_ACCESSORS(pretenure_decision, Smi)
DECL_ACCESSORS(dependent_code, DependentCode)
DECL_ACCESSORS(weak_next, Object)
inline void Initialize();
- bool HasNestedSites() {
- return nested_site()->IsAllocationSite();
- }
-
// This method is expensive, it should only be called for reporting.
bool IsNestedSite();
@@ -8145,6 +8153,28 @@
class UnusedBits: public BitField<int, 15, 14> {};
class DoNotInlineBit: public BitField<bool, 29, 1> {};
+ inline void IncrementMementoFoundCount();
+
+ inline void IncrementMementoCreateCount();
+
+ PretenureFlag GetPretenureMode() {
+ int mode = pretenure_decision()->value();
+ // Zombie objects "decide" to be untenured.
+ return (mode == kTenure) ? TENURED : NOT_TENURED;
+ }
+
+ // The pretenuring decision is made during gc, and the zombie state
allows
+ // us to recognize when an allocation site is just being kept alive
because
+ // a later traversal of new space may discover AllocationMementos that
point
+ // to this AllocationSite.
+ bool IsZombie() {
+ return pretenure_decision()->value() == kZombie;
+ }
+
+ inline void MarkZombie();
+
+ inline bool DigestPretenuringFeedback();
+
ElementsKind GetElementsKind() {
ASSERT(!SitePointsToLiteral());
int value = Smi::cast(transition_info())->value();
@@ -8218,6 +8248,10 @@
private:
inline DependentCode::DependencyGroup ToDependencyGroup(Reason reason);
+ bool PretenuringDecisionMade() {
+ return pretenure_decision()->value() != kUndecided;
+ }
+
DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationSite);
};
@@ -8229,7 +8263,10 @@
DECL_ACCESSORS(allocation_site, Object)
- bool IsValid() { return allocation_site()->IsAllocationSite(); }
+ bool IsValid() {
+ return allocation_site()->IsAllocationSite() &&
+ !AllocationSite::cast(allocation_site())->IsZombie();
+ }
AllocationSite* GetAllocationSite() {
ASSERT(IsValid());
return AllocationSite::cast(allocation_site());
=======================================
--- /branches/bleeding_edge/src/x64/full-codegen-x64.cc Mon Nov 25 12:41:27
2013 UTC
+++ /branches/bleeding_edge/src/x64/full-codegen-x64.cc Wed Nov 27 14:03:40
2013 UTC
@@ -1740,6 +1740,14 @@
IsFastObjectElementsKind(constant_elements_kind);
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
+
+ AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
+ ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
+ if (has_constant_fast_elements && !FLAG_allocation_site_pretenuring) {
+ // If the only customer of allocation sites is transitioning, then
+ // we can turn it off if we don't have anywhere else to transition to.
+ allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
+ }
Heap* heap = isolate()->heap();
if (has_constant_fast_elements &&
@@ -1753,7 +1761,7 @@
__ Move(rcx, constant_elements);
FastCloneShallowArrayStub stub(
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
- DONT_TRACK_ALLOCATION_SITE,
+ allocation_site_mode,
length);
__ CallStub(&stub);
} else if (expr->depth() > 1 || Serializer::enabled() ||
@@ -1769,14 +1777,11 @@
FLAG_smi_only_arrays);
FastCloneShallowArrayStub::Mode mode =
FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
- AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites
- ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE;
// If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
// change, so it's possible to specialize the stub in advance.
if (has_constant_fast_elements) {
mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
- allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
}
__ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
=======================================
--- /branches/bleeding_edge/test/cctest/test-heap.cc Fri Nov 22 12:43:17
2013 UTC
+++ /branches/bleeding_edge/test/cctest/test-heap.cc Wed Nov 27 14:03:40
2013 UTC
@@ -2184,6 +2184,7 @@
TEST(OptimizedPretenuringAllocationFolding) {
i::FLAG_allow_natives_syntax = true;
+ i::FLAG_allocation_site_pretenuring = false;
CcTest::InitializeVM();
if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
@@ -2220,6 +2221,7 @@
TEST(OptimizedPretenuringAllocationFoldingBlocks) {
i::FLAG_allow_natives_syntax = true;
+ i::FLAG_allocation_site_pretenuring = false;
CcTest::InitializeVM();
if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
@@ -2256,6 +2258,7 @@
TEST(OptimizedPretenuringObjectArrayLiterals) {
i::FLAG_allow_natives_syntax = true;
+ i::FLAG_allocation_site_pretenuring = false;
CcTest::InitializeVM();
if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
@@ -2281,6 +2284,7 @@
TEST(OptimizedPretenuringMixedInObjectProperties) {
i::FLAG_allow_natives_syntax = true;
+ i::FLAG_allocation_site_pretenuring = false;
CcTest::InitializeVM();
if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
@@ -2312,6 +2316,7 @@
TEST(OptimizedPretenuringDoubleArrayProperties) {
i::FLAG_allow_natives_syntax = true;
+ i::FLAG_allocation_site_pretenuring = false;
CcTest::InitializeVM();
if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
@@ -2337,6 +2342,7 @@
TEST(OptimizedPretenuringdoubleArrayLiterals) {
i::FLAG_allow_natives_syntax = true;
+ i::FLAG_allocation_site_pretenuring = false;
CcTest::InitializeVM();
if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
@@ -2362,6 +2368,7 @@
TEST(OptimizedPretenuringNestedMixedArrayLiterals) {
i::FLAG_allow_natives_syntax = true;
+ i::FLAG_allocation_site_pretenuring = false;
CcTest::InitializeVM();
if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
@@ -2396,6 +2403,7 @@
TEST(OptimizedPretenuringNestedObjectLiterals) {
i::FLAG_allow_natives_syntax = true;
+ i::FLAG_allocation_site_pretenuring = false;
CcTest::InitializeVM();
if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
@@ -2430,6 +2438,7 @@
TEST(OptimizedPretenuringNestedDoubleLiterals) {
i::FLAG_allow_natives_syntax = true;
+ i::FLAG_allocation_site_pretenuring = false;
CcTest::InitializeVM();
if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
@@ -2493,6 +2502,7 @@
TEST(OptimizedPretenuringCallNew) {
i::FLAG_allow_natives_syntax = true;
+ i::FLAG_allocation_site_pretenuring = false;
i::FLAG_pretenuring_call_new = true;
CcTest::InitializeVM();
if (!CcTest::i_isolate()->use_crankshaft() || i::FLAG_always_opt) return;
--
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev
---
You received this message because you are subscribed to the Google Groups "v8-dev" group.
To unsubscribe from this group and stop receiving emails from it, send an email
to [email protected].
For more options, visit https://groups.google.com/groups/opt_out.