Reviewers: Michael Starzinger, ulan
Description:
Ignore slots buffer overflow when recording entries of the allocation sites
scratchpad.
BUG=
Please review this at https://codereview.chromium.org/181063033/
SVN Base: https://v8.googlecode.com/svn/branches/bleeding_edge
Affected files (+29, -11 lines):
M src/heap-inl.h
M src/heap.h
M src/heap.cc
M src/mark-compact-inl.h
M src/mark-compact.h
M src/mark-compact.cc
Index: src/heap-inl.h
diff --git a/src/heap-inl.h b/src/heap-inl.h
index a45e3ab9d9ce1630f6b2a2a6da719fd1b42beb45..ea175bbc3b5c8cffa5b5e673c25fc468d4f62745 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -490,7 +490,8 @@ void Heap::ScavengePointer(HeapObject** p) {
}
-void Heap::UpdateAllocationSiteFeedback(HeapObject* object) {
+void Heap::UpdateAllocationSiteFeedback(HeapObject* object,
+ ScratchpadSlotMode mode) {
Heap* heap = object->GetHeap();
ASSERT(heap->InFromSpace(object));
@@ -518,7 +519,7 @@ void Heap::UpdateAllocationSiteFeedback(HeapObject* object) {
if (!memento->IsValid()) return;
if (memento->GetAllocationSite()->IncrementMementoFoundCount()) {
- heap->AddAllocationSiteToScratchpad(memento->GetAllocationSite());
+    heap->AddAllocationSiteToScratchpad(memento->GetAllocationSite(), mode);
}
}
@@ -541,7 +542,7 @@ void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
return;
}
- UpdateAllocationSiteFeedback(object);
+ UpdateAllocationSiteFeedback(object, IGNORE_SCRATCHPAD_SLOT);
// AllocationMementos are unrooted and shouldn't survive a scavenge
ASSERT(object->map() != object->GetHeap()->allocation_memento_map());
Index: src/heap.cc
diff --git a/src/heap.cc b/src/heap.cc
index 1f97fc3859bddc841da80426bc0c4273f61db6bb..77ebb2a02b298eb8702461aa03d724d5f94023a0 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -3654,7 +3654,8 @@ void Heap::InitializeAllocationSitesScratchpad() {
}
-void Heap::AddAllocationSiteToScratchpad(AllocationSite* site) {
+void Heap::AddAllocationSiteToScratchpad(AllocationSite* site,
+ ScratchpadSlotMode mode) {
  if (allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize) {
// We cannot use the normal write-barrier because slots need to be
// recorded with non-incremental marking as well. We have to explicitly
@@ -3663,7 +3664,11 @@ void Heap::AddAllocationSiteToScratchpad(AllocationSite* site) {
allocation_sites_scratchpad_length_, site, SKIP_WRITE_BARRIER);
Object** slot = allocation_sites_scratchpad()->RawFieldOfElementAt(
allocation_sites_scratchpad_length_);
- mark_compact_collector()->RecordSlot(slot, slot, *slot);
+
+ if (mode == RECORD_SCRATCHPAD_SLOT) {
+ mark_compact_collector()->RecordSlot(
+ slot, slot, *slot, SlotsBuffer::IGNORE_OVERFLOW);
+ }
allocation_sites_scratchpad_length_++;
}
}
Index: src/heap.h
diff --git a/src/heap.h b/src/heap.h
index c82c34e185f64e6d45db2ac44d22ef37816341dc..05683a8a0a4ad436ae8ef62882a02380d3610a46 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -1504,10 +1504,16 @@ class Heap {
static inline void ScavengePointer(HeapObject** p);
static inline void ScavengeObject(HeapObject** p, HeapObject* object);
+ enum ScratchpadSlotMode {
+ IGNORE_SCRATCHPAD_SLOT,
+ RECORD_SCRATCHPAD_SLOT
+ };
+
  // An object may have an AllocationSite associated with it through a trailing
  // AllocationMemento. Its feedback should be updated when objects are found
  // in the heap.
- static inline void UpdateAllocationSiteFeedback(HeapObject* object);
+ static inline void UpdateAllocationSiteFeedback(
+ HeapObject* object, ScratchpadSlotMode mode);
// Support for partial snapshots. After calling this we have a linear
// space to write objects in each space.
@@ -2312,7 +2318,8 @@ class Heap {
void InitializeAllocationSitesScratchpad();
// Adds an allocation site to the scratchpad if there is space left.
- void AddAllocationSiteToScratchpad(AllocationSite* site);
+ void AddAllocationSiteToScratchpad(AllocationSite* site,
+ ScratchpadSlotMode mode);
void UpdateSurvivalRateTrend(int start_new_space_size);
Index: src/mark-compact-inl.h
diff --git a/src/mark-compact-inl.h b/src/mark-compact-inl.h
index 321309c60e20136be1a2f25213479d0331ecd73d..a42e0f7f12eed9e1ba4caae546f0c5a9ea35ae3b 100644
--- a/src/mark-compact-inl.h
+++ b/src/mark-compact-inl.h
@@ -81,14 +81,15 @@ bool MarkCompactCollector::IsMarked(Object* obj) {
void MarkCompactCollector::RecordSlot(Object** anchor_slot,
Object** slot,
- Object* object) {
+ Object* object,
+ SlotsBuffer::AdditionMode mode) {
Page* object_page = Page::FromAddress(reinterpret_cast<Address>(object));
if (object_page->IsEvacuationCandidate() &&
!ShouldSkipEvacuationSlotRecording(anchor_slot)) {
if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
object_page->slots_buffer_address(),
slot,
- SlotsBuffer::FAIL_ON_OVERFLOW)) {
+ mode)) {
EvictEvacuationCandidate(object_page);
}
}
Index: src/mark-compact.cc
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index f5504478036709908462cd0832dd9fa369e5f570..7aaad0e8d720bbbbd4e1f292e8a8dbc3da01e7ef 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -2053,7 +2053,7 @@ int MarkCompactCollector::DiscoverAndPromoteBlackObjectsOnPage(
int size = object->Size();
survivors_size += size;
- Heap::UpdateAllocationSiteFeedback(object);
+    Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT);
offset++;
current_cell >>= 1;
Index: src/mark-compact.h
diff --git a/src/mark-compact.h b/src/mark-compact.h
index c966e2018e0e0c9b8ba87b02604dd91510e63b23..6019f6c649e7fd0aae35b1773869c50caec53c24 100644
--- a/src/mark-compact.h
+++ b/src/mark-compact.h
@@ -690,7 +690,11 @@ class MarkCompactCollector {
void RecordCodeEntrySlot(Address slot, Code* target);
void RecordCodeTargetPatch(Address pc, Code* target);
-  INLINE(void RecordSlot(Object** anchor_slot, Object** slot, Object* object));
+ INLINE(void RecordSlot(Object** anchor_slot,
+ Object** slot,
+ Object* object,
+ SlotsBuffer::AdditionMode mode =
+ SlotsBuffer::FAIL_ON_OVERFLOW));
void MigrateObject(Address dst,
Address src,
--
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev
---
You received this message because you are subscribed to the Google Groups "v8-dev" group.
To unsubscribe from this group and stop receiving emails from it, send an email
to [email protected].
For more options, visit https://groups.google.com/groups/opt_out.