Revision: 19677
Author: [email protected]
Date: Wed Mar 5 14:04:21 2014 UTC
Log: Ignore slots buffer overflow when recording entries of the
allocation sites scratchpad.
BUG=
[email protected], [email protected]
Review URL: https://codereview.chromium.org/181063033
http://code.google.com/p/v8/source/detail?r=19677
Modified:
/branches/bleeding_edge/src/heap-inl.h
/branches/bleeding_edge/src/heap.cc
/branches/bleeding_edge/src/heap.h
/branches/bleeding_edge/src/mark-compact-inl.h
/branches/bleeding_edge/src/mark-compact.cc
/branches/bleeding_edge/src/mark-compact.h
=======================================
--- /branches/bleeding_edge/src/heap-inl.h Mon Feb 17 10:41:25 2014 UTC
+++ /branches/bleeding_edge/src/heap-inl.h Wed Mar 5 14:04:21 2014 UTC
@@ -490,7 +490,8 @@
}
-void Heap::UpdateAllocationSiteFeedback(HeapObject* object) {
+void Heap::UpdateAllocationSiteFeedback(HeapObject* object,
+ ScratchpadSlotMode mode) {
Heap* heap = object->GetHeap();
ASSERT(heap->InFromSpace(object));
@@ -518,7 +519,7 @@
if (!memento->IsValid()) return;
if (memento->GetAllocationSite()->IncrementMementoFoundCount()) {
- heap->AddAllocationSiteToScratchpad(memento->GetAllocationSite());
+ heap->AddAllocationSiteToScratchpad(memento->GetAllocationSite(), mode);
}
}
@@ -541,7 +542,7 @@
return;
}
- UpdateAllocationSiteFeedback(object);
+ UpdateAllocationSiteFeedback(object, IGNORE_SCRATCHPAD_SLOT);
// AllocationMementos are unrooted and shouldn't survive a scavenge
ASSERT(object->map() != object->GetHeap()->allocation_memento_map());
=======================================
--- /branches/bleeding_edge/src/heap.cc Tue Mar 4 12:51:40 2014 UTC
+++ /branches/bleeding_edge/src/heap.cc Wed Mar 5 14:04:21 2014 UTC
@@ -3654,7 +3654,8 @@
}
-void Heap::AddAllocationSiteToScratchpad(AllocationSite* site) {
+void Heap::AddAllocationSiteToScratchpad(AllocationSite* site,
+ ScratchpadSlotMode mode) {
if (allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize) {
// We cannot use the normal write-barrier because slots need to be
// recorded with non-incremental marking as well. We have to explicitly
@@ -3663,7 +3664,15 @@
allocation_sites_scratchpad_length_, site, SKIP_WRITE_BARRIER);
Object** slot = allocation_sites_scratchpad()->RawFieldOfElementAt(
allocation_sites_scratchpad_length_);
- mark_compact_collector()->RecordSlot(slot, slot, *slot);
+
+ if (mode == RECORD_SCRATCHPAD_SLOT) {
+ // We need to allow slots buffer overflow here since the evacuation
+ // candidates are not part of the global list of old space pages and
+ // releasing an evacuation candidate due to a slots buffer overflow
+ // results in lost pages.
+ mark_compact_collector()->RecordSlot(
+ slot, slot, *slot, SlotsBuffer::IGNORE_OVERFLOW);
+ }
allocation_sites_scratchpad_length_++;
}
}
=======================================
--- /branches/bleeding_edge/src/heap.h Tue Mar 4 12:51:40 2014 UTC
+++ /branches/bleeding_edge/src/heap.h Wed Mar 5 14:04:21 2014 UTC
@@ -1504,10 +1504,16 @@
static inline void ScavengePointer(HeapObject** p);
static inline void ScavengeObject(HeapObject** p, HeapObject* object);
+ enum ScratchpadSlotMode {
+ IGNORE_SCRATCHPAD_SLOT,
+ RECORD_SCRATCHPAD_SLOT
+ };
+
// An object may have an AllocationSite associated with it through a trailing
// AllocationMemento. Its feedback should be updated when objects are found
// in the heap.
- static inline void UpdateAllocationSiteFeedback(HeapObject* object);
+ static inline void UpdateAllocationSiteFeedback(
+ HeapObject* object, ScratchpadSlotMode mode);
// Support for partial snapshots. After calling this we have a linear
// space to write objects in each space.
@@ -2312,7 +2318,8 @@
void InitializeAllocationSitesScratchpad();
// Adds an allocation site to the scratchpad if there is space left.
- void AddAllocationSiteToScratchpad(AllocationSite* site);
+ void AddAllocationSiteToScratchpad(AllocationSite* site,
+ ScratchpadSlotMode mode);
void UpdateSurvivalRateTrend(int start_new_space_size);
=======================================
--- /branches/bleeding_edge/src/mark-compact-inl.h Tue Sep 10 14:30:36 2013 UTC
+++ /branches/bleeding_edge/src/mark-compact-inl.h Wed Mar 5 14:04:21 2014 UTC
@@ -81,14 +81,15 @@
void MarkCompactCollector::RecordSlot(Object** anchor_slot,
Object** slot,
- Object* object) {
+ Object* object,
+ SlotsBuffer::AdditionMode mode) {
Page* object_page = Page::FromAddress(reinterpret_cast<Address>(object));
if (object_page->IsEvacuationCandidate() &&
!ShouldSkipEvacuationSlotRecording(anchor_slot)) {
if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
object_page->slots_buffer_address(),
slot,
- SlotsBuffer::FAIL_ON_OVERFLOW)) {
+ mode)) {
EvictEvacuationCandidate(object_page);
}
}
=======================================
--- /branches/bleeding_edge/src/mark-compact.cc Thu Feb 27 16:07:44 2014 UTC
+++ /branches/bleeding_edge/src/mark-compact.cc Wed Mar 5 14:04:21 2014 UTC
@@ -2053,7 +2053,7 @@
int size = object->Size();
survivors_size += size;
- Heap::UpdateAllocationSiteFeedback(object);
+ Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT);
offset++;
current_cell >>= 1;
=======================================
--- /branches/bleeding_edge/src/mark-compact.h Fri Feb 14 12:33:35 2014 UTC
+++ /branches/bleeding_edge/src/mark-compact.h Wed Mar 5 14:04:21 2014 UTC
@@ -690,7 +690,11 @@
void RecordCodeEntrySlot(Address slot, Code* target);
void RecordCodeTargetPatch(Address pc, Code* target);
- INLINE(void RecordSlot(Object** anchor_slot, Object** slot, Object* object));
+ INLINE(void RecordSlot(Object** anchor_slot,
+ Object** slot,
+ Object* object,
+ SlotsBuffer::AdditionMode mode =
+ SlotsBuffer::FAIL_ON_OVERFLOW));
void MigrateObject(Address dst,
Address src,
--
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev
---
You received this message because you are subscribed to the Google Groups "v8-dev" group.
To unsubscribe from this group and stop receiving emails from it, send an email
to [email protected].
For more options, visit https://groups.google.com/groups/opt_out.