Revision: 22002
Author: [email protected]
Date: Wed Jun 25 09:31:25 2014 UTC
Log: Promotion is the backup strategy when semi-space copy fails and
vice versa.
BUG=
[email protected]
Review URL: https://codereview.chromium.org/356613004
http://code.google.com/p/v8/source/detail?r=22002
Modified:
/branches/bleeding_edge/src/heap-inl.h
/branches/bleeding_edge/src/heap.cc
/branches/bleeding_edge/src/heap.h
=======================================
--- /branches/bleeding_edge/src/heap-inl.h Mon Jun 23 08:50:54 2014 UTC
+++ /branches/bleeding_edge/src/heap-inl.h Wed Jun 25 09:31:25 2014 UTC
@@ -388,8 +388,6 @@
bool Heap::ShouldBePromoted(Address old_address, int object_size) {
- // An object should be promoted if the object has survived a
- // scavenge operation.
NewSpacePage* page = NewSpacePage::FromAddress(old_address);
Address age_mark = new_space_.age_mark();
return page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
=======================================
--- /branches/bleeding_edge/src/heap.cc Wed Jun 25 07:20:13 2014 UTC
+++ /branches/bleeding_edge/src/heap.cc Wed Jun 25 09:31:25 2014 UTC
@@ -1970,14 +1970,49 @@
}
}
+ template<int alignment>
+ static inline bool SemiSpaceCopyObject(Map* map,
+ HeapObject** slot,
+ HeapObject* object,
+ int object_size) {
+ Heap* heap = map->GetHeap();
+
+ int allocation_size = object_size;
+ if (alignment != kObjectAlignment) {
+ ASSERT(alignment == kDoubleAlignment);
+ allocation_size += kPointerSize;
+ }
+
+ ASSERT(heap->AllowedToBeMigrated(object, NEW_SPACE));
+ AllocationResult allocation =
+ heap->new_space()->AllocateRaw(allocation_size);
+
+ HeapObject* target = NULL; // Initialization to please compiler.
+ if (allocation.To(&target)) {
+ if (alignment != kObjectAlignment) {
+ target = EnsureDoubleAligned(heap, target, allocation_size);
+ }
+
+ // Order is important: slot might be inside of the target if target
+ // was allocated over a dead object and slot comes from the store
+ // buffer.
+ *slot = target;
+ MigrateObject(heap, object, target, object_size);
+
+ heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
+ heap->IncrementSemiSpaceCopiedObjectSize(object_size);
+ return true;
+ }
+ return false;
+ }
+
template<ObjectContents object_contents, int alignment>
- static inline void EvacuateObject(Map* map,
- HeapObject** slot,
- HeapObject* object,
- int object_size) {
- SLOW_ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
- SLOW_ASSERT(object->Size() == object_size);
+ static inline bool PromoteObject(Map* map,
+ HeapObject** slot,
+ HeapObject* object,
+ int object_size) {
+ Heap* heap = map->GetHeap();
int allocation_size = object_size;
if (alignment != kObjectAlignment) {
@@ -1985,73 +2020,68 @@
allocation_size += kPointerSize;
}
- Heap* heap = map->GetHeap();
- if (heap->ShouldBePromoted(object->address(), object_size)) {
- AllocationResult allocation;
+ AllocationResult allocation;
+ if (object_contents == DATA_OBJECT) {
+ ASSERT(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
+ allocation = heap->old_data_space()->AllocateRaw(allocation_size);
+ } else {
+ ASSERT(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
+ allocation = heap->old_pointer_space()->AllocateRaw(allocation_size);
+ }
- if (object_contents == DATA_OBJECT) {
- ASSERT(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
- allocation = heap->old_data_space()->AllocateRaw(allocation_size);
- } else {
- ASSERT(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
-        allocation =
-            heap->old_pointer_space()->AllocateRaw(allocation_size);
+ HeapObject* target = NULL; // Initialization to please compiler.
+ if (allocation.To(&target)) {
+ if (alignment != kObjectAlignment) {
+ target = EnsureDoubleAligned(heap, target, allocation_size);
}
- HeapObject* target = NULL; // Initialization to please compiler.
- if (allocation.To(&target)) {
- if (alignment != kObjectAlignment) {
- target = EnsureDoubleAligned(heap, target, allocation_size);
+ // Order is important: slot might be inside of the target if target
+ // was allocated over a dead object and slot comes from the store
+ // buffer.
+ *slot = target;
+ MigrateObject(heap, object, target, object_size);
+
+ if (object_contents == POINTER_OBJECT) {
+ if (map->instance_type() == JS_FUNCTION_TYPE) {
+ heap->promotion_queue()->insert(
+ target, JSFunction::kNonWeakFieldsEndOffset);
+ } else {
+ heap->promotion_queue()->insert(target, object_size);
}
+ }
+ heap->IncrementPromotedObjectsSize(object_size);
+ return true;
+ }
+ return false;
+ }
- // Order is important: slot might be inside of the target if target
- // was allocated over a dead object and slot comes from the store
- // buffer.
- *slot = target;
- MigrateObject(heap, object, target, object_size);
- if (object_contents == POINTER_OBJECT) {
- if (map->instance_type() == JS_FUNCTION_TYPE) {
- heap->promotion_queue()->insert(
- target, JSFunction::kNonWeakFieldsEndOffset);
- } else {
- heap->promotion_queue()->insert(target, object_size);
- }
- }
+ template<ObjectContents object_contents, int alignment>
+ static inline void EvacuateObject(Map* map,
+ HeapObject** slot,
+ HeapObject* object,
+ int object_size) {
+ SLOW_ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
+ SLOW_ASSERT(object->Size() == object_size);
+ Heap* heap = map->GetHeap();
- heap->IncrementPromotedObjectsSize(object_size);
+ if (!heap->ShouldBePromoted(object->address(), object_size)) {
+ // A semi-space copy may fail due to fragmentation. In that case, we
+ // try to promote the object.
+ if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) {
return;
}
}
- ASSERT(heap->AllowedToBeMigrated(object, NEW_SPACE));
- AllocationResult allocation =
- heap->new_space()->AllocateRaw(allocation_size);
- heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
- // Allocation in the other semi-space may fail due to fragmentation.
- // In that case we allocate in the old generation.
- if (allocation.IsRetry()) {
- if (object_contents == DATA_OBJECT) {
- ASSERT(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
- allocation = heap->old_data_space()->AllocateRaw(allocation_size);
- } else {
- ASSERT(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
-        allocation =
-            heap->old_pointer_space()->AllocateRaw(allocation_size);
- }
+ if (PromoteObject<object_contents, alignment>(
+ map, slot, object, object_size)) {
+ return;
}
- HeapObject* target = HeapObject::cast(allocation.ToObjectChecked());
-
- if (alignment != kObjectAlignment) {
- target = EnsureDoubleAligned(heap, target, allocation_size);
- }
+    // If promotion failed, we try to copy the object to the other semi-space
+    if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) return;
- // Order is important: slot might be inside of the target if target
- // was allocated over a dead object and slot comes from the store
- // buffer.
- *slot = target;
- MigrateObject(heap, object, target, object_size);
- heap->IncrementSemiSpaceCopiedObjectSize(object_size);
- return;
+ UNREACHABLE();
}
=======================================
--- /branches/bleeding_edge/src/heap.h Tue Jun 24 14:03:24 2014 UTC
+++ /branches/bleeding_edge/src/heap.h Wed Jun 25 09:31:25 2014 UTC
@@ -1200,10 +1200,8 @@
void VisitExternalResources(v8::ExternalResourceVisitor* visitor);
- // Helper function that governs the promotion policy from new space to
- // old. If the object's old address lies below the new space's age
- // mark or if we've already filled the bottom 1/16th of the to space,
- // we try to promote this object.
+ // An object should be promoted if the object has survived a
+ // scavenge operation.
inline bool ShouldBePromoted(Address old_address, int object_size);
void ClearJSFunctionResultCaches();
--
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev
---
You received this message because you are subscribed to the Google Groups "v8-dev" group.
To unsubscribe from this group and stop receiving emails from it, send an email
to [email protected].
For more options, visit https://groups.google.com/d/optout.