Revision: 16265
Author: [email protected]
Date: Wed Aug 21 18:31:13 2013 UTC
Log: Fix migration checks and extend them to the Scavenger.
[email protected]
Review URL: https://codereview.chromium.org/23060018
http://code.google.com/p/v8/source/detail?r=16265
Modified:
/branches/bleeding_edge/src/heap-inl.h
/branches/bleeding_edge/src/heap.cc
/branches/bleeding_edge/src/heap.h
/branches/bleeding_edge/src/mark-compact.cc
=======================================
--- /branches/bleeding_edge/src/heap-inl.h Tue Jul 23 07:41:46 2013 UTC
+++ /branches/bleeding_edge/src/heap-inl.h Wed Aug 21 18:31:13 2013 UTC
@@ -437,6 +437,43 @@
return (type <= LAST_DATA_TYPE) ? OLD_DATA_SPACE : OLD_POINTER_SPACE;
}
}
+
+
+bool Heap::AllowedToBeMigrated(HeapObject* object, AllocationSpace dst) {
+ // Object migration is governed by the following rules:
+ //
+ // 1) Objects in new-space can be migrated to one of the old spaces
+ // that matches their target space or they stay in new-space.
+ // 2) Objects in old-space stay in the same space when migrating.
+ // 3) Fillers (two or more words) can migrate due to left-trimming of
+ // fixed arrays in new-space, old-data-space and old-pointer-space.
+ // 4) Fillers (one word) can never migrate, they are skipped by
+ // incremental marking explicitly to prevent invalid pattern.
+ //
+ // Since this function is used for debugging only, we do not place
+ // asserts here, but check everything explicitly.
+ if (object->map() == one_pointer_filler_map()) return false;
+ InstanceType type = object->map()->instance_type();
+ MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
+ AllocationSpace src = chunk->owner()->identity();
+ switch (src) {
+ case NEW_SPACE:
+ return dst == src || dst == TargetSpaceId(type);
+ case OLD_POINTER_SPACE:
+ return dst == src && (dst == TargetSpaceId(type) || object->IsFiller());
+ case OLD_DATA_SPACE:
+ return dst == src && dst == TargetSpaceId(type);
+ case CODE_SPACE:
+ return dst == src && type == CODE_TYPE;
+ case MAP_SPACE:
+ case CELL_SPACE:
+ case PROPERTY_CELL_SPACE:
+ case LO_SPACE:
+ return false;
+ }
+ UNREACHABLE();
+ return false;
+}
void Heap::CopyBlock(Address dst, Address src, int byte_size) {
=======================================
--- /branches/bleeding_edge/src/heap.cc Fri Aug 16 15:10:07 2013 UTC
+++ /branches/bleeding_edge/src/heap.cc Wed Aug 21 18:31:13 2013 UTC
@@ -2088,10 +2088,13 @@
MaybeObject* maybe_result;
if (object_contents == DATA_OBJECT) {
+ // TODO(mstarzinger): Turn this check into a regular assert soon!
+ CHECK(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
maybe_result =
heap->old_data_space()->AllocateRaw(allocation_size);
} else {
- maybe_result =
- heap->old_pointer_space()->AllocateRaw(allocation_size);
+ // TODO(mstarzinger): Turn this check into a regular assert soon!
+ CHECK(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
+ maybe_result =
+ heap->old_pointer_space()->AllocateRaw(allocation_size);
}
Object* result = NULL; // Initialization to please compiler.
@@ -2121,6 +2124,8 @@
return;
}
}
+ // TODO(mstarzinger): Turn this check into a regular assert soon!
+ CHECK(heap->AllowedToBeMigrated(object, NEW_SPACE));
MaybeObject* allocation =
heap->new_space()->AllocateRaw(allocation_size);
heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
Object* result = allocation->ToObjectUnchecked();
=======================================
--- /branches/bleeding_edge/src/heap.h Fri Aug 16 15:10:07 2013 UTC
+++ /branches/bleeding_edge/src/heap.h Wed Aug 21 18:31:13 2013 UTC
@@ -1391,6 +1391,10 @@
inline OldSpace* TargetSpace(HeapObject* object);
static inline AllocationSpace TargetSpaceId(InstanceType type);
+ // Checks whether the given object is allowed to be migrated from its
+ // current space into the given destination space. Used for debugging.
+ inline bool AllowedToBeMigrated(HeapObject* object, AllocationSpace dest);
+
// Sets the stub_cache_ (only used when expanding the dictionary).
void public_set_code_stubs(UnseededNumberDictionary* value) {
roots_[kCodeStubsRootIndex] = value;
=======================================
--- /branches/bleeding_edge/src/mark-compact.cc Mon Aug 5 13:45:16 2013 UTC
+++ /branches/bleeding_edge/src/mark-compact.cc Wed Aug 21 18:31:13 2013 UTC
@@ -2743,12 +2743,10 @@
int size,
AllocationSpace dest) {
HEAP_PROFILE(heap(), ObjectMoveEvent(src, dst));
- // TODO(hpayer): Replace that check with an assert.
+ // TODO(hpayer): Replace these checks with asserts.
+ CHECK(heap()->AllowedToBeMigrated(HeapObject::FromAddress(src), dest));
CHECK(dest != LO_SPACE && size <= Page::kMaxNonCodeHeapObjectSize);
if (dest == OLD_POINTER_SPACE) {
- // TODO(hpayer): Replace this check with an assert.
- HeapObject* heap_object = HeapObject::FromAddress(src);
- CHECK(heap_->TargetSpace(heap_object) == heap_->old_pointer_space());
Address src_slot = src;
Address dst_slot = dst;
ASSERT(IsAligned(size, kPointerSize));
@@ -2794,13 +2792,6 @@
Code::cast(HeapObject::FromAddress(dst))->Relocate(dst - src);
} else {
ASSERT(dest == OLD_DATA_SPACE || dest == NEW_SPACE);
- // Objects in old data space can just be moved by compaction to a different
- // page in old data space.
- // TODO(hpayer): Replace the following check with an assert.
- CHECK(!heap_->old_data_space()->Contains(src) ||
- (heap_->old_data_space()->Contains(dst) &&
- heap_->TargetSpace(HeapObject::FromAddress(src)) ==
- heap_->old_data_space()));
heap()->MoveBlock(dst, src, size);
}
Memory::Address_at(src) = dst;
--
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev
---
You received this message because you are subscribed to the Google Groups "v8-dev" group.
To unsubscribe from this group and stop receiving emails from it, send an email
to [email protected].
For more options, visit https://groups.google.com/groups/opt_out.