Reviewers: Hannes Payer,
Description:
Fix migration checks and extend them to the Scavenger.
[email protected]
Please review this at https://codereview.chromium.org/23060018/
SVN Base: https://v8.googlecode.com/svn/branches/bleeding_edge
Affected files:
M src/heap-inl.h
M src/heap.h
M src/heap.cc
M src/mark-compact.cc
Index: src/heap-inl.h
diff --git a/src/heap-inl.h b/src/heap-inl.h
index 3c1d4d274ba883a854d794f9eeef7763a4bb1dd9..6caa742f532b0503313846976383cf71de1aff40 100644
--- a/src/heap-inl.h
+++ b/src/heap-inl.h
@@ -439,6 +439,43 @@ AllocationSpace Heap::TargetSpaceId(InstanceType type)
{
}
+bool Heap::AllowedToBeMigrated(HeapObject* object, AllocationSpace dst) {
+ // Object migration is governed by the following rules:
+ //
+ // 1) Objects in new-space can be migrated to one of the old spaces
+ // that matches their target space or they stay in new-space.
+ // 2) Objects in old-space stay in the same space when migrating.
+ // 3) Fillers (two or more words) can migrate due to left-trimming of
+ // fixed arrays in new-space, old-data-space and old-pointer-space.
+ // 4) Fillers (one word) can never migrate, they are skipped by
+ // incremental marking explicitly to prevent invalid pattern.
+ //
+ // Since this function is used for debugging only, we do not place
+ // asserts here, but check everything explicitly.
+ if (object->map() == one_pointer_filler_map()) return false;
+ InstanceType type = object->map()->instance_type();
+ MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
+ AllocationSpace src = chunk->owner()->identity();
+ switch (src) {
+ case NEW_SPACE:
+ return dst == src || dst == TargetSpaceId(type);
+ case OLD_POINTER_SPACE:
+ return dst == src && (dst == TargetSpaceId(type) || object->IsFiller());
+ case OLD_DATA_SPACE:
+ return dst == src && dst == TargetSpaceId(type);
+ case CODE_SPACE:
+ return dst == src && type == CODE_TYPE;
+ case MAP_SPACE:
+ case CELL_SPACE:
+ case PROPERTY_CELL_SPACE:
+ case LO_SPACE:
+ return false;
+ }
+ UNREACHABLE();
+ return false;
+}
+
+
void Heap::CopyBlock(Address dst, Address src, int byte_size) {
CopyWords(reinterpret_cast<Object**>(dst),
reinterpret_cast<Object**>(src),
Index: src/heap.cc
diff --git a/src/heap.cc b/src/heap.cc
index 623ec31e20fca8169ba724e5be33a5f736511a34..450c1c3e7f4540c3944f7755bf6e240aca0e2995 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -2088,10 +2088,13 @@ class ScavengingVisitor : public StaticVisitorBase {
MaybeObject* maybe_result;
if (object_contents == DATA_OBJECT) {
+ // TODO(mstarzinger): Turn this check into a regular assert soon!
+ CHECK(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
maybe_result =
heap->old_data_space()->AllocateRaw(allocation_size);
} else {
- maybe_result =
- heap->old_pointer_space()->AllocateRaw(allocation_size);
+ // TODO(mstarzinger): Turn this check into a regular assert soon!
+ CHECK(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
+ maybe_result =
+ heap->old_pointer_space()->AllocateRaw(allocation_size);
}
Object* result = NULL; // Initialization to please compiler.
@@ -2121,6 +2124,8 @@ class ScavengingVisitor : public StaticVisitorBase {
return;
}
}
+ // TODO(mstarzinger): Turn this check into a regular assert soon!
+ CHECK(heap->AllowedToBeMigrated(object, NEW_SPACE));
MaybeObject* allocation =
heap->new_space()->AllocateRaw(allocation_size);
heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
Object* result = allocation->ToObjectUnchecked();
Index: src/heap.h
diff --git a/src/heap.h b/src/heap.h
index e0ffa63e9abdf8678925b12cef99b154ef76163a..3472ec09328e1278b8cfbfe95ca2f03e9d3d0571 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -1391,6 +1391,10 @@ class Heap {
inline OldSpace* TargetSpace(HeapObject* object);
static inline AllocationSpace TargetSpaceId(InstanceType type);
+ // Checks whether the given object is allowed to be migrated from its
+ // current space into the given destination space. Used for debugging.
+ inline bool AllowedToBeMigrated(HeapObject* object, AllocationSpace dest);
+
// Sets the stub_cache_ (only used when expanding the dictionary).
void public_set_code_stubs(UnseededNumberDictionary* value) {
roots_[kCodeStubsRootIndex] = value;
Index: src/mark-compact.cc
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index
0e8426702806379fd5290b781b7d7ffe0973c996..58687b7aeb3baec32a0995c716271375b4db7201
100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -2743,12 +2743,10 @@ void MarkCompactCollector::MigrateObject(Address dst,
int size,
AllocationSpace dest) {
HEAP_PROFILE(heap(), ObjectMoveEvent(src, dst));
- // TODO(hpayer): Replace that check with an assert.
+ // TODO(hpayer): Replace these checks with an assert.
+ CHECK(heap()->AllowedToBeMigrated(HeapObject::FromAddress(src), dest));
CHECK(dest != LO_SPACE && size <= Page::kMaxNonCodeHeapObjectSize);
if (dest == OLD_POINTER_SPACE) {
- // TODO(hpayer): Replace this check with an assert.
- HeapObject* heap_object = HeapObject::FromAddress(src);
- CHECK(heap_->TargetSpace(heap_object) == heap_->old_pointer_space());
Address src_slot = src;
Address dst_slot = dst;
ASSERT(IsAligned(size, kPointerSize));
@@ -2794,13 +2792,6 @@ void MarkCompactCollector::MigrateObject(Address dst,
Code::cast(HeapObject::FromAddress(dst))->Relocate(dst - src);
} else {
ASSERT(dest == OLD_DATA_SPACE || dest == NEW_SPACE);
- // Objects in old data space can just be moved by compaction to a different
- // page in old data space.
- // TODO(hpayer): Replace the following check with an assert.
- CHECK(!heap_->old_data_space()->Contains(src) ||
- (heap_->old_data_space()->Contains(dst) &&
- heap_->TargetSpace(HeapObject::FromAddress(src)) ==
- heap_->old_data_space()));
heap()->MoveBlock(dst, src, size);
}
Memory::Address_at(src) = dst;
--
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev
---
You received this message because you are subscribed to the Google Groups "v8-dev" group.
To unsubscribe from this group and stop receiving emails from it, send an email
to [email protected].
For more options, visit https://groups.google.com/groups/opt_out.