Revision: 11577
Author: [email protected]
Date: Wed May 16 03:07:50 2012
Log: Implement map collection for incremental marking.
This causes map transitions to be treated weakly during incremental
marking and hence allows clearing of non-live transitions. The marking
code is now shared between incremental and non-incremental mode.
[email protected]
BUG=v8:1465
TEST=cctest/test-heap/Regress1465
Review URL: https://chromiumcodereview.appspot.com/10310168
http://code.google.com/p/v8/source/detail?r=11577
Modified:
/branches/bleeding_edge/src/incremental-marking-inl.h
/branches/bleeding_edge/src/incremental-marking.cc
/branches/bleeding_edge/src/incremental-marking.h
/branches/bleeding_edge/src/mark-compact-inl.h
/branches/bleeding_edge/src/mark-compact.cc
/branches/bleeding_edge/src/mark-compact.h
/branches/bleeding_edge/test/cctest/test-heap.cc
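The heart of the change below is the Marker<BaseMarker> template added to
mark-compact.h: the same map-marking logic is instantiated once for
MarkCompactCollector and once for IncrementalMarking, and transition links
are made weak by marking the transitions array without pushing it on the
marking deque. The standalone sketch that follows only illustrates that
pattern under simplified assumptions; SimpleObject and StopTheWorldMarker
are hypothetical stand-ins for V8's HeapObject, mark bits, and collectors,
not code from the patch.

// Hypothetical, simplified model: only the MarkObjectAndPush /
// MarkObjectWithoutPush protocol mirrors the patch.
#include <iostream>
#include <vector>

struct SimpleObject {
  bool marked = false;
  std::vector<SimpleObject*> strong_fields;   // traced normally
  SimpleObject* transitions_array = nullptr;  // links out of it stay weak
};

class StopTheWorldMarker {
 public:
  // Returns true only if the object was still unmarked (white), matching
  // the bool-returning protocol this patch gives both collectors.
  bool MarkObjectAndPush(SimpleObject* obj) {
    if (obj->marked) return false;
    obj->marked = true;
    deque_.push_back(obj);  // scanned later, keeps its fields alive
    return true;
  }
  // Marks without pushing: the object survives, but is never scanned, so
  // objects reachable only through it are not kept alive.
  bool MarkObjectWithoutPush(SimpleObject* obj) {
    if (obj->marked) return false;
    obj->marked = true;
    return true;
  }
 private:
  std::vector<SimpleObject*> deque_;
};

// Shared marker parameterized by the base marker, in the same spirit as
// Marker<IncrementalMarking> and Marker<MarkCompactCollector> below.
template <class BaseMarker>
class Marker {
 public:
  explicit Marker(BaseMarker* base) : base_(base) {}

  void MarkMapContents(SimpleObject* map) {
    for (SimpleObject* field : map->strong_fields)
      base_->MarkObjectAndPush(field);
    // Weak treatment: keep the transitions array itself alive but never
    // trace through it; dead transitions can then be cleared afterwards.
    if (map->transitions_array != nullptr)
      base_->MarkObjectWithoutPush(map->transitions_array);
  }

 private:
  BaseMarker* base_;
};

int main() {
  SimpleObject field, transitions, map;
  map.strong_fields.push_back(&field);
  map.transitions_array = &transitions;

  StopTheWorldMarker base;
  Marker<StopTheWorldMarker> marker(&base);
  marker.MarkMapContents(&map);

  // Both get mark bits, but anything reachable only via the transitions
  // array stays unmarked and can be collected.
  std::cout << field.marked << " " << transitions.marked << "\n";  // 1 1
}

In the patch itself the weak links also cover back pointers and the
descriptor array, and ClearNonLiveTransitions performs the clearing once
marking is complete.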
=======================================
--- /branches/bleeding_edge/src/incremental-marking-inl.h Tue May 15 05:01:23 2012
+++ /branches/bleeding_edge/src/incremental-marking-inl.h Wed May 16 03:07:50 2012
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -118,13 +118,29 @@
void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) {
- WhiteToGrey(obj, mark_bit);
+ Marking::WhiteToGrey(mark_bit);
marking_deque_.PushGrey(obj);
}
-void IncrementalMarking::WhiteToGrey(HeapObject* obj, MarkBit mark_bit) {
- Marking::WhiteToGrey(mark_bit);
+bool IncrementalMarking::MarkObjectAndPush(HeapObject* obj) {
+ MarkBit mark_bit = Marking::MarkBitFrom(obj);
+ if (!mark_bit.Get()) {
+ WhiteToGreyAndPush(obj, mark_bit);
+ return true;
+ }
+ return false;
+}
+
+
+bool IncrementalMarking::MarkObjectWithoutPush(HeapObject* obj) {
+ MarkBit mark_bit = Marking::MarkBitFrom(obj);
+ if (!mark_bit.Get()) {
+ mark_bit.Set();
+ MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
+ return true;
+ }
+ return false;
}
=======================================
--- /branches/bleeding_edge/src/incremental-marking.cc Tue May 15 05:01:23 2012
+++ /branches/bleeding_edge/src/incremental-marking.cc Wed May 16 03:07:50 2012
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -42,6 +42,7 @@
state_(STOPPED),
marking_deque_memory_(NULL),
marking_deque_memory_committed_(false),
+ marker_(this, heap->mark_compact_collector()),
steps_count_(0),
steps_took_(0),
longest_step_(0.0),
@@ -663,6 +664,22 @@
} else if (map == global_context_map) {
// Global contexts have weak fields.
VisitGlobalContext(Context::cast(obj), &marking_visitor);
+ } else if (map->instance_type() == MAP_TYPE) {
+ Map* map = Map::cast(obj);
+ heap_->ClearCacheOnMap(map);
+
+ // When map collection is enabled we have to mark through map's
+    // transitions and back pointers in a special way to make these links
+    // weak. Only maps for subclasses of JSReceiver can have transitions.
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ if (FLAG_collect_maps &&
+ map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
+ marker_.MarkMapContents(map);
+ } else {
+ marking_visitor.VisitPointers(
+ HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
+ HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
+ }
} else {
obj->Iterate(&marking_visitor);
}
@@ -807,12 +824,6 @@
Map* map = obj->map();
if (map == filler_map) continue;
- if (obj->IsMap()) {
- Map* map = Map::cast(obj);
- heap_->ClearCacheOnMap(map);
- }
-
-
int size = obj->SizeFromMap(map);
bytes_to_process -= size;
MarkBit map_mark_bit = Marking::MarkBitFrom(map);
@@ -830,6 +841,22 @@
MarkObjectGreyDoNotEnqueue(ctx->normalized_map_cache());
VisitGlobalContext(ctx, &marking_visitor);
+ } else if (map->instance_type() == MAP_TYPE) {
+ Map* map = Map::cast(obj);
+ heap_->ClearCacheOnMap(map);
+
+ // When map collection is enabled we have to mark through map's
+    // transitions and back pointers in a special way to make these links
+    // weak. Only maps for subclasses of JSReceiver can have transitions.
+ STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+ if (FLAG_collect_maps &&
+ map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
+ marker_.MarkMapContents(map);
+ } else {
+ marking_visitor.VisitPointers(
+ HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
+ HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
+ }
} else if (map->instance_type() == JS_FUNCTION_TYPE) {
marking_visitor.VisitPointers(
HeapObject::RawField(obj, JSFunction::kPropertiesOffset),
=======================================
--- /branches/bleeding_edge/src/incremental-marking.h Tue May 15 05:01:23 2012
+++ /branches/bleeding_edge/src/incremental-marking.h Wed May 16 03:07:50 2012
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -154,8 +154,6 @@
inline void WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit);
- inline void WhiteToGrey(HeapObject* obj, MarkBit mark_bit);
-
// Does white->black or keeps gray or black color. Returns true if converting
// white to black.
inline bool MarkBlackOrKeepGrey(MarkBit mark_bit) {
@@ -168,6 +166,16 @@
ASSERT(Marking::IsBlack(mark_bit));
return true;
}
+
+ // Marks the object grey and pushes it on the marking stack.
+ // Returns true if object needed marking and false otherwise.
+ // This is for incremental marking only.
+ INLINE(bool MarkObjectAndPush(HeapObject* obj));
+
+ // Marks the object black without pushing it on the marking stack.
+ // Returns true if object needed marking and false otherwise.
+ // This is for incremental marking only.
+ INLINE(bool MarkObjectWithoutPush(HeapObject* obj));
inline int steps_count() {
return steps_count_;
@@ -260,6 +268,7 @@
VirtualMemory* marking_deque_memory_;
bool marking_deque_memory_committed_;
MarkingDeque marking_deque_;
+ Marker<IncrementalMarking> marker_;
int steps_count_;
double steps_took_;
=======================================
--- /branches/bleeding_edge/src/mark-compact-inl.h Tue May 15 05:01:23 2012
+++ /branches/bleeding_edge/src/mark-compact-inl.h Wed May 16 03:07:50 2012
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -50,6 +50,15 @@
abort_incremental_marking_ =
((flags & Heap::kAbortIncrementalMarkingMask) != 0);
}
+
+
+bool MarkCompactCollector::MarkObjectAndPush(HeapObject* obj) {
+ if (MarkObjectWithoutPush(obj)) {
+ marking_deque_.PushBlack(obj);
+ return true;
+ }
+ return false;
+}
void MarkCompactCollector::MarkObject(HeapObject* obj, MarkBit mark_bit) {
@@ -62,16 +71,13 @@
}
-bool MarkCompactCollector::MarkObjectWithoutPush(HeapObject* object) {
- MarkBit mark = Marking::MarkBitFrom(object);
- bool old_mark = mark.Get();
- if (!old_mark) SetMark(object, mark);
- return old_mark;
-}
-
-
-void MarkCompactCollector::MarkObjectAndPush(HeapObject* object) {
- if (!MarkObjectWithoutPush(object)) marking_deque_.PushBlack(object);
+bool MarkCompactCollector::MarkObjectWithoutPush(HeapObject* obj) {
+ MarkBit mark_bit = Marking::MarkBitFrom(obj);
+ if (!mark_bit.Get()) {
+ SetMark(obj, mark_bit);
+ return true;
+ }
+ return false;
}
=======================================
--- /branches/bleeding_edge/src/mark-compact.cc Tue May 15 05:01:23 2012
+++ /branches/bleeding_edge/src/mark-compact.cc Wed May 16 03:07:50 2012
@@ -64,13 +64,13 @@
abort_incremental_marking_(false),
compacting_(false),
was_marked_incrementally_(false),
- collect_maps_(FLAG_collect_maps),
flush_monomorphic_ics_(false),
tracer_(NULL),
migration_slots_buffer_(NULL),
heap_(NULL),
code_flusher_(NULL),
- encountered_weak_maps_(NULL) { }
+ encountered_weak_maps_(NULL),
+ marker_(this, this) { }
#ifdef DEBUG
@@ -282,7 +282,7 @@
MarkLiveObjects();
ASSERT(heap_->incremental_marking()->IsStopped());
- if (collect_maps_) ClearNonLiveTransitions();
+ if (FLAG_collect_maps) ClearNonLiveTransitions();
ClearWeakMaps();
@@ -294,7 +294,7 @@
SweepSpaces();
- if (!collect_maps_) ReattachInitialMaps();
+ if (!FLAG_collect_maps) ReattachInitialMaps();
Finish();
@@ -658,11 +658,6 @@
void MarkCompactCollector::Prepare(GCTracer* tracer) {
was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
- // Disable collection of maps if incremental marking is enabled.
- // Map collection algorithm relies on a special map transition tree traversal
- // order which is not implemented for incremental marking.
- collect_maps_ = FLAG_collect_maps && !was_marked_incrementally_;
-
// Monomorphic ICs are preserved when possible, but need to be flushed
// when they might be keeping a Context alive, or when the heap is about
// to be serialized.
@@ -1798,11 +1793,11 @@
heap_->ClearCacheOnMap(map);
// When map collection is enabled we have to mark through map's transitions
-  // in a special way to make transition links weak.
-  // Only maps for subclasses of JSReceiver can have transitions.
+  // in a special way to make transition links weak. Only maps for subclasses
+  // of JSReceiver can have transitions.
STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
- if (collect_maps_ && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
- MarkMapContents(map);
+  if (FLAG_collect_maps && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
+ marker_.MarkMapContents(map);
} else {
marking_deque_.PushBlack(map);
}
@@ -1812,85 +1807,86 @@
}
-void MarkCompactCollector::MarkMapContents(Map* map) {
+// Force instantiation of template instances.
+template void Marker<IncrementalMarking>::MarkMapContents(Map* map);
+template void Marker<MarkCompactCollector>::MarkMapContents(Map* map);
+
+
+template <class T>
+void Marker<T>::MarkMapContents(Map* map) {
// Mark prototype transitions array but don't push it into marking stack.
// This will make references from it weak. We will clean dead prototype
- // transitions in ClearNonLiveTransitions. But make sure that back pointers
- // stored inside prototype transitions arrays are marked.
- Object* raw_proto_transitions = map->unchecked_prototype_transitions();
- if (raw_proto_transitions->IsFixedArray()) {
-    FixedArray* prototype_transitions = FixedArray::cast(raw_proto_transitions);
+ // transitions in ClearNonLiveTransitions.
+ Object** proto_trans_slot =
+      HeapObject::RawField(map, Map::kPrototypeTransitionsOrBackPointerOffset);
+ HeapObject* prototype_transitions = HeapObject::cast(*proto_trans_slot);
+ if (prototype_transitions->IsFixedArray()) {
+ mark_compact_collector()->RecordSlot(proto_trans_slot,
+ proto_trans_slot,
+ prototype_transitions);
MarkBit mark = Marking::MarkBitFrom(prototype_transitions);
if (!mark.Get()) {
mark.Set();
MemoryChunk::IncrementLiveBytesFromGC(prototype_transitions->address(),
prototype_transitions->Size());
-      MarkObjectAndPush(HeapObject::cast(
-          prototype_transitions->get(Map::kProtoTransitionBackPointerOffset)));
}
}
- Object** raw_descriptor_array_slot =
+ // Make sure that the back pointer stored either in the map itself or
inside
+ // its prototype transitions array is marked. Treat pointers in the
descriptor
+ // array as weak and also mark that array to prevent visiting it later.
+
base_marker()->MarkObjectAndPush(HeapObject::cast(map->GetBackPointer()));
+
+ Object** descriptor_array_slot =
HeapObject::RawField(map,
Map::kInstanceDescriptorsOrBitField3Offset);
- Object* raw_descriptor_array = *raw_descriptor_array_slot;
- if (!raw_descriptor_array->IsSmi()) {
- MarkDescriptorArray(
- reinterpret_cast<DescriptorArray*>(raw_descriptor_array));
+ Object* descriptor_array = *descriptor_array_slot;
+ if (!descriptor_array->IsSmi()) {
+    MarkDescriptorArray(reinterpret_cast<DescriptorArray*>(descriptor_array));
}
- // Mark the Object* fields of the Map.
- // Since the descriptor array has been marked already, it is fine
- // that one of these fields contains a pointer to it.
-  Object** start_slot = HeapObject::RawField(map,
-                                             Map::kPointerFieldsBeginOffset);
-
-  Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset);
-
-  StaticMarkingVisitor::VisitPointers(map->GetHeap(), start_slot, end_slot);
+  // Mark the Object* fields of the Map. Since the descriptor array has been
+  // marked already, it is fine that one of these fields contains a pointer
+  // to it. But make sure to skip back pointer and prototype transitions.
+ STATIC_ASSERT(Map::kPointerFieldsEndOffset ==
+ Map::kPrototypeTransitionsOrBackPointerOffset + kPointerSize);
+ Object** start_slot = HeapObject::RawField(
+ map, Map::kPointerFieldsBeginOffset);
+ Object** end_slot = HeapObject::RawField(
+ map, Map::kPrototypeTransitionsOrBackPointerOffset);
+ for (Object** slot = start_slot; slot < end_slot; slot++) {
+ Object* obj = *slot;
+ if (!obj->NonFailureIsHeapObject()) continue;
+ mark_compact_collector()->RecordSlot(start_slot, slot, obj);
+ base_marker()->MarkObjectAndPush(reinterpret_cast<HeapObject*>(obj));
+ }
}
-void MarkCompactCollector::MarkAccessorPairSlot(HeapObject* accessors,
- int offset) {
- Object** slot = HeapObject::RawField(accessors, offset);
- HeapObject* accessor = HeapObject::cast(*slot);
- if (accessor->IsMap()) return;
- RecordSlot(slot, slot, accessor);
- MarkObjectAndPush(accessor);
-}
-
-
-void MarkCompactCollector::MarkDescriptorArray(
- DescriptorArray* descriptors) {
- MarkBit descriptors_mark = Marking::MarkBitFrom(descriptors);
- if (descriptors_mark.Get()) return;
+template <class T>
+void Marker<T>::MarkDescriptorArray(DescriptorArray* descriptors) {
// Empty descriptor array is marked as a root before any maps are marked.
- ASSERT(descriptors != heap()->empty_descriptor_array());
- SetMark(descriptors, descriptors_mark);
-
- FixedArray* contents = reinterpret_cast<FixedArray*>(
+ ASSERT(descriptors != descriptors->GetHeap()->empty_descriptor_array());
+
+ // The DescriptorArray contains a pointer to its contents array, but the
+ // contents array will be marked black and hence not be visited again.
+ if (!base_marker()->MarkObjectAndPush(descriptors)) return;
+ FixedArray* contents = FixedArray::cast(
descriptors->get(DescriptorArray::kContentArrayIndex));
- ASSERT(contents->IsHeapObject());
- ASSERT(!IsMarked(contents));
- ASSERT(contents->IsFixedArray());
ASSERT(contents->length() >= 2);
- MarkBit contents_mark = Marking::MarkBitFrom(contents);
- SetMark(contents, contents_mark);
-  // Contents contains (value, details) pairs. If the details say that the type
-  // of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION,
-  // EXTERNAL_ARRAY_TRANSITION or NULL_DESCRIPTOR, we don't mark the value as
- // live. Only for MAP_TRANSITION, EXTERNAL_ARRAY_TRANSITION and
- // CONSTANT_TRANSITION is the value an Object* (a Map*).
+ ASSERT(Marking::IsWhite(Marking::MarkBitFrom(contents)));
+ base_marker()->MarkObjectWithoutPush(contents);
+
+  // Contents contains (value, details) pairs. If the descriptor contains a
+  // transition (value is a Map), we don't mark the value as live. It might
+ // be set to the NULL_DESCRIPTOR in ClearNonLiveTransitions later.
for (int i = 0; i < contents->length(); i += 2) {
- // If the pair (value, details) at index i, i+1 is not
- // a transition or null descriptor, mark the value.
PropertyDetails details(Smi::cast(contents->get(i + 1)));
Object** slot = contents->data_start() + i;
if (!(*slot)->IsHeapObject()) continue;
HeapObject* value = HeapObject::cast(*slot);
- RecordSlot(slot, slot, *slot);
+ mark_compact_collector()->RecordSlot(slot, slot, *slot);
switch (details.type()) {
case NORMAL:
@@ -1898,21 +1894,22 @@
case CONSTANT_FUNCTION:
case HANDLER:
case INTERCEPTOR:
- MarkObjectAndPush(value);
+ base_marker()->MarkObjectAndPush(value);
break;
case CALLBACKS:
if (!value->IsAccessorPair()) {
- MarkObjectAndPush(value);
- } else if (!MarkObjectWithoutPush(value)) {
- MarkAccessorPairSlot(value, AccessorPair::kGetterOffset);
- MarkAccessorPairSlot(value, AccessorPair::kSetterOffset);
+ base_marker()->MarkObjectAndPush(value);
+ } else if (base_marker()->MarkObjectWithoutPush(value)) {
+ AccessorPair* accessors = AccessorPair::cast(value);
+ MarkAccessorPairSlot(accessors, AccessorPair::kGetterOffset);
+ MarkAccessorPairSlot(accessors, AccessorPair::kSetterOffset);
}
break;
case ELEMENTS_TRANSITION:
// For maps with multiple elements transitions, the transition maps are
// stored in a FixedArray. Keep the fixed array alive but not the maps
// that it refers to.
- if (value->IsFixedArray()) MarkObjectWithoutPush(value);
+        if (value->IsFixedArray()) base_marker()->MarkObjectWithoutPush(value);
break;
case MAP_TRANSITION:
case CONSTANT_TRANSITION:
@@ -1920,9 +1917,16 @@
break;
}
}
-  // The DescriptorArray descriptors contains a pointer to its contents array,
- // but the contents array is already marked.
- marking_deque_.PushBlack(descriptors);
+}
+
+
+template <class T>
+void Marker<T>::MarkAccessorPairSlot(AccessorPair* accessors, int offset) {
+ Object** slot = HeapObject::RawField(accessors, offset);
+ HeapObject* accessor = HeapObject::cast(*slot);
+ if (accessor->IsMap()) return;
+ mark_compact_collector()->RecordSlot(slot, slot, accessor);
+ base_marker()->MarkObjectAndPush(accessor);
}
=======================================
--- /branches/bleeding_edge/src/mark-compact.h Tue May 15 05:01:23 2012
+++ /branches/bleeding_edge/src/mark-compact.h Wed May 16 03:07:50 2012
@@ -42,6 +42,7 @@
// Forward declarations.
class CodeFlusher;
class GCTracer;
+class MarkCompactCollector;
class MarkingVisitor;
class RootMarkingVisitor;
@@ -166,7 +167,6 @@
// ----------------------------------------------------------------------------
// Marking deque for tracing live objects.
-
class MarkingDeque {
public:
MarkingDeque()
@@ -383,6 +383,34 @@
};
+// -------------------------------------------------------------------------
+// Marker shared between incremental and non-incremental marking
+template<class BaseMarker> class Marker {
+ public:
+  Marker(BaseMarker* base_marker, MarkCompactCollector* mark_compact_collector)
+ : base_marker_(base_marker),
+ mark_compact_collector_(mark_compact_collector) {}
+
+ // Mark pointers in a Map and its DescriptorArray together, possibly
+ // treating transitions or back pointers weak.
+ void MarkMapContents(Map* map);
+ void MarkDescriptorArray(DescriptorArray* descriptors);
+ void MarkAccessorPairSlot(AccessorPair* accessors, int offset);
+
+ private:
+ BaseMarker* base_marker() {
+ return base_marker_;
+ }
+
+ MarkCompactCollector* mark_compact_collector() {
+ return mark_compact_collector_;
+ }
+
+ BaseMarker* base_marker_;
+ MarkCompactCollector* mark_compact_collector_;
+};
+
+
// Defined in isolate.h.
class ThreadLocalTop;
@@ -584,8 +612,6 @@
bool was_marked_incrementally_;
- bool collect_maps_;
-
bool flush_monomorphic_ics_;
// A pointer to the current stack-allocated GC tracer object during a full
@@ -608,12 +634,13 @@
//
// After: Live objects are marked and non-live objects are unmarked.
-
friend class RootMarkingVisitor;
friend class MarkingVisitor;
friend class StaticMarkingVisitor;
friend class CodeMarkingVisitor;
friend class SharedFunctionInfoMarkingVisitor;
+ friend class Marker<IncrementalMarking>;
+ friend class Marker<MarkCompactCollector>;
// Mark non-optimize code for functions inlined into the given optimized
// code. This will prevent it from being flushed.
@@ -631,22 +658,25 @@
void AfterMarking();
// Marks the object black and pushes it on the marking stack.
- // This is for non-incremental marking.
+ // Returns true if object needed marking and false otherwise.
+ // This is for non-incremental marking only.
+ INLINE(bool MarkObjectAndPush(HeapObject* obj));
+
+ // Marks the object black and pushes it on the marking stack.
+ // This is for non-incremental marking only.
INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));
- INLINE(bool MarkObjectWithoutPush(HeapObject* object));
- INLINE(void MarkObjectAndPush(HeapObject* value));
-
- // Marks the object black. This is for non-incremental marking.
+ // Marks the object black without pushing it on the marking stack.
+ // Returns true if object needed marking and false otherwise.
+ // This is for non-incremental marking only.
+ INLINE(bool MarkObjectWithoutPush(HeapObject* obj));
+
+ // Marks the object black assuming that it is not yet marked.
+ // This is for non-incremental marking only.
INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));
void ProcessNewlyMarkedObject(HeapObject* obj);
- // Mark a Map and its DescriptorArray together, skipping transitions.
- void MarkMapContents(Map* map);
- void MarkAccessorPairSlot(HeapObject* accessors, int offset);
- void MarkDescriptorArray(DescriptorArray* descriptors);
-
// Mark the heap roots and all objects reachable from them.
void MarkRoots(RootMarkingVisitor* visitor);
@@ -749,6 +779,7 @@
MarkingDeque marking_deque_;
CodeFlusher* code_flusher_;
Object* encountered_weak_maps_;
+ Marker<MarkCompactCollector> marker_;
List<Page*> evacuation_candidates_;
List<Code*> invalidated_code_;
=======================================
--- /branches/bleeding_edge/test/cctest/test-heap.cc Tue May 15 05:01:23 2012
+++ /branches/bleeding_edge/test/cctest/test-heap.cc Wed May 16 03:07:50 2012
@@ -1735,3 +1735,60 @@
CHECK(HEAP->InNewSpace(*o));
}
+
+
+static int CountMapTransitions(Map* map) {
+ int result = 0;
+ DescriptorArray* descs = map->instance_descriptors();
+ for (int i = 0; i < descs->number_of_descriptors(); i++) {
+ if (descs->IsTransitionOnly(i)) {
+ result++;
+ }
+ }
+ return result;
+}
+
+
+// Test that map transitions are cleared and maps are collected with
+// incremental marking as well.
+TEST(Regress1465) {
+ i::FLAG_allow_natives_syntax = true;
+ i::FLAG_trace_incremental_marking = true;
+ InitializeVM();
+ v8::HandleScope scope;
+
+ #define TRANSITION_COUNT 256
+ for (int i = 0; i < TRANSITION_COUNT; i++) {
+ EmbeddedVector<char, 64> buffer;
+ OS::SNPrintF(buffer, "var o = new Object; o.prop%d = %d;", i, i);
+ CompileRun(buffer.start());
+ }
+ CompileRun("var root = new Object;");
+ Handle<JSObject> root =
+ v8::Utils::OpenHandle(
+ *v8::Handle<v8::Object>::Cast(
+ v8::Context::GetCurrent()->Global()->Get(v8_str("root"))));
+
+ // Count number of live transitions before marking.
+ int transitions_before = CountMapTransitions(root->map());
+ CompileRun("%DebugPrint(root);");
+ CHECK_EQ(TRANSITION_COUNT, transitions_before);
+
+ // Go through all incremental marking steps in one swoop.
+ IncrementalMarking* marking = HEAP->incremental_marking();
+ CHECK(marking->IsStopped());
+ marking->Start();
+ CHECK(marking->IsMarking());
+ while (!marking->IsComplete()) {
+ marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD);
+ }
+ CHECK(marking->IsComplete());
+ HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ CHECK(marking->IsStopped());
+
+  // Count number of live transitions after marking. Note that one transition
+ // is left, because 'o' still holds an instance of one transition target.
+ int transitions_after = CountMapTransitions(root->map());
+ CompileRun("%DebugPrint(root);");
+ CHECK_EQ(1, transitions_after);
+}
--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev