Revision: 6616
Author: [email protected]
Date: Thu Feb  3 05:20:16 2011
Log: Start using store buffers.  Handle store buffer overflow situation.
Only on Intel architectures at the moment.
Review URL: http://codereview.chromium.org/6250076
http://code.google.com/p/v8/source/detail?r=6616

Modified:
 /branches/experimental/gc/src/heap-inl.h
 /branches/experimental/gc/src/heap.cc
 /branches/experimental/gc/src/heap.h
 /branches/experimental/gc/src/mark-compact.cc
 /branches/experimental/gc/src/mark-compact.h
 /branches/experimental/gc/src/objects-inl.h
 /branches/experimental/gc/src/spaces-inl.h
 /branches/experimental/gc/src/spaces.cc
 /branches/experimental/gc/src/spaces.h
 /branches/experimental/gc/src/store-buffer-inl.h
 /branches/experimental/gc/src/store-buffer.cc
 /branches/experimental/gc/src/store-buffer.h
 /branches/experimental/gc/src/v8globals.h
 /branches/experimental/gc/test/cctest/test-heap.cc

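For context: this change replaces the card-marking write barrier (the per-page
dirty region bits removed below) with a store buffer that records the exact
addresses of slots that may hold old-to-new pointers.  A minimal standalone
sketch of the idea, with hypothetical names (the real implementation is in
store-buffer.h/store-buffer.cc below):

  // Sketch only: a bounded buffer of slot addresses.  The mutator's write
  // barrier appends on every old-to-new store; on overflow we stop recording
  // and remember that the next scavenge must scan all of old space instead.
  class ToyStoreBuffer {
   public:
    ToyStoreBuffer() : top_(0), overflowed_(false) {}
    void Mark(void** slot) {
      if (top_ == kCapacity) {  // overflow: fall back to full scanning
        overflowed_ = true;
        return;
      }
      slots_[top_++] = slot;
    }
    bool overflowed() const { return overflowed_; }
   private:
    static const int kCapacity = 1024;  // arbitrary for this sketch
    void** slots_[kCapacity];
    int top_;
    bool overflowed_;
  };

Much of the patch below is about the overflowed() case: the buffer is
disabled, the scavenge falls back to scanning old space for new-space
pointers, and the buffer is rebuilt during that scan.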
=======================================
--- /branches/experimental/gc/src/heap-inl.h    Mon Jan 24 06:34:54 2011
+++ /branches/experimental/gc/src/heap-inl.h    Thu Feb  3 05:20:16 2011
@@ -387,7 +387,12 @@
   // If the first word is a forwarding address, the object has already been
   // copied.
   if (first_word.IsForwardingAddress()) {
-    *p = first_word.ToForwardingAddress();
+    HeapObject* dest = first_word.ToForwardingAddress();
+    *p = dest;
+    Address slot = reinterpret_cast<Address>(p);
+    if (Heap::InNewSpace(dest) && !Heap::InNewSpace(slot)) {
+      StoreBuffer::EnterDirectlyIntoStoreBuffer(slot);
+    }
     return;
   }

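The invariant behind the new branch above: after a scavenge, every old-space
slot that still points into new space must have a store buffer entry.  Stated
as a hypothetical predicate (illustration only, not part of the patch):

  // A slot needs (re-)recording iff its target survived in new space while
  // the slot itself lives outside new space.  Slots that are themselves in
  // new space are rescanned wholesale on the next scavenge anyway.
  static bool SlotNeedsStoreBufferEntry(Address slot, HeapObject* target) {
    return Heap::InNewSpace(target) && !Heap::InNewSpace(slot);
  }

This is exactly the condition guarding EnterDirectlyIntoStoreBuffer above.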
=======================================
--- /branches/experimental/gc/src/heap.cc       Mon Jan 24 06:34:54 2011
+++ /branches/experimental/gc/src/heap.cc       Thu Feb  3 05:20:16 2011
@@ -42,6 +42,7 @@
 #include "scanner-base.h"
 #include "scopeinfo.h"
 #include "snapshot.h"
+#include "store-buffer.h"
 #include "v8threads.h"
 #include "vm-state-inl.h"
 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
@@ -630,24 +631,6 @@
 }


-#ifdef DEBUG
-
-enum PageWatermarkValidity {
-  ALL_VALID,
-  ALL_INVALID
-};
-
-static void VerifyPageWatermarkValidity(PagedSpace* space,
-                                        PageWatermarkValidity validity) {
-  PageIterator it(space, PageIterator::PAGES_IN_USE);
-  bool expected_value = (validity == ALL_VALID);
-  while (it.has_next()) {
-    Page* page = it.next();
-    ASSERT(page->IsWatermarkValid() == expected_value);
-  }
-}
-#endif
-
 void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
   double survival_rate =
       (static_cast<double>(young_survivors_after_last_gc_) * 100) /
@@ -942,16 +925,12 @@
   gc_state_ = SCAVENGE;

   Page::FlipMeaningOfInvalidatedWatermarkFlag();
-#ifdef DEBUG
-  VerifyPageWatermarkValidity(old_pointer_space_, ALL_VALID);
-  VerifyPageWatermarkValidity(map_space_, ALL_VALID);
-#endif

   // We do not update an allocation watermark of the top page during linear
   // allocation to avoid overhead. So to maintain the watermark invariant
   // we have to manually cache the watermark and mark the top page as having an
-  // invalid watermark. This guarantees that dirty regions iteration will use a
-  // correct watermark even if a linear allocation happens.
+  // invalid watermark. This guarantees that old space pointer iteration will
+  // use a correct watermark even if a linear allocation happens.
   old_pointer_space_->FlushTopPageWatermark();
   map_space_->FlushTopPageWatermark();

@@ -999,19 +978,11 @@
   // Copy roots.
   IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);

-  // Copy objects reachable from the old generation.  By definition,
-  // there are no intergenerational pointers in code or data spaces.
-  IterateDirtyRegions(old_pointer_space_,
-                      &IteratePointersInDirtyRegion,
-                      &ScavengePointer,
-                      WATERMARK_CAN_BE_INVALID);
-
-  IterateDirtyRegions(map_space_,
-                      &IteratePointersInDirtyMapsRegion,
-                      &ScavengePointer,
-                      WATERMARK_CAN_BE_INVALID);
-
-  lo_space_->IterateDirtyRegions(&ScavengePointer);
+  // Copy objects reachable from the old generation.
+  {
+    StoreBufferRebuildScope scope;
+    StoreBuffer::IteratePointersToNewSpace(&ScavengeObject);
+  }

   // Copy objects reachable from cells by scavenging cell values directly.
   HeapObjectIterator cell_iterator(cell_space_);
@@ -1209,19 +1180,22 @@
     }

     // Promote and process all the to-be-promoted objects.
-    while (!promotion_queue.is_empty()) {
-      HeapObject* target;
-      int size;
-      promotion_queue.remove(&target, &size);
-
-      // Promoted object might be already partially visited
-      // during dirty regions iteration. Thus we search specificly
-      // for pointers to from semispace instead of looking for pointers
-      // to new space.
-      ASSERT(!target->IsMap());
-      IterateAndMarkPointersToFromSpace(target->address(),
-                                        target->address() + size,
-                                        &ScavengePointer);
+    {
+      StoreBufferRebuildScope scope;
+      while (!promotion_queue.is_empty()) {
+        HeapObject* target;
+        int size;
+        promotion_queue.remove(&target, &size);
+
+        // Promoted object might be already partially visited
+        // during old space pointer iteration. Thus we search specifically
+        // for pointers to from semispace instead of looking for pointers
+        // to new space.
+        ASSERT(!target->IsMap());
+        IterateAndMarkPointersToFromSpace(target->address(),
+                                          target->address() + size,
+                                          &ScavengeObject);
+      }
     }

     // Take another spin if there are now unswept objects in new space
@@ -1368,6 +1342,10 @@
     Object* result =
         Heap::new_space()->AllocateRaw(object_size)->ToObjectUnchecked();
     *slot = MigrateObject(object, HeapObject::cast(result), object_size);
+    if (!Heap::InNewSpace(reinterpret_cast<Address>(slot))) {
+      StoreBuffer::EnterDirectlyIntoStoreBuffer(
+          reinterpret_cast<Address>(slot));
+    }
     return;
   }

@@ -3899,13 +3877,17 @@


 #ifdef DEBUG
-static void DummyScavengePointer(HeapObject** p) {
+static void DummyScavengePointer(HeapObject** p, HeapObject* o) {
+  // When we are not in GC the Heap::InNewSpace() predicate
+  // checks that pointers which satisfy the predicate point into
+  // the active semispace.
+  Heap::InNewSpace(*p);
 }


 static void VerifyPointersUnderWatermark(
     PagedSpace* space,
-    DirtyRegionCallback visit_dirty_region) {
+    PointerRegionCallback visit_pointer_region) {
   PageIterator it(space, PageIterator::PAGES_IN_USE);

   while (it.has_next()) {
@@ -3913,11 +3895,9 @@
     Address start = page->ObjectAreaStart();
     Address end = page->AllocationWatermark();

-    Heap::IterateDirtyRegions(Page::kAllRegionsDirtyMarks,
-                              start,
-                              end,
-                              visit_dirty_region,
-                              &DummyScavengePointer);
+    Heap::IteratePointersToNewSpace(start,
+                                    end,
+                                    &DummyScavengePointer);
   }
 }

@@ -3956,14 +3936,11 @@
   map_space_->Verify(&visitor);

   VerifyPointersUnderWatermark(old_pointer_space_,
-                               &IteratePointersInDirtyRegion);
+                               &IteratePointersToNewSpace);
   VerifyPointersUnderWatermark(map_space_,
-                               &IteratePointersInDirtyMapsRegion);
+                               &IteratePointersFromMapsToNewSpace);
   VerifyPointersUnderWatermark(lo_space_);

-  VerifyPageWatermarkValidity(old_pointer_space_, ALL_INVALID);
-  VerifyPageWatermarkValidity(map_space_, ALL_INVALID);
-
   VerifyPointersVisitor no_dirty_regions_visitor;
   old_data_space_->Verify(&no_dirty_regions_visitor);
   code_space_->Verify(&no_dirty_regions_visitor);
@@ -4056,26 +4033,19 @@
 #endif  // DEBUG


-bool Heap::IteratePointersInDirtyRegion(Address start,
-                                        Address end,
-                                        ObjectSlotCallback copy_object_func) {
-  bool pointers_to_new_space_found = false;
-
+void Heap::IteratePointersToNewSpace(Address start,
+                                     Address end,
+                                     ObjectSlotCallback copy_object_func) {
   for (Address slot_address = start;
        slot_address < end;
        slot_address += kPointerSize) {
     Object** slot = reinterpret_cast<Object**>(slot_address);
     if (Heap::InNewSpace(*slot)) {
-      ASSERT((*slot)->IsHeapObject());
-      copy_object_func(reinterpret_cast<HeapObject**>(slot));
-      if (Heap::InNewSpace(*slot)) {
-        ASSERT((*slot)->IsHeapObject());
-        StoreBuffer::Mark(reinterpret_cast<Address>(slot));
-        pointers_to_new_space_found = true;
-      }
+      HeapObject* object = reinterpret_cast<HeapObject*>(*slot);
+      ASSERT(object->IsHeapObject());
+      copy_object_func(reinterpret_cast<HeapObject**>(slot), object);
     }
   }
-  return pointers_to_new_space_found;
 }


@@ -4093,14 +4063,14 @@
 }


-static bool IteratePointersInDirtyMaps(Address start,
-                                       Address end,
-                                       ObjectSlotCallback copy_object_func) {
+static void IteratePointersToNewSpaceInMaps(
+    Address start,
+    Address end,
+    ObjectSlotCallback copy_object_func) {
   ASSERT(MapStartAlign(start) == start);
   ASSERT(MapEndAlign(end) == end);

   Address map_address = start;
-  bool pointers_to_new_space_found = false;

   while (map_address < end) {
     ASSERT(!Heap::InNewSpace(Memory::Object_at(map_address)));
@@ -4109,68 +4079,27 @@
     Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
     Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;

-    if (Heap::IteratePointersInDirtyRegion(pointer_fields_start,
-                                           pointer_fields_end,
-                                           copy_object_func)) {
-      pointers_to_new_space_found = true;
-    }
-
+    Heap::IteratePointersToNewSpace(pointer_fields_start,
+                                    pointer_fields_end,
+                                    copy_object_func);
     map_address += Map::kSize;
   }
-
-  return pointers_to_new_space_found;
 }


-bool Heap::IteratePointersInDirtyMapsRegion(
+void Heap::IteratePointersFromMapsToNewSpace(
     Address start,
     Address end,
     ObjectSlotCallback copy_object_func) {
   Address map_aligned_start = MapStartAlign(start);
   Address map_aligned_end   = MapEndAlign(end);

-  bool contains_pointers_to_new_space = false;
-
-  if (map_aligned_start != start) {
-    Address prev_map = map_aligned_start - Map::kSize;
-    ASSERT(Memory::Object_at(prev_map)->IsMap());
-
-    Address pointer_fields_start =
-        Max(start, prev_map + Map::kPointerFieldsBeginOffset);
-
-    Address pointer_fields_end =
-        Min(prev_map + Map::kPointerFieldsEndOffset, end);
-
-    contains_pointers_to_new_space =
-      IteratePointersInDirtyRegion(pointer_fields_start,
-                                   pointer_fields_end,
-                                   copy_object_func)
-        || contains_pointers_to_new_space;
-  }
-
-  contains_pointers_to_new_space =
-    IteratePointersInDirtyMaps(map_aligned_start,
-                               map_aligned_end,
-                               copy_object_func)
-      || contains_pointers_to_new_space;
-
-  if (map_aligned_end != end) {
-    ASSERT(Memory::Object_at(map_aligned_end)->IsMap());
-
-    Address pointer_fields_start =
-        map_aligned_end + Map::kPointerFieldsBeginOffset;
-
-    Address pointer_fields_end =
-        Min(end, map_aligned_end + Map::kPointerFieldsEndOffset);
-
-    contains_pointers_to_new_space =
-      IteratePointersInDirtyRegion(pointer_fields_start,
-                                   pointer_fields_end,
-                                   copy_object_func)
-        || contains_pointers_to_new_space;
-  }
-
-  return contains_pointers_to_new_space;
+  ASSERT(map_aligned_start == start);
+  ASSERT(map_aligned_end == end);
+
+  IteratePointersToNewSpaceInMaps(map_aligned_start,
+                                  map_aligned_end,
+                                  copy_object_func);
 }


@@ -4180,29 +4109,29 @@
   Address slot_address = start;
   while (slot_address < end) {
     Object** slot = reinterpret_cast<Object**>(slot_address);
-    if (Heap::InFromSpace(*slot)) {
-      ASSERT((*slot)->IsHeapObject());
-      callback(reinterpret_cast<HeapObject**>(slot));
+    Object* object = *slot;
+    // In normal store buffer operation we use this function to process the
+    // promotion queue and we never scan an object twice so we will not see
+    // pointers that have already been updated to point to to-space.  But
+    // in the case of store buffer overflow we scan the entire old space to
+    // find pointers that point to new-space and in that case we may hit
+    // newly promoted objects and fix the pointers before the promotion
+    // queue gets to them.
+    ASSERT(StoreBuffer::store_buffer_mode() !=
+               StoreBuffer::kStoreBufferFunctional ||
+           !Heap::InToSpace(object));
+    if (Heap::InFromSpace(object)) {
+      callback(reinterpret_cast<HeapObject**>(slot), HeapObject::cast(object));
       if (Heap::InNewSpace(*slot)) {
+        ASSERT(Heap::InToSpace(*slot));
         ASSERT((*slot)->IsHeapObject());
-        StoreBuffer::Mark(reinterpret_cast<Address>(slot));
+        StoreBuffer::EnterDirectlyIntoStoreBuffer(
+            reinterpret_cast<Address>(slot));
       }
     }
     slot_address += kPointerSize;
   }
 }
-
-
-uint32_t Heap::IterateDirtyRegions(
-    uint32_t marks,
-    Address area_start,
-    Address area_end,
-    DirtyRegionCallback visit_dirty_region,
-    ObjectSlotCallback copy_object_func) {
-  ASSERT(marks == Page::kAllRegionsDirtyMarks);
-  visit_dirty_region(area_start, area_end, copy_object_func);
-  return Page::kAllRegionsDirtyMarks;
-}


 #ifdef DEBUG
@@ -4353,9 +4282,9 @@
 #endif


-void Heap::IterateDirtyRegions(
+void Heap::IteratePointers(
     PagedSpace* space,
-    DirtyRegionCallback visit_dirty_region,
+    PointerRegionCallback visit_pointer_region,
     ObjectSlotCallback copy_object_func,
     ExpectedPageWatermarkState expected_page_watermark_state) {

@@ -4380,11 +4309,7 @@
            (space == map_space_ &&
             ((page->ObjectAreaStart() - end) % Map::kSize == 0)));

-    IterateDirtyRegions(Page::kAllRegionsDirtyMarks,
-                        start,
-                        end,
-                        visit_dirty_region,
-                        copy_object_func);
+    visit_pointer_region(start, end, copy_object_func);

     // Mark page watermark as invalid to maintain watermark validity invariant.
     // See Page::FlipMeaningOfInvalidatedWatermarkFlag() for details.
=======================================
--- /branches/experimental/gc/src/heap.h        Mon Jan 24 06:34:54 2011
+++ /branches/experimental/gc/src/heap.h        Thu Feb  3 05:20:16 2011
@@ -214,9 +214,9 @@

 typedef String* (*ExternalStringTableUpdaterCallback)(Object** pointer);

-typedef bool (*DirtyRegionCallback)(Address start,
-                                    Address end,
-                                    ObjectSlotCallback copy_object_func);
+typedef void (*PointerRegionCallback)(Address start,
+                                      Address end,
+                                      ObjectSlotCallback copy_object_func);


 // The all static Heap captures the interface to the global object heap.
@@ -821,49 +821,36 @@
     WATERMARK_CAN_BE_INVALID
   };

-  // For each dirty region on a page in use from an old space call
-  // visit_dirty_region callback.
-  // If either visit_dirty_region or callback can cause an allocation
+  // For each region of pointers on a page in use from an old space call
+  // visit_pointer_region callback.
+  // If either visit_pointer_region or callback can cause an allocation
   // in old space and changes in allocation watermark then
   // can_preallocate_during_iteration should be set to true.
   // All pages will be marked as having invalid watermark upon
   // iteration completion.
-  static void IterateDirtyRegions(
+  static void IteratePointers(
       PagedSpace* space,
-      DirtyRegionCallback visit_dirty_region,
+      PointerRegionCallback visit_pointer_region,
       ObjectSlotCallback callback,
       ExpectedPageWatermarkState expected_page_watermark_state);

-  // Interpret marks as a bitvector of dirty marks for regions of size
-  // Page::kRegionSize aligned by Page::kRegionAlignmentMask and covering
-  // memory interval from start to top. For each dirty region call a
-  // visit_dirty_region callback. Return updated bitvector of dirty marks.
-  static uint32_t IterateDirtyRegions(uint32_t marks,
-                                      Address start,
-                                      Address end,
-                                      DirtyRegionCallback visit_dirty_region,
-                                      ObjectSlotCallback callback);
-
   // Iterate pointers to from semispace of new space found in memory interval
   // from start to end.
-  // Update dirty marks for page containing start address.
   static void IterateAndMarkPointersToFromSpace(Address start,
                                                 Address end,
                                                 ObjectSlotCallback callback);

   // Iterate pointers to new space found in memory interval from start to end.
-  // Return true if pointers to new space was found.
-  static bool IteratePointersInDirtyRegion(Address start,
-                                           Address end,
-                                           ObjectSlotCallback callback);
+  static void IteratePointersToNewSpace(Address start,
+                                        Address end,
+                                        ObjectSlotCallback callback);


   // Iterate pointers to new space found in memory interval from start to end.
   // This interval is considered to belong to the map space.
-  // Return true if pointers to new space was found.
-  static bool IteratePointersInDirtyMapsRegion(Address start,
-                                               Address end,
-                                               ObjectSlotCallback callback);
+  static void IteratePointersFromMapsToNewSpace(Address start,
+                                                Address end,
+                                                ObjectSlotCallback callback);


   // Returns whether the object resides in new space.
@@ -1518,31 +1505,6 @@
     }
   }
 };
-
-
-#ifdef ENABLE_CARDMARKING_WRITE_BARRIER
-// Visitor class to verify interior pointers in spaces that use region marks
-// to keep track of intergenerational references.
-// As VerifyPointersVisitor but also checks that dirty marks are set
-// for regions covering intergenerational references.
-class VerifyPointersAndDirtyRegionsVisitor: public ObjectVisitor {
- public:
-  void VisitPointers(Object** start, Object** end) {
-    for (Object** current = start; current < end; current++) {
-      if ((*current)->IsHeapObject()) {
-        HeapObject* object = HeapObject::cast(*current);
-        ASSERT(Heap::Contains(object));
-        ASSERT(object->map()->IsMap());
-        if (Heap::InNewSpace(object)) {
-          ASSERT(Heap::InToSpace(object));
-          Address addr = reinterpret_cast<Address>(current);
-          ASSERT(Page::FromAddress(addr)->IsRegionDirty(addr));
-        }
-      }
-    }
-  }
-};
-#endif
 #endif


=======================================
--- /branches/experimental/gc/src/mark-compact.cc       Mon Jan 24 06:34:54 2011
+++ /branches/experimental/gc/src/mark-compact.cc       Thu Feb  3 05:20:16 2011
@@ -101,9 +101,8 @@

   if (FLAG_collect_maps) ClearNonLiveTransitions();

-  SweepLargeObjectSpace();
-
   SweepSpaces();
+
   PcToCodeCache::FlushPcToCodeCache();

   Finish();
@@ -417,6 +416,7 @@
   // Since we don't have the object's start, it is impossible to update the
   // page dirty marks. Therefore, we only replace the string with its left
   // substring when page dirty marks do not change.
+  // TODO(gc): Seems like we could relax this restriction with store buffers.
   Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first();
   if (!Heap::InNewSpace(object) && Heap::InNewSpace(first)) return object;

@@ -1428,17 +1428,6 @@
 #endif  // DEBUG


-void MarkCompactCollector::SweepLargeObjectSpace() {
-#ifdef DEBUG
-  ASSERT(state_ == MARK_LIVE_OBJECTS);
-  state_ =
-      compacting_collection_ ? ENCODE_FORWARDING_ADDRESSES : SWEEP_SPACES;
-#endif
-  // Deallocate unmarked objects and clear marked bits for marked objects.
-  Heap::lo_space()->FreeUnmarkedObjects();
-}
-
-
 // Safe to use during marking phase only.
 bool MarkCompactCollector::SafeIsMap(HeapObject* object) {
   return object->map()->instance_type() == MAP_TYPE;
@@ -1509,14 +1498,18 @@

 // We scavange new space simultaneously with sweeping. This is done in two
 // passes.
+//
 // The first pass migrates all alive objects from one semispace to another or
-// promotes them to old space. Forwading address is written directly into
-// first word of object without any encoding. If object is dead we are writing
+// promotes them to old space.  Forwarding address is written directly into
+// first word of object without any encoding.  If object is dead we write
 // NULL as a forwarding address.
-// The second pass updates pointers to new space in all spaces. It is possible
-// to encounter pointers to dead objects during traversal of dirty regions we
-// should clear them to avoid encountering them during next dirty regions
-// iteration.
+//
+// The second pass updates pointers to new space in all spaces. It is possible
+// to encounter pointers to dead new space objects during traversal of pointers
+// to new space. We should clear them to avoid encountering them during next
+// pointer iteration. This is an issue if the store buffer overflows and we
+// have to scan the entire old space, including dead objects, looking for
+// pointers to new space.
 static void MigrateObject(Address dst,
                           Address src,
                           int size,
@@ -1526,7 +1519,6 @@
   } else {
     Heap::CopyBlock(dst, src, size);
   }
-
   Memory::Address_at(src) = dst;
 }

@@ -1581,23 +1573,27 @@
 };


-// Visitor for updating pointers from live objects in old spaces to new space.
-// It can encounter pointers to dead objects in new space when traversing map
-// space (see comment for MigrateObject).
-static void UpdatePointerToNewGen(HeapObject** p) {
-  if (!(*p)->IsHeapObject()) return;
-
-  Address old_addr = (*p)->address();
-  ASSERT(Heap::InFromSpace(*p));
+static void UpdatePointerToNewGen(HeapObject** p, HeapObject* object) {
+  ASSERT(Heap::InFromSpace(object));
+  ASSERT(*p == object);
+
+  Address old_addr = object->address();

   Address new_addr = Memory::Address_at(old_addr);

-  if (new_addr == NULL) {
-    // We encountered pointer to a dead object. Clear it so we will
-    // not visit it again during next iteration of dirty regions.
-    *p = NULL;
-  } else {
+  // The new space sweep will overwrite the map word of dead objects
+  // with NULL. In this case we do not need to transfer this entry to
+  // the store buffer which we are rebuilding.
+  if (new_addr != NULL) {
     *p = HeapObject::FromAddress(new_addr);
+    if (Heap::InNewSpace(new_addr)) {
+      StoreBuffer::EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(p));
+    }
+  } else {
+    // We have to zap this pointer, because the store buffer may overflow later,
+    // and then we have to scan the entire heap and we don't want to find
+    // spurious newspace pointers in the old space.
+    *p = HeapObject::FromAddress(NULL);  // Fake heap object not in new space.
   }
 }

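Why the zap value above is safe to leave behind (our reading, assuming the
usual V8 tagging where HeapObject::FromAddress(addr) tags addr with
kHeapObjectTag): the zapped slot holds the tagged "object" at address 0,
which can never fall inside the new-space reservation:

  // An overflow-triggered scan tests every pointer-aligned word with
  // Heap::InNewSpace() and therefore skips zapped slots without ever
  // decoding a map.
  ASSERT(!Heap::InNewSpace(HeapObject::FromAddress(NULL)));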
@@ -1685,6 +1681,7 @@
                     false);
     } else {
       size = object->Size();
+      // Mark dead objects in the new space with null in their map field.
       Memory::Address_at(current) = NULL;
     }
   }
@@ -1704,13 +1701,10 @@
   // Update roots.
   Heap::IterateRoots(&updating_visitor, VISIT_ALL_IN_SCAVENGE);

-  // Update pointers in old spaces.
-  Heap::IterateDirtyRegions(Heap::old_pointer_space(),
-                            &Heap::IteratePointersInDirtyRegion,
-                            &UpdatePointerToNewGen,
-                            Heap::WATERMARK_SHOULD_BE_VALID);
-
-  Heap::lo_space()->IterateDirtyRegions(&UpdatePointerToNewGen);
+  {
+    StoreBufferRebuildScope scope;
+    StoreBuffer::IteratePointersToNewSpace(&UpdatePointerToNewGen);
+  }

   // Update pointers from cells.
   HeapObjectIterator cell_iterator(Heap::cell_space());
@@ -2029,8 +2023,9 @@

 void MarkCompactCollector::SweepSpaces() {
   GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
-
-  ASSERT(state_ == SWEEP_SPACES);
+#ifdef DEBUG
+  state_ = SWEEP_SPACES;
+#endif
   ASSERT(!IsCompacting());
   // Noncompacting collections simply sweep the spaces to clear the mark
   // bits and free the nonlive blocks (for old and map spaces).  We sweep
@@ -2051,12 +2046,10 @@
   // TODO(gc): Implement specialized sweeper for map space.
   SweepSpace(Heap::map_space(), PRECISE);

-  Heap::IterateDirtyRegions(Heap::map_space(),
-                            &Heap::IteratePointersInDirtyMapsRegion,
-                            &UpdatePointerToNewGen,
-                            Heap::WATERMARK_SHOULD_BE_VALID);
-
   ASSERT(live_map_objects_size_ <= Heap::map_space()->Size());
+
+  // Deallocate unmarked objects and clear marked bits for marked objects.
+  Heap::lo_space()->FreeUnmarkedObjects();
 }


=======================================
--- /branches/experimental/gc/src/mark-compact.h        Wed Jan 19 09:39:52 2011
+++ /branches/experimental/gc/src/mark-compact.h        Thu Feb  3 05:20:16 2011
@@ -473,10 +473,6 @@
   static void UpdateLiveObjectCount(HeapObject* obj);
 #endif

-  // We sweep the large object space in the same way whether we are
-  // compacting or not, because the large object space is never compacted.
-  static void SweepLargeObjectSpace();
-
   // Test whether a (possibly marked) object is a Map.
   static inline bool SafeIsMap(HeapObject* object);

=======================================
--- /branches/experimental/gc/src/objects-inl.h Mon Jan 24 06:34:54 2011
+++ /branches/experimental/gc/src/objects-inl.h Thu Feb  3 05:20:16 2011
@@ -42,6 +42,7 @@
 #include "memory.h"
 #include "property.h"
 #include "spaces.h"
+#include "store-buffer.h"

 namespace v8 {
 namespace internal {
@@ -804,8 +805,7 @@
     ASSERT(mode == SKIP_WRITE_BARRIER); \
     ASSERT(Heap::InNewSpace(object) || \
            !Heap::InNewSpace(READ_FIELD(object, offset)) || \
-           Page::FromAddress(object->address())->           \
-               IsRegionDirty(object->address() + offset));  \
+           StoreBuffer::CellIsInStoreBuffer(object->address() + offset)); \
   }

 #define READ_DOUBLE_FIELD(p, offset) \
=======================================
--- /branches/experimental/gc/src/spaces-inl.h  Thu Dec 23 08:04:56 2010
+++ /branches/experimental/gc/src/spaces-inl.h  Thu Feb  3 05:20:16 2011
@@ -83,7 +83,7 @@
     // into the promotion queue to process it later.
     // If space for object was allocated somewhere beyond allocation
     // watermark this might cause garbage pointers to appear under allocation
-    // watermark. To avoid visiting them during dirty regions iteration
+    // watermark. To avoid visiting them during pointer-to-newspace iteration
     // which might be still in progress we store a valid allocation watermark
     // value and mark this page as having an invalid watermark.
     SetCachedAllocationWatermark(AllocationWatermark());
@@ -105,128 +105,6 @@
 Address Page::CachedAllocationWatermark() {
   return allocation_watermark_;
 }
-
-
-uint32_t Page::GetRegionMarks() {
-#ifdef ENABLE_CARDMARKING_WRITE_BARRIER
-  return dirty_regions_;
-#else
-  return kAllRegionsDirtyMarks;
-#endif
-}
-
-
-void Page::SetRegionMarks(uint32_t marks) {
-#ifdef ENABLE_CARDMARKING_WRITE_BARRIER
-  dirty_regions_ = marks;
-#endif
-}
-
-
-int Page::GetRegionNumberForAddress(Address addr) {
-#ifdef ENABLE_CARDMARKING_WRITE_BARRIER
-  // Each page is divided into 256 byte regions. Each region has a corresponding
-  // dirty mark bit in the page header. Region can contain intergenerational
-  // references iff its dirty mark is set.
-  // A normal 8K page contains exactly 32 regions so all region marks fit
-  // into 32-bit integer field. To calculate a region number we just divide
-  // offset inside page by region size.
-  // A large page can contain more then 32 regions. But we want to avoid
-  // additional write barrier code for distinguishing between large and normal
-  // pages so we just ignore the fact that addr points into a large page and
-  // calculate region number as if addr pointed into a normal 8K page. This way
-  // we get a region number modulo 32 so for large pages several regions might
-  // be mapped to a single dirty mark.
-  ASSERT_PAGE_ALIGNED(this->address());
-  STATIC_ASSERT((kPageAlignmentMask >> kRegionSizeLog2) < kBitsPerInt);
-
-  // We are using masking with kPageAlignmentMask instead of Page::Offset()
-  // to get an offset to the beginning of 8K page containing addr not to the
-  // beginning of actual page which can be bigger then 8K.
-  intptr_t offset_inside_normal_page = OffsetFrom(addr) & kPageAlignmentMask;
-  return static_cast<int>(offset_inside_normal_page >> kRegionSizeLog2);
-#else
-  return 0;
-#endif
-}
-
-
-uint32_t Page::GetRegionMaskForAddress(Address addr) {
-  return 1 << GetRegionNumberForAddress(addr);
-}
-
-
-uint32_t Page::GetRegionMaskForSpan(Address start, int length_in_bytes) {
-#ifdef ENABLE_CARDMARKING_WRITE_BARRIER
-  uint32_t result = 0;
-  if (length_in_bytes >= kPageSize) {
-    result = kAllRegionsDirtyMarks;
-  } else if (length_in_bytes > 0) {
-    int start_region = GetRegionNumberForAddress(start);
-    int end_region =
-        GetRegionNumberForAddress(start + length_in_bytes - kPointerSize);
-    uint32_t start_mask = (~0) << start_region;
-    uint32_t end_mask = ~((~1) << end_region);
-    result = start_mask & end_mask;
-    // if end_region < start_region, the mask is ored.
-    if (result == 0) result = start_mask | end_mask;
-  }
-#ifdef DEBUG
-  if (FLAG_enable_slow_asserts) {
-    uint32_t expected = 0;
-    for (Address a = start; a < start + length_in_bytes; a += kPointerSize) {
-      expected |= GetRegionMaskForAddress(a);
-    }
-    ASSERT(expected == result);
-  }
-#endif
-  return result;
-#else
-  return Page::kAllRegionsDirtyMarks;
-#endif
-}
-
-
-void Page::MarkRegionDirty(Address address) {
-  SetRegionMarks(GetRegionMarks() | GetRegionMaskForAddress(address));
-}
-
-
-bool Page::IsRegionDirty(Address address) {
-  return GetRegionMarks() & GetRegionMaskForAddress(address);
-}
-
-
-void Page::ClearRegionMarks(Address start, Address end, bool reaches_limit) {
-#ifdef ENABLE_CARDMARKING_WRITE_BARRIER
-  int rstart = GetRegionNumberForAddress(start);
-  int rend = GetRegionNumberForAddress(end);
-
-  if (reaches_limit) {
-    end += 1;
-  }
-
-  if ((rend - rstart) == 0) {
-    return;
-  }
-
-  uint32_t bitmask = 0;
-
-  if ((OffsetFrom(start) & kRegionAlignmentMask) == 0
-      || (start == ObjectAreaStart())) {
-    // First region is fully covered
-    bitmask = 1 << rstart;
-  }
-
-  while (++rstart < rend) {
-    bitmask |= 1 << rstart;
-  }
-
-  if (bitmask) {
-    SetRegionMarks(GetRegionMarks() & ~bitmask);
-  }
-#endif
-}


 void Page::FlipMeaningOfInvalidatedWatermarkFlag() {
@@ -258,7 +136,6 @@
   if (Heap::gc_state() == Heap::SCAVENGE) {
     SetCachedAllocationWatermark(ObjectAreaStart());
   }
-  SetRegionMarks(kAllRegionsCleanMarks);
 }


=======================================
--- /branches/experimental/gc/src/spaces.cc     Mon Jan 24 06:34:54 2011
+++ /branches/experimental/gc/src/spaces.cc     Thu Feb  3 05:20:16 2011
@@ -1717,7 +1717,7 @@
 void PagedSpace::FreePages(Page* prev, Page* last) {
   if (last == AllocationTopPage()) {
     // Pages are already at the end of used pages.
-    // Just mark them as continuos.
+    // Just mark them as continuous.
     Page* p = prev == NULL ? first_page_ : prev->next_page();
     Page* end_page = last->next_page();
     do {
@@ -1748,7 +1748,6 @@
     first->InvalidateWatermark(true);
     first->SetAllocationWatermark(first->ObjectAreaStart());
     first->SetCachedAllocationWatermark(first->ObjectAreaStart());
-    first->SetRegionMarks(Page::kAllRegionsCleanMarks);
     first->SetFlag(Page::IS_CONTINUOUS);
     first->markbits()->Clear();
     first = first->next_page();
@@ -2361,7 +2360,8 @@
 }


-void LargeObjectSpace::IterateDirtyRegions(ObjectSlotCallback copy_object) {
+void LargeObjectSpace::IteratePointersToNewSpace(
+    ObjectSlotCallback copy_object) {
   LargeObjectIterator it(this);
   for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
     // We only have code, sequential strings, or fixed arrays in large
@@ -2373,7 +2373,7 @@

       Address start = object->address();
       Address object_end = start + object->Size();
-      Heap::IteratePointersInDirtyRegion(start, object_end, copy_object);
+      Heap::IteratePointersToNewSpace(start, object_end, copy_object);
     }
   }
 }
@@ -2463,9 +2463,6 @@
                           object->Size(),
                           &code_visitor);
     } else if (object->IsFixedArray()) {
-      // We loop over fixed arrays ourselves, rather then using the visitor,
-      // because the visitor doesn't support the start/offset iteration
-      // needed for IsRegionDirty.
       FixedArray* array = FixedArray::cast(object);
       for (int j = 0; j < array->length(); j++) {
         Object* element = array->get(j);
@@ -2473,13 +2470,6 @@
           HeapObject* element_object = HeapObject::cast(element);
           ASSERT(Heap::Contains(element_object));
           ASSERT(element_object->map()->IsMap());
-          if (Heap::InNewSpace(element_object)) {
-            Address array_addr = object->address();
-            Address element_addr = array_addr + FixedArray::kHeaderSize +
-                j * kPointerSize;
-
-            ASSERT(Page::FromAddress(array_addr)->IsRegionDirty(element_addr));
-          }
         }
       }
     }
=======================================
--- /branches/experimental/gc/src/spaces.h      Mon Jan 24 06:34:54 2011
+++ /branches/experimental/gc/src/spaces.h      Thu Feb  3 05:20:16 2011
@@ -46,34 +46,26 @@
 //
 // The semispaces of the young generation are contiguous.  The old and map
 // spaces consists of a list of pages. A page has a page header and an object
-// area. A page size is deliberately chosen as 8K bytes.
-// The first word of a page is an opaque page header that has the
-// address of the next page and its ownership information. The second word may
-// have the allocation top address of this page. Heap objects are aligned to the
-// pointer size.
+// area.
 //
 // There is a separate large object space for objects larger than
 // Page::kMaxHeapObjectSize, so that they do not have to move during
 // collection. The large object space is paged. Pages in large object space
-// may be larger than 8K.
+// may be larger than the page size.
 //
-// A card marking write barrier is used to keep track of intergenerational
-// references. Old space pages are divided into regions of Page::kRegionSize
-// size. Each region has a corresponding dirty bit in the page header which is
-// set if the region might contain pointers to new space. For details about
-// dirty bits encoding see comments in the Page::GetRegionNumberForAddress()
-// method body.
+// A store-buffer based write barrier is used to keep track of intergenerational
+// references.  See store-buffer.h.
 //
-// During scavenges and mark-sweep collections we iterate intergenerational
-// pointers without decoding heap object maps so if the page belongs to old
-// pointer space or large object space it is essential to guarantee that
-// the page does not contain any garbage pointers to new space: every pointer
-// aligned word which satisfies the Heap::InNewSpace() predicate must be a
-// pointer to a live heap object in new space. Thus objects in old pointer
-// and large object spaces should have a special layout (e.g. no bare integer
-// fields). This requirement does not apply to map space which is iterated in
-// a special fashion. However we still require pointer fields of dead maps to
-// be cleaned.
+// During scavenges and mark-sweep collections we sometimes (after a store
+// buffer overflow) iterate intergenerational pointers without decoding heap
+// object maps so if the page belongs to old pointer space or large object
+// space it is essential to guarantee that the page does not contain any
+// garbage pointers to new space: every pointer aligned word which satisfies
+// the Heap::InNewSpace() predicate must be a pointer to a live heap object in
+// new space. Thus objects in old pointer and large object spaces should have a
+// special layout (e.g. no bare integer fields). This requirement does not
+// apply to map space which is iterated in a special fashion. However we still
+// require pointer fields of dead maps to be cleaned.
 //
 // To enable lazy cleaning of old space pages we use a notion of allocation
 // watermark. Every pointer under watermark is considered to be well formed.
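A concrete illustration of the layout rule above (the addresses are invented):

  // Suppose new space happens to occupy [0x20000000, 0x21000000).  An object
  // in old pointer space with a bare untagged field such as
  uintptr_t raw_field = 0x20001234;  // plain data, not a pointer
  // would pass the Heap::InNewSpace() test during an overflow-triggered scan
  // and be "updated" as though it were an old-to-new pointer, corrupting the
  // field.  Hence no bare integer fields in old pointer or large object
  // space, and pointer fields of dead maps must be cleared.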
@@ -498,24 +490,6 @@
   }

   // ---------------------------------------------------------------------
-  // Card marking support
-
-  static const uint32_t kAllRegionsCleanMarks = 0x0;
-  static const uint32_t kAllRegionsDirtyMarks = 0xFFFFFFFF;
-
-  inline uint32_t GetRegionMarks();
-  inline void SetRegionMarks(uint32_t dirty);
-
-  inline uint32_t GetRegionMaskForAddress(Address addr);
-  inline uint32_t GetRegionMaskForSpan(Address start, int length_in_bytes);
-  inline int GetRegionNumberForAddress(Address addr);
-
-  inline void MarkRegionDirty(Address addr);
-  inline bool IsRegionDirty(Address addr);
-
-  inline void ClearRegionMarks(Address start,
-                               Address end,
-                               bool reaches_limit);

   // Page size in bytes.  This must be a multiple of the OS page size.
   static const int kPageSize = 1 << kPageSizeBits;
@@ -541,15 +515,6 @@
       MarkbitsBitmap::kBitsPerCellLog2;


-#ifdef ENABLE_CARDMARKING_WRITE_BARRIER
-  static const int kDirtyFlagOffset = 2 * kPointerSize;
-  static const int kRegionSizeLog2 = 8;
-  static const int kRegionSize = 1 << kRegionSizeLog2;
-  static const intptr_t kRegionAlignmentMask = (kRegionSize - 1);
-
-  STATIC_CHECK(kRegionSize == kPageSize / kBitsPerInt);
-#endif
-
   enum PageFlag {
     // Page allocation watermark was bumped by preallocation during scavenge.
     // Correct watermark can be retrieved by CachedAllocationWatermark() method
@@ -2160,54 +2125,7 @@
   // Should be called after forced sweep to find out if map space needs
   // compaction.
   bool NeedsCompaction(int live_maps) {
-    return !MapPointersEncodable() && live_maps <= CompactionThreshold();
-  }
-
-  Address TopAfterCompaction(int live_maps) {
-    ASSERT(NeedsCompaction(live_maps));
-
-    int pages_left = live_maps / kMapsPerPage;
-    PageIterator it(this, PageIterator::ALL_PAGES);
-    while (pages_left-- > 0) {
-      ASSERT(it.has_next());
-      it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
-    }
-    ASSERT(it.has_next());
-    Page* top_page = it.next();
-    top_page->SetRegionMarks(Page::kAllRegionsCleanMarks);
-    ASSERT(top_page->is_valid());
-
-    int offset = live_maps % kMapsPerPage * Map::kSize;
-    Address top = top_page->ObjectAreaStart() + offset;
-    ASSERT(top < top_page->ObjectAreaEnd());
-    ASSERT(Contains(top));
-
-    return top;
-  }
-
-  void FinishCompaction(Address new_top, int live_maps) {
-    Page* top_page = Page::FromAddress(new_top);
-    ASSERT(top_page->is_valid());
-
-    SetAllocationInfo(&allocation_info_, top_page);
-    allocation_info_.top = new_top;
-
-    int new_size = live_maps * Map::kSize;
-    accounting_stats_.DeallocateBytes(accounting_stats_.Size());
-    accounting_stats_.AllocateBytes(new_size);
-
-#ifdef DEBUG
-    if (FLAG_enable_slow_asserts) {
-      intptr_t actual_size = 0;
-      for (Page* p = first_page_; p != top_page; p = p->next_page())
-        actual_size += kMapsPerPage * Map::kSize;
-      actual_size += (new_top - top_page->ObjectAreaStart());
-      ASSERT(accounting_stats_.Size() == actual_size);
-    }
-#endif
-
-    Shrink();
-    ResetFreeList();
+    return false;  // TODO(gc): Bring back map compaction.
   }

  protected:
@@ -2305,8 +2223,8 @@
   // if such a page doesn't exist.
   LargePage* FindPageContainingPc(Address pc);

-  // Iterates objects covered by dirty regions.
-  void IterateDirtyRegions(ObjectSlotCallback func);
+  // Iterates over pointers to new space.
+  void IteratePointersToNewSpace(ObjectSlotCallback func);

   // Frees unmarked objects.
   void FreeUnmarkedObjects();
=======================================
--- /branches/experimental/gc/src/store-buffer-inl.h    Thu Jan  6 06:05:23 2011
+++ /branches/experimental/gc/src/store-buffer-inl.h    Thu Feb  3 05:20:16 2011
@@ -50,6 +50,21 @@
     ASSERT(top < limit_);
   }
 }
+
+
+void StoreBuffer::EnterDirectlyIntoStoreBuffer(Address addr) {
+  if (store_buffer_rebuilding_enabled_) {
+    Address* top = old_top_;
+    *top++ = addr;
+    old_top_ = top;
+    if (top >= old_limit_) {
+      Counters::store_buffer_overflows.Increment();
+      store_buffer_mode_ = kStoreBufferDisabled;
+      old_top_ = old_start_;
+    }
+  }
+}
+

 } }  // namespace v8::internal

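Usage pattern for this fast path, mirroring the heap.cc changes above:
EnterDirectlyIntoStoreBuffer only records while a StoreBufferRebuildScope is
active (store_buffer_rebuilding_enabled_), so GC code wraps its
pointer-updating phases in such a scope.  A condensed sketch, where
slot_address is assumed to be an old-space slot still pointing into new space:

  {
    StoreBufferRebuildScope scope;  // enables recording during rebuild
    StoreBuffer::EnterDirectlyIntoStoreBuffer(slot_address);
  }  // scope exit runs StoreBuffer::CheckForFullBuffer()

Note the overflow handling: when old_top_ reaches old_limit_ the mode flips
to kStoreBufferDisabled and old_top_ is reset, so a rebuild that overflows
simply degrades to full old-space scanning on the next scavenge.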
=======================================
--- /branches/experimental/gc/src/store-buffer.cc       Mon Jan 24 06:34:54 2011
+++ /branches/experimental/gc/src/store-buffer.cc       Thu Feb  3 05:20:16 2011
@@ -40,9 +40,12 @@
 uintptr_t* StoreBuffer::hash_map_1_ = NULL;
 uintptr_t* StoreBuffer::hash_map_2_ = NULL;
 VirtualMemory* StoreBuffer::virtual_memory_ = NULL;
-bool StoreBuffer::must_scan_entire_memory_ = false;
+StoreBuffer::StoreBufferMode StoreBuffer::store_buffer_mode_ =
+    kStoreBufferFunctional;
 bool StoreBuffer::old_buffer_is_sorted_ = false;
 bool StoreBuffer::during_gc_ = false;
+bool StoreBuffer::store_buffer_rebuilding_enabled_ = false;
+bool StoreBuffer::may_move_store_buffer_entries_ = true;

 void StoreBuffer::Setup() {
   virtual_memory_ = new VirtualMemory(kStoreBufferSize * 3);
@@ -126,10 +129,11 @@
   // Remove adjacent duplicates and cells that do not point at new space.
   Address previous = NULL;
   Address* write = old_start_;
+  ASSERT(may_move_store_buffer_entries_);
   for (Address* read = old_start_; read < old_top_; read++) {
     Address current = *read;
     if (current != previous) {
-      if (Heap::InNewSpace(*reinterpret_cast<Address*>(current))) {
+      if (Heap::InNewSpace(*reinterpret_cast<Object**>(current))) {
         *write++ = current;
       }
     }
@@ -142,6 +146,10 @@
 void StoreBuffer::SortUniq() {
   Compact();
   if (old_buffer_is_sorted_) return;
+  if (store_buffer_mode_ == kStoreBufferDisabled) {
+    old_top_ = old_start_;
+    return;
+  }
   ZapHashTables();
   qsort(reinterpret_cast<void*>(old_start_),
         old_top_ - old_start_,
@@ -155,9 +163,7 @@

 #ifdef DEBUG
 void StoreBuffer::Clean() {
-  if (must_scan_entire_memory_) {
-    // We don't currently have a way to go back to using the store buffer.
-    // TODO(gc): We should rebuild the store buffer during GC.
+  if (store_buffer_mode_ == kStoreBufferDisabled) {
     old_top_ = old_start_;  // Just clear the cache.
     return;
   }
@@ -183,6 +189,31 @@
 }


+static Address* in_store_buffer_1_element_cache = NULL;
+
+
+bool StoreBuffer::CellIsInStoreBuffer(Address cell_address) {
+  if (!FLAG_enable_slow_asserts) return true;
+  if (store_buffer_mode_ != kStoreBufferFunctional) return true;
+  if (in_store_buffer_1_element_cache != NULL &&
+      *in_store_buffer_1_element_cache == cell_address) {
+    return true;
+  }
+  Address* top = reinterpret_cast<Address*>(Heap::store_buffer_top());
+  for (Address* current = top - 1; current >= start_; current--) {
+    if (*current == cell_address) {
+      in_store_buffer_1_element_cache = current;
+      return true;
+    }
+  }
+  for (Address* current = old_top_ - 1; current >= old_start_; current--) {
+    if (*current == cell_address) {
+      in_store_buffer_1_element_cache = current;
+      return true;
+    }
+  }
+  return false;
+}
 #endif


@@ -199,16 +230,13 @@
 void StoreBuffer::GCPrologue(GCType type, GCCallbackFlags flags) {
   ZapHashTables();
   during_gc_ = true;
-  if (type != kGCTypeScavenge) {
-    old_top_ = old_start_;
-    Heap::public_set_store_buffer_top(start_);
-  }
 }


 void StoreBuffer::Verify() {
 #ifdef DEBUG
-  if (FLAG_verify_heap && !StoreBuffer::must_scan_entire_memory()) {
+  if (FLAG_verify_heap &&
+      StoreBuffer::store_buffer_mode_ == kStoreBufferFunctional) {
     Heap::OldPointerSpaceCheckStoreBuffer(Heap::WATERMARK_SHOULD_BE_VALID);
     Heap::MapSpaceCheckStoreBuffer(Heap::WATERMARK_SHOULD_BE_VALID);
     Heap::LargeObjectSpaceCheckStoreBuffer();
@@ -219,16 +247,72 @@

 void StoreBuffer::GCEpilogue(GCType type, GCCallbackFlags flags) {
   during_gc_ = false;
+  if (store_buffer_mode_ == kStoreBufferBeingRebuilt) {
+    store_buffer_mode_ = kStoreBufferFunctional;
+  }
   Verify();
 }
+
+
+void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback callback) {
+  if (store_buffer_mode_ != kStoreBufferFunctional) {
+    old_top_ = old_start_;
+    ZapHashTables();
+    Heap::public_set_store_buffer_top(start_);
+    store_buffer_mode_ = kStoreBufferBeingRebuilt;
+    Heap::IteratePointers(Heap::old_pointer_space(),
+                          &Heap::IteratePointersToNewSpace,
+                          callback,
+                          Heap::WATERMARK_SHOULD_BE_VALID);
+
+    Heap::IteratePointers(Heap::map_space(),
+                          &Heap::IteratePointersFromMapsToNewSpace,
+                          callback,
+                          Heap::WATERMARK_SHOULD_BE_VALID);
+
+    Heap::lo_space()->IteratePointersToNewSpace(callback);
+  } else {
+    SortUniq();
+    Address* limit = old_top_;
+    old_top_ = old_start_;
+    {
+      DontMoveStoreBufferEntriesScope scope;
+      for (Address* current = old_start_; current < limit; current++) {
+#ifdef DEBUG
+        Address* saved_top = old_top_;
+#endif
+        Object** cell = reinterpret_cast<Object**>(*current);
+        Object* object = *cell;
+        // May be invalid if object is not in new space.
+        HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
+        if (Heap::InNewSpace(object)) {
+          callback(reinterpret_cast<HeapObject**>(cell), heap_object);
+        }
+        ASSERT(old_top_ == saved_top + 1 || old_top_ == saved_top);
+        ASSERT((old_top_ == saved_top + 1) ==
+               (Heap::InNewSpace(*cell) &&
+                   !Heap::InNewSpace(reinterpret_cast<Address>(cell)) &&
+                   Memory::Address_at(heap_object->address()) != NULL));
+      }
+    }
+  }
+}


 void StoreBuffer::Compact() {
   Address* top = reinterpret_cast<Address*>(Heap::store_buffer_top());
+
   if (top == start_) return;
+
+  // There's no check of the limit in the loop below so we check here for
+  // the worst case (compaction doesn't eliminate any pointers).
   ASSERT(top <= limit_);
   Heap::public_set_store_buffer_top(start_);
-  if (must_scan_entire_memory_) return;
+  if (top - start_ > old_limit_ - old_top_) {
+    CheckForFullBuffer();
+  }
+  if (store_buffer_mode_ == kStoreBufferDisabled) return;
+  ASSERT(may_move_store_buffer_entries_);
   // Goes through the addresses in the store buffer attempting to remove
   // duplicates.  In the interest of speed this is a lossy operation.  Some
   // duplicates will remain.  We have two hash tables with different hash
@@ -281,7 +365,7 @@
       // compression to be guaranteed to succeed.
       // TODO(gc): Set a flag to scan all of memory.
       Counters::store_buffer_overflows.Increment();
-      must_scan_entire_memory_ = true;
+      store_buffer_mode_ = kStoreBufferDisabled;
     }
   }
 }
=======================================
--- /branches/experimental/gc/src/store-buffer.h        Mon Jan 24 06:34:54 2011
+++ /branches/experimental/gc/src/store-buffer.h        Thu Feb  3 05:20:16 2011
@@ -36,6 +36,7 @@
 namespace v8 {
 namespace internal {

+typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);

 // Used to implement the write barrier by collecting addresses of pointers
 // between spaces.
@@ -46,8 +47,32 @@
   static void Setup();
   static void TearDown();

+  // This is used by the mutator to enter addresses into the store buffer.
   static inline void Mark(Address addr);

+  // This is used by the heap traversal to enter the addresses into the store
+  // buffer that should still be in the store buffer after GC.  It enters
+  // addresses directly into the old buffer because the GC starts by wiping the
+  // old buffer and thereafter only visits each cell once so there is no need
+  // to attempt to remove any dupes. During the first part of a scavenge we
+  // are using the store buffer to access the old spaces and at the same time
+  // we are rebuilding the store buffer using this function. There is, however
+  // no issue of overwriting the buffer we are iterating over, because this
+  // stage of the scavenge can only reduce the number of addresses in the store
+  // buffer (some objects are promoted so pointers to them do not need to be in
+  // the store buffer). The later parts of the scavenge process the promotion
+  // queue and they can overflow this buffer, which we must check for.
+  static inline void EnterDirectlyIntoStoreBuffer(Address addr);
+
+  enum RebuildStoreBufferMode {
+    kRebuildStoreBufferWhileIterating,
+    kPreserveStoreBufferWhileIterating};
+
+  // Iterates over all pointers that go from old space to new space. It will
+  // empty the store buffer as it starts so the callback should reenter
+  // surviving old-to-new pointers into the store buffer to rebuild it.
+  static void IteratePointersToNewSpace(ObjectSlotCallback callback);
+
   static const int kStoreBufferOverflowBit = 1 << 16;
   static const int kStoreBufferSize = kStoreBufferOverflowBit;
   static const int kStoreBufferLength = kStoreBufferSize / sizeof(Address);
@@ -62,7 +87,13 @@
   static Object*** Start() { return reinterpret_cast<Object***>(old_start_); }
   static Object*** Top() { return reinterpret_cast<Object***>(old_top_); }

-  static bool must_scan_entire_memory() { return must_scan_entire_memory_; }
+  enum StoreBufferMode {
+    kStoreBufferFunctional,
+    kStoreBufferDisabled,
+    kStoreBufferBeingRebuilt
+  };
+
+  static StoreBufferMode store_buffer_mode() { return store_buffer_mode_; }
   static bool old_buffer_is_sorted() { return old_buffer_is_sorted_; }

   // Goes through the store buffer removing pointers to things that have
@@ -72,6 +103,8 @@

 #ifdef DEBUG
   static void Clean();
+  // Slow, for asserts only.
+  static bool CellIsInStoreBuffer(Address cell);
 #endif

  private:
@@ -86,8 +119,10 @@
   static Address* old_top_;

   static bool old_buffer_is_sorted_;
-  static bool must_scan_entire_memory_;
+  static StoreBufferMode store_buffer_mode_;
   static bool during_gc_;
+  static bool store_buffer_rebuilding_enabled_;
+  static bool may_move_store_buffer_entries_;

   static VirtualMemory* virtual_memory_;
   static uintptr_t* hash_map_1_;
@@ -97,6 +132,42 @@
   static void Uniq();
   static void ZapHashTables();
   static bool HashTablesAreZapped();
+
+  friend class StoreBufferRebuildScope;
+  friend class DontMoveStoreBufferEntriesScope;
+};
+
+
+class StoreBufferRebuildScope {
+ public:
+  StoreBufferRebuildScope() :
+      stored_state_(StoreBuffer::store_buffer_rebuilding_enabled_) {
+    StoreBuffer::store_buffer_rebuilding_enabled_ = true;
+  }
+
+  ~StoreBufferRebuildScope() {
+    StoreBuffer::store_buffer_rebuilding_enabled_ = stored_state_;
+    StoreBuffer::CheckForFullBuffer();
+  }
+
+ private:
+  bool stored_state_;
+};
+
+
+class DontMoveStoreBufferEntriesScope {
+ public:
+  DontMoveStoreBufferEntriesScope() :
+      stored_state_(StoreBuffer::may_move_store_buffer_entries_) {
+    StoreBuffer::may_move_store_buffer_entries_ = false;
+  }
+
+  ~DontMoveStoreBufferEntriesScope() {
+    StoreBuffer::may_move_store_buffer_entries_ = stored_state_;
+  }
+
+ private:
+  bool stored_state_;
 };

 } }  // namespace v8::internal
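Putting the pieces together, the consumer contract of
StoreBuffer::IteratePointersToNewSpace is: the buffer is wiped as iteration
starts, and the callback re-enters each surviving old-to-new slot.  A
condensed sketch of the pattern used by heap.cc and mark-compact.cc above
(UpdateSlot is a hypothetical callback):

  // Callback under the new two-argument ObjectSlotCallback signature.
  static void UpdateSlot(HeapObject** slot, HeapObject* object) {
    // ... copy or promote object, write the new location into *slot, then
    // re-enter slot via EnterDirectlyIntoStoreBuffer if *slot still points
    // into new space ...
  }

  void ProcessOldToNewPointers() {
    StoreBufferRebuildScope scope;  // allow re-entry during iteration
    StoreBuffer::IteratePointersToNewSpace(&UpdateSlot);
  }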
=======================================
--- /branches/experimental/gc/src/v8globals.h   Thu Dec 23 08:04:56 2010
+++ /branches/experimental/gc/src/v8globals.h   Thu Feb  3 05:20:16 2011
@@ -249,7 +249,7 @@
 // Callback function on object slots, used for iterating heap object slots in
 // HeapObjects, global pointers to heap objects, etc. The callback allows the
 // callback function to change the value of the slot.
-typedef void (*ObjectSlotCallback)(HeapObject** pointer);
+typedef void (*ObjectSlotCallback)(HeapObject** pointer, HeapObject* object);


 // Callback function used for iterating objects in heap spaces,
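The new second argument is the slot's current value, so callbacks can assert
and branch on it without re-loading the slot (see the ASSERT(*p == object) in
UpdatePointerToNewGen above).  A minimal example callback (hypothetical, for
illustration only):

  static int new_space_pointers = 0;  // hypothetical counter

  static void CountNewSpacePointer(HeapObject** pointer, HeapObject* object) {
    ASSERT(*pointer == object);
    if (Heap::InNewSpace(object)) new_space_pointers++;
  }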
=======================================
--- /branches/experimental/gc/test/cctest/test-heap.cc  Thu Dec 23 08:04:56 2010
+++ /branches/experimental/gc/test/cctest/test-heap.cc  Thu Feb  3 05:20:16 2011
@@ -911,12 +911,6 @@
     return;
   }
   CHECK(Heap::old_pointer_space()->Contains(clone->address()));
-
-  // Step 5: verify validity of region dirty marks.
-  Address clone_addr = clone->address();
-  Page* page = Page::FromAddress(clone_addr);
-  // Check that region covering inobject property 1 is marked dirty.
-  CHECK(page->IsRegionDirty(clone_addr + (object_size - kPointerSize)));
 }

