Revision: 4476
Author: [email protected]
Date: Thu Apr 22 09:43:38 2010
Log: Fix bugs introduced by r4475:
- RelinkPageListInChunkOrder might relink unused pages into the middle of a sequence of used pages. Filler objects should be placed at the beginning of such unused pages; otherwise generic iterators (e.g. HeapObjectIterator) would not handle them correctly.
- ObjectAreaEnd() should not be used as the allocation limit for pages from a FixedSpace. Pages in such spaces do not use the top page_extra_ bytes of the object area.
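The first fix relies on the invariant that an object iterator advances by reading each object's size, so every byte of a page's scanned region must be covered by some object header. A minimal standalone sketch of that invariant (not V8 source; FakeFreeNode, CountObjects and the page size are assumed stand-ins for FreeListNode and a HeapObjectIterator-style walk):

// Minimal sketch (not V8 source): why an unused page relinked between used
// pages needs a filler object at its start.
#include <cstdio>

struct FakeFreeNode { int size; };  // stands in for a free-space filler header

// Advances by reading each object's size, as an object iterator would, so
// every byte of [start, limit) must belong to some object. Covering an
// unused region with a single filler preserves that invariant.
int CountObjects(char* start, char* limit) {
  int count = 0;
  for (char* cur = start; cur < limit;
       cur += reinterpret_cast<FakeFreeNode*>(cur)->size) {
    ++count;  // without a filler over the gap, this size read would be garbage
  }
  return count;
}

int main() {
  alignas(FakeFreeNode) char page[64];  // a stand-in for one unused page
  // Place one filler at the beginning of the page spanning all of its space.
  reinterpret_cast<FakeFreeNode*>(page)->size = sizeof(page);
  printf("objects seen: %d\n", CountObjects(page, page + sizeof(page)));
  return 0;
}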

[email protected]
Review URL: http://codereview.chromium.org/1700005
http://code.google.com/p/v8/source/detail?r=4476
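The second fix comes down to the arithmetic behind page_extra_: a FixedSpace page holds only objects of one size, so the last (object area % object size) bytes can never be allocated and the usable limit sits below ObjectAreaEnd(). A minimal sketch with assumed, non-V8 constants:

// Minimal sketch (not V8 source) of the page_extra_ arithmetic. The
// constants kObjectAreaSize and kFixedObjectSize are illustrative
// assumptions, not V8's real values.
#include <cstdio>

int main() {
  const int kObjectAreaSize = 8 * 1024;  // hypothetical object area per page
  const int kFixedObjectSize = 88;       // hypothetical fixed object size

  // page_extra_: bytes at the top of the object area that can never hold an
  // object of the fixed size, so they must be excluded from the limit.
  int page_extra = kObjectAreaSize % kFixedObjectSize;

  // The usable limit is therefore ObjectAreaEnd() - page_extra, which is what
  // the new FixedSpace::PageAllocationLimit() returns.
  printf("page_extra = %d bytes, usable bytes per page = %d\n",
         page_extra, kObjectAreaSize - page_extra);
  return 0;
}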

Modified:
 /branches/bleeding_edge/src/mark-compact.cc
 /branches/bleeding_edge/src/spaces.cc
 /branches/bleeding_edge/src/spaces.h
 /branches/bleeding_edge/test/cctest/test-heap.cc

=======================================
--- /branches/bleeding_edge/src/mark-compact.cc Thu Apr 22 07:42:21 2010
+++ /branches/bleeding_edge/src/mark-compact.cc Thu Apr 22 09:43:38 2010
@@ -1056,21 +1056,15 @@
   while (it.has_next()) {
     Page* p = it.next();

-    if (p->WasInUseBeforeMC()) {
-      // The offset of each live object in the page from the first live object
-      // in the page.
-      int offset = 0;
-      EncodeForwardingAddressesInRange<Alloc,
-                                       EncodeForwardingAddressInPagedSpace,
-                                       ProcessNonLive>(
-          p->ObjectAreaStart(),
-          p->AllocationTop(),
-          &offset);
-    } else {
-      // Mark whole unused page as a free region.
-      EncodeFreeRegion(p->ObjectAreaStart(),
-                       p->AllocationTop() - p->ObjectAreaStart());
-    }
+    // The offset of each live object in the page from the first live object
+    // in the page.
+    int offset = 0;
+    EncodeForwardingAddressesInRange<Alloc,
+                                     EncodeForwardingAddressInPagedSpace,
+                                     ProcessNonLive>(
+        p->ObjectAreaStart(),
+        p->AllocationTop(),
+        &offset);
   }
 }

@@ -1397,17 +1391,17 @@
   }

   if (new_allocation_top != NULL) {
+#ifdef DEBUG
     Page* new_allocation_top_page = Page::FromAllocationTop(new_allocation_top);
-
     ASSERT(((first_empty_page == NULL) &&
             (new_allocation_top_page == space->AllocationTopPage())) ||
            ((first_empty_page != NULL) && (last_free_size > 0) &&
             (new_allocation_top_page == prec_first_empty_page)) ||
            ((first_empty_page != NULL) && (last_free_size == 0) &&
             (new_allocation_top_page == first_empty_page)));
-
-    space->SetTop(new_allocation_top,
-                  new_allocation_top_page->ObjectAreaEnd());
+#endif
+
+    space->SetTop(new_allocation_top);
   }
 }

=======================================
--- /branches/bleeding_edge/src/spaces.cc       Thu Apr 22 07:42:21 2010
+++ /branches/bleeding_edge/src/spaces.cc       Thu Apr 22 09:43:38 2010
@@ -1039,7 +1039,7 @@
         // The next page will be above the allocation top.
         above_allocation_top = true;
       } else {
-        ASSERT(top == current_page->ObjectAreaEnd() - page_extra_);
+        ASSERT(top == PageAllocationLimit(current_page));
       }

       // It should be packed with objects from the bottom to the top.
@@ -1977,12 +1977,12 @@
   if (will_compact) {
     // MarkCompact collector relies on WAS_IN_USE_BEFORE_MC page flag
     // to skip unused pages. Update flag value for all pages in space.
-    PageIterator it(this, PageIterator::ALL_PAGES);
+    PageIterator all_pages_iterator(this, PageIterator::ALL_PAGES);
     Page* last_in_use = AllocationTopPage();
     bool in_use = true;

-    while (it.has_next()) {
-      Page* p = it.next();
+    while (all_pages_iterator.has_next()) {
+      Page* p = all_pages_iterator.next();
       p->SetWasInUseBeforeMC(in_use);
       if (p == last_in_use) {
         // We passed a page containing allocation top. All consequent
@@ -2005,7 +2005,7 @@
         // used page so various object iterators will continue to work properly.

         int size_in_bytes =
-            last_in_use->ObjectAreaEnd() - last_in_use->AllocationTop();
+            PageAllocationLimit(last_in_use) - last_in_use->AllocationTop();

         if (size_in_bytes > 0) {
           // There is still some space left on this page. Create a fake
@@ -2015,16 +2015,28 @@

           FreeListNode* node =
               FreeListNode::FromAddress(last_in_use->AllocationTop());
-          node->set_size(last_in_use->ObjectAreaEnd() -
-                         last_in_use->AllocationTop());
+          node->set_size(size_in_bytes);
         }

         // New last in use page was in the middle of the list before
         // sorting so it full.
-        SetTop(new_last_in_use->AllocationTop(),
-               new_last_in_use->AllocationTop());
+        SetTop(new_last_in_use->AllocationTop());

         ASSERT(AllocationTopPage() == new_last_in_use);
+        ASSERT(AllocationTopPage()->WasInUseBeforeMC());
+      }
+
+      PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE);
+      while (pages_in_use_iterator.has_next()) {
+        Page* p = pages_in_use_iterator.next();
+        if (!p->WasInUseBeforeMC()) {
+          // Empty page is in the middle of a sequence of used pages.
+          // Create a fake object which will occupy all free space on this page.
+          // Otherwise iterators would not be able to scan this page correctly.
+          FreeListNode* node =
+              FreeListNode::FromAddress(p->ObjectAreaStart());
+          node->set_size(PageAllocationLimit(p) - p->ObjectAreaStart());
+        }
       }

       page_list_is_chunk_ordered_ = true;
@@ -2544,7 +2556,7 @@
 HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
                                            int size_in_bytes) {
   ASSERT(current_page->next_page()->is_valid());
-  ASSERT(current_page->ObjectAreaEnd() - allocation_info_.top == page_extra_);
+  ASSERT(allocation_info_.top == PageAllocationLimit(current_page));
   ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
   accounting_stats_.WasteBytes(page_extra_);
   SetAllocationInfo(&allocation_info_, current_page->next_page());
=======================================
--- /branches/bleeding_edge/src/spaces.h        Thu Apr 22 07:42:21 2010
+++ /branches/bleeding_edge/src/spaces.h        Thu Apr 22 09:43:38 2010
@@ -927,7 +927,14 @@
   // Prepares for a mark-compact GC.
   virtual void PrepareForMarkCompact(bool will_compact);

-  virtual Address PageAllocationTop(Page* page) = 0;
+  // The top of allocation in a page in this space. Undefined if page is unused.
+  Address PageAllocationTop(Page* page) {
+    return page == TopPageOf(allocation_info_) ? top()
+        : PageAllocationLimit(page);
+  }
+
+  // The limit of allocation for a page in this space.
+  virtual Address PageAllocationLimit(Page* page) = 0;

   // Current capacity without growing (Size() + Available() + Waste()).
   int Capacity() { return accounting_stats_.Capacity(); }
@@ -970,9 +977,9 @@
   void FreePages(Page* prev, Page* last);

   // Set space allocation info.
-  void SetTop(Address top, Address limit) {
+  void SetTop(Address top) {
     allocation_info_.top = top;
-    allocation_info_.limit = limit;
+    allocation_info_.limit = PageAllocationLimit(Page::FromAllocationTop(top));
   }

// ---------------------------------------------------------------------------
@@ -1724,9 +1731,9 @@
   // pointer).
   int AvailableFree() { return free_list_.available(); }

-  // The top of allocation in a page in this space. Undefined if page is unused.
-  virtual Address PageAllocationTop(Page* page) {
-    return page == TopPageOf(allocation_info_) ? top() : page->ObjectAreaEnd();
+  // The limit of allocation for a page in this space.
+  virtual Address PageAllocationLimit(Page* page) {
+    return page->ObjectAreaEnd();
   }

   // Give a block of memory to the space's free list.  It might be added to
@@ -1792,10 +1799,9 @@
     page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
   }

-  // The top of allocation in a page in this space. Undefined if page is unused.
-  virtual Address PageAllocationTop(Page* page) {
-    return page == TopPageOf(allocation_info_) ? top()
-        : page->ObjectAreaEnd() - page_extra_;
+  // The limit of allocation for a page in this space.
+  virtual Address PageAllocationLimit(Page* page) {
+    return page->ObjectAreaEnd() - page_extra_;
   }

   int object_size_in_bytes() { return object_size_in_bytes_; }
=======================================
--- /branches/bleeding_edge/test/cctest/test-heap.cc Thu Apr 22 07:42:21 2010
+++ /branches/bleeding_edge/test/cctest/test-heap.cc Thu Apr 22 09:43:38 2010
@@ -830,7 +830,7 @@
   }
   CHECK(bytes_to_page > FixedArray::kHeaderSize);

-  intptr_t* flags_ptr = &Page::FromAddress(next_page)->flags;
+  int* flags_ptr = &Page::FromAddress(next_page)->flags;
   Address flags_addr = reinterpret_cast<Address>(flags_ptr);

   int bytes_to_allocate =
