Revision: 10359
Author:   [email protected]
Date:     Tue Jan 10 00:50:39 2012
Log:      Merge r10334 and r10273 from the bleeding_edge to the 3.7 branch.

Ensure that store buffer filtering hash sets are cleared after StoreBuffer::Filter.
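
For context, the filtering hash sets are only a cache: an address found in
either set is guaranteed to already be recorded in the old part of the store
buffer, so duplicate insertions can be skipped. Any operation that rewrites
the old buffer (Filter, Compact, iteration) therefore leaves the cache stale,
and a stale hit would silently drop a remembered pointer. The sketch below is
a simplified model of that lossy two-set cache, not the V8 code verbatim: it
omits the kPointerSizeLog2 pre-shift and the extra mixing of the second hash,
and FilteringCache/InsertOrFind are illustrative names.

    #include <string.h>
    #include <stdint.h>

    static const int kLengthLog2 = 12;
    static const int kLength = 1 << kLengthLog2;

    // Simplified model of the filtering hash sets: two direct-mapped
    // tables used as a lossy membership cache.  Zero is the empty-slot
    // sentinel, so address 0 cannot be cached (true of the real code
    // too, which stores nonzero heap addresses).
    struct FilteringCache {
      uintptr_t set1[kLength];
      uintptr_t set2[kLength];
      bool empty;

      FilteringCache() : empty(false) { Clear(); }

      // Must run whenever the backing buffer is rewritten; a stale hit
      // would make the caller skip re-recording an address the buffer
      // no longer holds, which is the bug this patch guards against.
      void Clear() {
        if (empty) return;  // Cheap no-op when nothing is cached.
        memset(set1, 0, sizeof(set1));
        memset(set2, 0, sizeof(set2));
        empty = true;
      }

      // Returns true only if addr was inserted earlier and not evicted.
      // False negatives merely let a duplicate through, which is fine.
      bool InsertOrFind(uintptr_t addr) {
        empty = false;
        int h1 = (addr ^ (addr >> kLengthLog2)) & (kLength - 1);
        if (set1[h1] == addr) return true;
        int h2 = (addr - (addr >> kLengthLog2)) & (kLength - 1);
        if (set2[h2] == addr) return true;
        if (set1[h1] == 0) {
          set1[h1] = addr;
        } else if (set2[h2] == 0) {
          set2[h2] = addr;
        } else {
          set1[h1] = addr;  // Both slots occupied: evict lossily.
          set2[h2] = 0;
        }
        return false;
      }
    };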

[email protected]

Review URL: http://codereview.chromium.org/8964025
------------------------------------------------------------------------

Do not delay sweeping of pages that are completely free.
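
Previously the sweeper hit the lazy-sweeping early-out first and postponed
every remaining page, so a page with no live bytes stayed unswept and
unreleased until lazy sweeping reached it. The mark-compact.cc hunk below
moves the free-page check ahead of that early-out. A rough sketch of the
resulting loop, using hypothetical stand-in types and stubbed helpers (the
elided branch body is paraphrased, not the exact V8 code):

    #include <stddef.h>

    // Hypothetical stand-ins for the V8 types; only the ordering of the
    // two checks in the sweep loop is the point here.
    struct Page {
      Page* next;
      int live_bytes;
      Page* next_page() { return next; }
      int LiveBytes() { return live_bytes; }
    };

    void SweepPage(Page* p) { (void)p; /* eager sweep, elided */ }
    void ReleasePage(Page* p) { (void)p; /* unlink and free, elided */ }

    // After the patch the completely-free test runs before the lazy-
    // sweeping early-out, so empty pages are released immediately (one
    // spare is kept) instead of being parked on the unswept list.
    void SweepSpace(Page* first, bool lazy_sweeping_active) {
      bool unused_page_present = false;
      Page* p = first;
      while (p != NULL) {
        Page* next = p->next_page();  // Read before a possible release.
        if (p->LiveBytes() == 0) {
          if (unused_page_present) {
            ReleasePage(p);  // Freed now; never lazily postponed.
            p = next;
            continue;
          }
          unused_page_present = true;  // First empty page kept as spare.
        }
        if (!lazy_sweeping_active) {
          SweepPage(p);  // In lazy mode this step is postponed instead.
        }
        p = next;
      }
    }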

[email protected]

Review URL: http://codereview.chromium.org/8997004
------------------------------------------------------------------------
Review URL: http://codereview.chromium.org/9159001
http://code.google.com/p/v8/source/detail?r=10359

Modified:
 /branches/3.7/src/mark-compact.cc
 /branches/3.7/src/spaces.cc
 /branches/3.7/src/store-buffer.cc
 /branches/3.7/src/store-buffer.h
 /branches/3.7/src/version.cc

=======================================
--- /branches/3.7/src/mark-compact.cc   Fri Dec  9 04:01:59 2011
+++ /branches/3.7/src/mark-compact.cc   Tue Jan 10 00:50:39 2012
@@ -3601,14 +3601,6 @@
       // Will be processed in EvacuateNewSpaceAndCandidates.
       continue;
     }
-
-    if (lazy_sweeping_active) {
-      if (FLAG_gc_verbose) {
-        PrintF("Sweeping 0x%" V8PRIxPTR " lazily postponed.\n",
-               reinterpret_cast<intptr_t>(p));
-      }
-      continue;
-    }

     // One unused page is kept, all further are released before sweeping them.
     if (p->LiveBytes() == 0) {
@@ -3622,6 +3614,14 @@
       }
       unused_page_present = true;
     }
+
+    if (lazy_sweeping_active) {
+      if (FLAG_gc_verbose) {
+        PrintF("Sweeping 0x%" V8PRIxPTR " lazily postponed.\n",
+               reinterpret_cast<intptr_t>(p));
+      }
+      continue;
+    }

     switch (sweeper) {
       case CONSERVATIVE: {
=======================================
--- /branches/3.7/src/spaces.cc Mon Dec  5 09:05:37 2011
+++ /branches/3.7/src/spaces.cc Tue Jan 10 00:50:39 2012
@@ -752,7 +752,7 @@
 void PagedSpace::ReleasePage(Page* page) {
   ASSERT(page->LiveBytes() == 0);

-  // Adjust list of unswept pages if the page is it's head or tail.
+  // Adjust list of unswept pages if the page is the head of the list.
   if (first_unswept_page_ == page) {
     first_unswept_page_ = page->next_page();
     if (first_unswept_page_ == anchor()) {
=======================================
--- /branches/3.7/src/store-buffer.cc   Thu Oct 27 00:38:48 2011
+++ /branches/3.7/src/store-buffer.cc   Tue Jan 10 00:50:39 2012
@@ -48,8 +48,9 @@
       callback_(NULL),
       may_move_store_buffer_entries_(true),
       virtual_memory_(NULL),
-      hash_map_1_(NULL),
-      hash_map_2_(NULL) {
+      hash_set_1_(NULL),
+      hash_set_2_(NULL),
+      hash_sets_are_empty_(true) {
 }


@@ -81,17 +82,18 @@
                           false);  // Not executable.
   heap_->public_set_store_buffer_top(start_);

-  hash_map_1_ = new uintptr_t[kHashMapLength];
-  hash_map_2_ = new uintptr_t[kHashMapLength];
-
-  ZapHashTables();
+  hash_set_1_ = new uintptr_t[kHashSetLength];
+  hash_set_2_ = new uintptr_t[kHashSetLength];
+  hash_sets_are_empty_ = false;
+
+  ClearFilteringHashSets();
 }


 void StoreBuffer::TearDown() {
   delete virtual_memory_;
-  delete[] hash_map_1_;
-  delete[] hash_map_2_;
+  delete[] hash_set_1_;
+  delete[] hash_set_2_;
   delete[] old_start_;
   old_start_ = old_top_ = old_limit_ = NULL;
   start_ = limit_ = NULL;
@@ -132,7 +134,6 @@


 void StoreBuffer::Uniq() {
-  ASSERT(HashTablesAreZapped());
   // Remove adjacent duplicates and cells that do not point at new space.
   Address previous = NULL;
   Address* write = old_start_;
@@ -245,13 +246,16 @@
     }
   }
   old_top_ = new_top;
+
+  // Filtering hash sets are inconsistent with the store buffer after this
+  // operation.
+  ClearFilteringHashSets();
 }


 void StoreBuffer::SortUniq() {
   Compact();
   if (old_buffer_is_sorted_) return;
-  ZapHashTables();
   qsort(reinterpret_cast<void*>(old_start_),
         old_top_ - old_start_,
         sizeof(*old_top_),
@@ -259,6 +263,10 @@
   Uniq();

   old_buffer_is_sorted_ = true;
+
+  // Filtering hash sets are inconsistent with the store buffer after this
+  // operation.
+  ClearFilteringHashSets();
 }


@@ -274,33 +282,21 @@
   if (page_has_scan_on_scavenge_flag) {
     Filter(MemoryChunk::SCAN_ON_SCAVENGE);
   }
-  ZapHashTables();
+
+  // Filtering hash sets are inconsistent with the store buffer after
+  // iteration.
+  ClearFilteringHashSets();
+
   return page_has_scan_on_scavenge_flag;
 }


 #ifdef DEBUG
 void StoreBuffer::Clean() {
-  ZapHashTables();
+  ClearFilteringHashSets();
   Uniq();  // Also removes things that no longer point to new space.
   CheckForFullBuffer();
 }
-
-
-static bool Zapped(char* start, int size) {
-  for (int i = 0; i < size; i++) {
-    if (start[i] != 0) return false;
-  }
-  return true;
-}
-
-
-bool StoreBuffer::HashTablesAreZapped() {
-  return Zapped(reinterpret_cast<char*>(hash_map_1_),
-                sizeof(uintptr_t) * kHashMapLength) &&
-      Zapped(reinterpret_cast<char*>(hash_map_2_),
-             sizeof(uintptr_t) * kHashMapLength);
-}


 static Address* in_store_buffer_1_element_cache = NULL;
@@ -330,18 +326,21 @@
 #endif


-void StoreBuffer::ZapHashTables() {
-  memset(reinterpret_cast<void*>(hash_map_1_),
-         0,
-         sizeof(uintptr_t) * kHashMapLength);
-  memset(reinterpret_cast<void*>(hash_map_2_),
-         0,
-         sizeof(uintptr_t) * kHashMapLength);
+void StoreBuffer::ClearFilteringHashSets() {
+  if (!hash_sets_are_empty_) {
+    memset(reinterpret_cast<void*>(hash_set_1_),
+           0,
+           sizeof(uintptr_t) * kHashSetLength);
+    memset(reinterpret_cast<void*>(hash_set_2_),
+           0,
+           sizeof(uintptr_t) * kHashSetLength);
+    hash_sets_are_empty_ = true;
+  }
 }


 void StoreBuffer::GCPrologue() {
-  ZapHashTables();
+  ClearFilteringHashSets();
   during_gc_ = true;
 }

@@ -651,8 +650,9 @@
   ASSERT(may_move_store_buffer_entries_);
   // Goes through the addresses in the store buffer attempting to remove
   // duplicates.  In the interest of speed this is a lossy operation.  Some
-  // duplicates will remain.  We have two hash tables with different hash
+  // duplicates will remain.  We have two hash sets with different hash
   // functions to reduce the number of unnecessary clashes.
+  hash_sets_are_empty_ = false;  // Hash sets are in use.
   for (Address* current = start_; current < top; current++) {
     ASSERT(!heap_->cell_space()->Contains(*current));
     ASSERT(!heap_->code_space()->Contains(*current));
@@ -661,21 +661,21 @@
     // Shift out the last bits including any tags.
     int_addr >>= kPointerSizeLog2;
     int hash1 =
-        ((int_addr ^ (int_addr >> kHashMapLengthLog2)) & (kHashMapLength - 1));
-    if (hash_map_1_[hash1] == int_addr) continue;
+        ((int_addr ^ (int_addr >> kHashSetLengthLog2)) & (kHashSetLength - 1));
+    if (hash_set_1_[hash1] == int_addr) continue;
     int hash2 =
-        ((int_addr - (int_addr >> kHashMapLengthLog2)) & (kHashMapLength - 1));
-    hash2 ^= hash2 >> (kHashMapLengthLog2 * 2);
-    if (hash_map_2_[hash2] == int_addr) continue;
-    if (hash_map_1_[hash1] == 0) {
-      hash_map_1_[hash1] = int_addr;
-    } else if (hash_map_2_[hash2] == 0) {
-      hash_map_2_[hash2] = int_addr;
+        ((int_addr - (int_addr >> kHashSetLengthLog2)) & (kHashSetLength - 1));
+    hash2 ^= hash2 >> (kHashSetLengthLog2 * 2);
+    if (hash_set_2_[hash2] == int_addr) continue;
+    if (hash_set_1_[hash1] == 0) {
+      hash_set_1_[hash1] = int_addr;
+    } else if (hash_set_2_[hash2] == 0) {
+      hash_set_2_[hash2] = int_addr;
     } else {
       // Rather than slowing down we just throw away some entries.  This will
       // cause some duplicates to remain undetected.
-      hash_map_1_[hash1] = int_addr;
-      hash_map_2_[hash2] = 0;
+      hash_set_1_[hash1] = int_addr;
+      hash_set_2_[hash2] = 0;
     }
     old_buffer_is_sorted_ = false;
     old_buffer_is_filtered_ = false;
=======================================
--- /branches/3.7/src/store-buffer.h    Thu Nov 10 03:38:15 2011
+++ /branches/3.7/src/store-buffer.h    Tue Jan 10 00:50:39 2012
@@ -85,8 +85,8 @@
   static const int kStoreBufferSize = kStoreBufferOverflowBit;
   static const int kStoreBufferLength = kStoreBufferSize / sizeof(Address);
   static const int kOldStoreBufferLength = kStoreBufferLength * 16;
-  static const int kHashMapLengthLog2 = 12;
-  static const int kHashMapLength = 1 << kHashMapLengthLog2;
+  static const int kHashSetLengthLog2 = 12;
+  static const int kHashSetLength = 1 << kHashSetLengthLog2;

   void Compact();

@@ -146,13 +146,18 @@
   bool may_move_store_buffer_entries_;

   VirtualMemory* virtual_memory_;
-  uintptr_t* hash_map_1_;
-  uintptr_t* hash_map_2_;
+
+  // Two hash sets used for filtering.
+  // If address is in the hash set then it is guaranteed to be in the
+  // old part of the store buffer.
+  uintptr_t* hash_set_1_;
+  uintptr_t* hash_set_2_;
+  bool hash_sets_are_empty_;
+
+  void ClearFilteringHashSets();

   void CheckForFullBuffer();
   void Uniq();
-  void ZapHashTables();
-  bool HashTablesAreZapped();
   void ExemptPopularPages(int prime_sample_step, int threshold);

   void FindPointersToNewSpaceInRegion(Address start,
=======================================
--- /branches/3.7/src/version.cc        Fri Jan  6 03:37:26 2012
+++ /branches/3.7/src/version.cc        Tue Jan 10 00:50:39 2012
@@ -35,7 +35,7 @@
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     7
 #define BUILD_NUMBER      12
-#define PATCH_LEVEL       13
+#define PATCH_LEVEL       14
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
 #define IS_CANDIDATE_VERSION 0

