Revision: 10339
Author:   [email protected]
Date:     Thu Jan  5 03:49:16 2012
Log:      Merge r10334 from bleeding_edge to trunk.

Ensure that store buffer filtering hash sets are cleared after StoreBuffer::Filter.
Review URL: http://codereview.chromium.org/9113010
http://code.google.com/p/v8/source/detail?r=10339

Modified:
 /trunk/src/store-buffer.cc
 /trunk/src/store-buffer.h
 /trunk/src/version.cc

=======================================
--- /trunk/src/store-buffer.cc  Tue Dec 13 00:07:27 2011
+++ /trunk/src/store-buffer.cc  Thu Jan  5 03:49:16 2012
@@ -49,8 +49,9 @@
       callback_(NULL),
       may_move_store_buffer_entries_(true),
       virtual_memory_(NULL),
-      hash_map_1_(NULL),
-      hash_map_2_(NULL) {
+      hash_set_1_(NULL),
+      hash_set_2_(NULL),
+      hash_sets_are_empty_(true) {
 }


@@ -97,18 +98,19 @@
                                 false));  // Not executable.
   heap_->public_set_store_buffer_top(start_);

-  hash_map_1_ = new uintptr_t[kHashMapLength];
-  hash_map_2_ = new uintptr_t[kHashMapLength];
-
-  ZapHashTables();
+  hash_set_1_ = new uintptr_t[kHashSetLength];
+  hash_set_2_ = new uintptr_t[kHashSetLength];
+  hash_sets_are_empty_ = false;
+
+  ClearFilteringHashSets();
 }


 void StoreBuffer::TearDown() {
   delete virtual_memory_;
   delete old_virtual_memory_;
-  delete[] hash_map_1_;
-  delete[] hash_map_2_;
+  delete[] hash_set_1_;
+  delete[] hash_set_2_;
   old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL;
   start_ = limit_ = NULL;
   heap_->public_set_store_buffer_top(start_);
@@ -148,7 +150,6 @@


 void StoreBuffer::Uniq() {
-  ASSERT(HashTablesAreZapped());
   // Remove adjacent duplicates and cells that do not point at new space.
   Address previous = NULL;
   Address* write = old_start_;
@@ -272,13 +273,16 @@
     }
   }
   old_top_ = new_top;
+
+  // Filtering hash sets are inconsistent with the store buffer after this
+  // operation.
+  ClearFilteringHashSets();
 }


 void StoreBuffer::SortUniq() {
   Compact();
   if (old_buffer_is_sorted_) return;
-  ZapHashTables();
   qsort(reinterpret_cast<void*>(old_start_),
         old_top_ - old_start_,
         sizeof(*old_top_),
@@ -286,6 +290,10 @@
   Uniq();

   old_buffer_is_sorted_ = true;
+
+  // Filtering hash sets are inconsistent with the store buffer after this
+  // operation.
+  ClearFilteringHashSets();
 }


@@ -301,33 +309,21 @@
   if (page_has_scan_on_scavenge_flag) {
     Filter(MemoryChunk::SCAN_ON_SCAVENGE);
   }
-  ZapHashTables();
+
+  // Filtering hash sets are inconsistent with the store buffer after
+  // iteration.
+  ClearFilteringHashSets();
+
   return page_has_scan_on_scavenge_flag;
 }


 #ifdef DEBUG
 void StoreBuffer::Clean() {
-  ZapHashTables();
+  ClearFilteringHashSets();
   Uniq();  // Also removes things that no longer point to new space.
   CheckForFullBuffer();
 }
-
-
-static bool Zapped(char* start, int size) {
-  for (int i = 0; i < size; i++) {
-    if (start[i] != 0) return false;
-  }
-  return true;
-}
-
-
-bool StoreBuffer::HashTablesAreZapped() {
-  return Zapped(reinterpret_cast<char*>(hash_map_1_),
-                sizeof(uintptr_t) * kHashMapLength) &&
-      Zapped(reinterpret_cast<char*>(hash_map_2_),
-             sizeof(uintptr_t) * kHashMapLength);
-}


 static Address* in_store_buffer_1_element_cache = NULL;
@@ -357,18 +353,21 @@
 #endif


-void StoreBuffer::ZapHashTables() {
-  memset(reinterpret_cast<void*>(hash_map_1_),
-         0,
-         sizeof(uintptr_t) * kHashMapLength);
-  memset(reinterpret_cast<void*>(hash_map_2_),
-         0,
-         sizeof(uintptr_t) * kHashMapLength);
+void StoreBuffer::ClearFilteringHashSets() {
+  if (!hash_sets_are_empty_) {
+    memset(reinterpret_cast<void*>(hash_set_1_),
+           0,
+           sizeof(uintptr_t) * kHashSetLength);
+    memset(reinterpret_cast<void*>(hash_set_2_),
+           0,
+           sizeof(uintptr_t) * kHashSetLength);
+    hash_sets_are_empty_ = true;
+  }
 }


 void StoreBuffer::GCPrologue() {
-  ZapHashTables();
+  ClearFilteringHashSets();
   during_gc_ = true;
 }

@@ -676,8 +675,9 @@
   ASSERT(may_move_store_buffer_entries_);
   // Goes through the addresses in the store buffer attempting to remove
   // duplicates.  In the interest of speed this is a lossy operation.  Some
-  // duplicates will remain.  We have two hash tables with different hash
+  // duplicates will remain.  We have two hash sets with different hash
   // functions to reduce the number of unnecessary clashes.
+  hash_sets_are_empty_ = false;  // Hash sets are in use.
   for (Address* current = start_; current < top; current++) {
     ASSERT(!heap_->cell_space()->Contains(*current));
     ASSERT(!heap_->code_space()->Contains(*current));
@@ -686,21 +686,21 @@
     // Shift out the last bits including any tags.
     int_addr >>= kPointerSizeLog2;
     int hash1 =
-        ((int_addr ^ (int_addr >> kHashMapLengthLog2)) & (kHashMapLength - 1));
-    if (hash_map_1_[hash1] == int_addr) continue;
+        ((int_addr ^ (int_addr >> kHashSetLengthLog2)) & (kHashSetLength - 1));
+    if (hash_set_1_[hash1] == int_addr) continue;
     int hash2 =
-        ((int_addr - (int_addr >> kHashMapLengthLog2)) & (kHashMapLength - 1));
-    hash2 ^= hash2 >> (kHashMapLengthLog2 * 2);
-    if (hash_map_2_[hash2] == int_addr) continue;
-    if (hash_map_1_[hash1] == 0) {
-      hash_map_1_[hash1] = int_addr;
-    } else if (hash_map_2_[hash2] == 0) {
-      hash_map_2_[hash2] = int_addr;
+        ((int_addr - (int_addr >> kHashSetLengthLog2)) & (kHashSetLength - 1));
+    hash2 ^= hash2 >> (kHashSetLengthLog2 * 2);
+    if (hash_set_2_[hash2] == int_addr) continue;
+    if (hash_set_1_[hash1] == 0) {
+      hash_set_1_[hash1] = int_addr;
+    } else if (hash_set_2_[hash2] == 0) {
+      hash_set_2_[hash2] = int_addr;
     } else {
       // Rather than slowing down we just throw away some entries. This will
       // cause some duplicates to remain undetected.
-      hash_map_1_[hash1] = int_addr;
-      hash_map_2_[hash2] = 0;
+      hash_set_1_[hash1] = int_addr;
+      hash_set_2_[hash2] = 0;
     }
     old_buffer_is_sorted_ = false;
     old_buffer_is_filtered_ = false;
=======================================
--- /trunk/src/store-buffer.h   Tue Dec 13 00:07:27 2011
+++ /trunk/src/store-buffer.h   Thu Jan  5 03:49:16 2012
@@ -85,8 +85,8 @@
   static const int kStoreBufferSize = kStoreBufferOverflowBit;
   static const int kStoreBufferLength = kStoreBufferSize / sizeof(Address);
   static const int kOldStoreBufferLength = kStoreBufferLength * 16;
-  static const int kHashMapLengthLog2 = 12;
-  static const int kHashMapLength = 1 << kHashMapLengthLog2;
+  static const int kHashSetLengthLog2 = 12;
+  static const int kHashSetLength = 1 << kHashSetLengthLog2;

   void Compact();

@@ -148,13 +148,18 @@
   bool may_move_store_buffer_entries_;

   VirtualMemory* virtual_memory_;
-  uintptr_t* hash_map_1_;
-  uintptr_t* hash_map_2_;
+
+  // Two hash sets used for filtering.
+  // If address is in the hash set then it is guaranteed to be in the
+  // old part of the store buffer.
+  uintptr_t* hash_set_1_;
+  uintptr_t* hash_set_2_;
+  bool hash_sets_are_empty_;
+
+  void ClearFilteringHashSets();

   void CheckForFullBuffer();
   void Uniq();
-  void ZapHashTables();
-  bool HashTablesAreZapped();
   void ExemptPopularPages(int prime_sample_step, int threshold);

   void FindPointersToNewSpaceInRegion(Address start,
=======================================
--- /trunk/src/version.cc       Mon Jan  2 01:26:59 2012
+++ /trunk/src/version.cc       Thu Jan  5 03:49:16 2012
@@ -35,7 +35,7 @@
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     8
 #define BUILD_NUMBER      4
-#define PATCH_LEVEL       0
+#define PATCH_LEVEL       1
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
 #define IS_CANDIDATE_VERSION 0

--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev

Reply via email to