Reviewers: Erik Corry,

Description:
Adapt fragmentation heuristics for over reserved pages.

This adapts the heuristics that detect fragmented pages to reduce memory
footprint for spaces with over reserved memory. This minimizes external
fragmentation caused by pages that cannot be released to the OS because
of just a few live objects on them.

[email protected]
TEST=cctest/test-heap/ReleaseOverReservedPages


Please review this at https://chromiumcodereview.appspot.com/10629004/

SVN Base: https://v8.googlecode.com/svn/branches/bleeding_edge

Affected files:
  M src/mark-compact.cc
  M test/cctest/test-alloc.cc
  M test/cctest/test-heap.cc


Index: src/mark-compact.cc
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 67f6e8e0a7556c89a1f5162ac441ec3da5b4a1c4..5789b5ffcc312a6026d5f5a8db7dfb0f82092050 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -500,12 +500,10 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
          space->identity() == OLD_DATA_SPACE ||
          space->identity() == CODE_SPACE);

-  int number_of_pages = space->CountTotalPages();
-
   const int kMaxMaxEvacuationCandidates = 1000;
-  int max_evacuation_candidates = Min(
-    kMaxMaxEvacuationCandidates,
-    static_cast<int>(sqrt(static_cast<double>(number_of_pages / 2)) + 1));
+  int number_of_pages = space->CountTotalPages();
+  int max_evacuation_candidates =
+      static_cast<int>(sqrt(static_cast<double>(number_of_pages / 2)) + 1);

   if (FLAG_stress_compaction || FLAG_always_compact) {
     max_evacuation_candidates = kMaxMaxEvacuationCandidates;
@@ -533,20 +531,33 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {

   intptr_t reserved = number_of_pages * space->AreaSize();
   intptr_t over_reserved = reserved - space->SizeOfObjects();
-  static const intptr_t kFreenessThreshold = 50;
+  intptr_t freeness_threshold = 100;
+
+  if (over_reserved >= 2 * space->AreaSize()) {

-  if (over_reserved >= 2 * space->AreaSize() &&
-      reduce_memory_footprint_) {
-    mode = REDUCE_MEMORY_FOOTPRINT;
+    // If reduction of memory footprint was requested, we are aggressive
+    // about choosing pages to free.  We expect that half empty pages
+    // are easier to compact so slightly bump the limit.
+    if (reduce_memory_footprint_) {
+      mode = REDUCE_MEMORY_FOOTPRINT;
+      freeness_threshold = 50;
+      max_evacuation_candidates += 2;
+    }

- // We expect that empty pages are easier to compact so slightly bump the
-    // limit.
-    max_evacuation_candidates += 2;
+    // If over-usage is very high (more than a third of the space), we
+    // try to free all mostly empty pages.  We expect that empty pages
+    // are even easier to compact so bump the limit even more.
+    if (over_reserved > reserved / 3) {
+      mode = REDUCE_MEMORY_FOOTPRINT;
+      freeness_threshold = 80;
+      max_evacuation_candidates *= 2;
+    }

-    if (FLAG_trace_fragmentation) {
- PrintF("Estimated over reserved memory: %.1f MB (setting threshold %d)\n",
+    if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) {
+ PrintF("Estimated over reserved memory: %.1f / %.1f MB (threshold %d)\n",
              static_cast<double>(over_reserved) / MB,
-             static_cast<int>(kFreenessThreshold));
+             static_cast<double>(reserved) / MB,
+             static_cast<int>(freeness_threshold));
     }
   }

@@ -554,6 +565,9 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {

   Candidate candidates[kMaxMaxEvacuationCandidates];

+  max_evacuation_candidates =
+      Min(kMaxMaxEvacuationCandidates, max_evacuation_candidates);
+
   int count = 0;
   int fragmentation = 0;
   Candidate* least = NULL;
@@ -587,7 +601,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {

       int free_pct = static_cast<int>(free_bytes * 100) / p->area_size();

-      if (free_pct >= kFreenessThreshold) {
+      if (free_pct >= freeness_threshold) {
         estimated_release += 2 * p->area_size() - free_bytes;
         fragmentation = free_pct;
       } else {
Index: test/cctest/test-alloc.cc
diff --git a/test/cctest/test-alloc.cc b/test/cctest/test-alloc.cc
index e195d14923ef324dea976cd94aa2ec3d52594196..a8e504fd443a6c77dddfbde352c372f11206a569 100644
--- a/test/cctest/test-alloc.cc
+++ b/test/cctest/test-alloc.cc
@@ -34,7 +34,8 @@
 using namespace v8::internal;


-static inline void SimulateFullSpace(PagedSpace* space) {
+// Also used in test-heap.cc test cases.
+void SimulateFullSpace(PagedSpace* space) {
   int old_linear_size = static_cast<int>(space->limit() - space->top());
   space->Free(space->top(), old_linear_size);
   space->SetTop(space->limit(), space->limit());
Index: test/cctest/test-heap.cc
diff --git a/test/cctest/test-heap.cc b/test/cctest/test-heap.cc
index 179ca1c63cd1f2add33dfeeaf10fe8081cc89e80..410e52b118b926714557cdfed711a6538e91ffd6 100644
--- a/test/cctest/test-heap.cc
+++ b/test/cctest/test-heap.cc
@@ -1899,3 +1899,42 @@ TEST(Regress2143b) {
   CHECK(root->IsJSObject());
   CHECK(root->map()->IsMap());
 }
+
+
+// Implemented in the test-alloc.cc test suite.
+void SimulateFullSpace(PagedSpace* space);
+
+
+TEST(ReleaseOverReservedPages) {
+  i::FLAG_trace_gc = true;
+  InitializeVM();
+  v8::HandleScope scope;
+  static const int number_of_test_pages = 20;
+
+  // Prepare many pages with low live-bytes count.
+  PagedSpace* old_pointer_space = HEAP->old_pointer_space();
+  CHECK_EQ(1, old_pointer_space->CountTotalPages());
+  for (int i = 0; i < number_of_test_pages; i++) {
+    AlwaysAllocateScope always_allocate;
+    SimulateFullSpace(old_pointer_space);
+    FACTORY->NewFixedArray(1, TENURED);
+  }
+  CHECK_EQ(number_of_test_pages + 1, old_pointer_space->CountTotalPages());
+
+  // Triggering one GC will cause a lot of garbage to be discovered but
+  // evenly spread across all allocated pages.
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags, "triggered for preparation");
+  CHECK_EQ(number_of_test_pages + 1, old_pointer_space->CountTotalPages());
+
+  // Triggering subsequent GCs should cause at least half of the pages
+  // to be released to the OS after at most two cycles.
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags, "triggered by test 1");
+  CHECK_GE(number_of_test_pages + 1, old_pointer_space->CountTotalPages());
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags, "triggered by test 2");
+ CHECK_GE(number_of_test_pages + 1, old_pointer_space->CountTotalPages() * 2);
+
+  // Triggering a last-resort GC should cause all pages to be released
+  // to the OS so that other processes can seize the memory.
+  HEAP->CollectAllAvailableGarbage("triggered really hard");
+  CHECK_EQ(1, old_pointer_space->CountTotalPages());
+}


--
v8-dev mailing list
[email protected]
http://groups.google.com/group/v8-dev

Reply via email to