Revision: 22822
Author: [email protected]
Date: Mon Aug 4 14:18:05 2014 UTC
Log: Version 3.27.34.11 (merged r22007, r22019, r22090)
Wait for sweeper threads when expansion of old generation fails.
Collect garbage with kReduceMemoryFootprintMask in IdleNotification.
Waiting for sweeper threads is last resort in SlowAllocateRaw.
BUG=350720
LOG=N
[email protected]
Review URL: https://codereview.chromium.org/419743003
http://code.google.com/p/v8/source/detail?r=22822
Modified:
/branches/3.27/src/heap.cc
/branches/3.27/src/spaces.cc
/branches/3.27/src/spaces.h
/branches/3.27/src/version.cc
=======================================
--- /branches/3.27/src/heap.cc Wed Jul 2 12:53:18 2014 UTC
+++ /branches/3.27/src/heap.cc Mon Aug 4 14:18:05 2014 UTC
@@ -4245,7 +4245,8 @@
isolate_->compilation_cache()->Clear();
uncommit = true;
}
- CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
+ CollectAllGarbage(kReduceMemoryFootprintMask,
+ "idle notification: finalize incremental");
mark_sweeps_since_idle_round_started_++;
gc_count_at_last_idle_gc_ = gc_count_;
if (uncommit) {
=======================================
--- /branches/3.27/src/spaces.cc Mon Jun 30 07:20:01 2014 UTC
+++ /branches/3.27/src/spaces.cc Mon Aug 4 14:18:05 2014 UTC
@@ -2575,6 +2575,22 @@
allocation_info_.set_limit(NULL);
}
}
+
+
+HeapObject* PagedSpace::WaitForSweeperThreadsAndRetryAllocation(
+ int size_in_bytes) {
+ MarkCompactCollector* collector = heap()->mark_compact_collector();
+
+ // If sweeper threads are still running, wait for them.
+ if (collector->IsConcurrentSweepingInProgress()) {
+ collector->WaitUntilSweepingCompleted();
+
+ // After waiting for the sweeper threads, there may be new free-list
+ // entries.
+ return free_list_.Allocate(size_in_bytes);
+ }
+ return NULL;
+}
HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
@@ -2593,9 +2609,12 @@
// Free list allocation failed and there is no next page. Fail if we have
// hit the old generation size limit that should cause a garbage
// collection.
- if (!heap()->always_allocate() &&
- heap()->OldGenerationAllocationLimitReached()) {
- return NULL;
+ if (!heap()->always_allocate()
+ && heap()->OldGenerationAllocationLimitReached()) {
+ // If sweeper threads are active, wait for them at that point and steal
+ // elements from their free-lists.
+ HeapObject* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
+ if (object != NULL) return object;
}
// Try to expand the space and allocate in the new next page.
@@ -2604,18 +2623,10 @@
return free_list_.Allocate(size_in_bytes);
}
- // If sweeper threads are active, wait for them at that point.
- if (collector->IsConcurrentSweepingInProgress()) {
- collector->WaitUntilSweepingCompleted();
-
- // After waiting for the sweeper threads, there may be new free-list
- // entries.
- HeapObject* object = free_list_.Allocate(size_in_bytes);
- if (object != NULL) return object;
- }
-
- // Finally, fail.
- return NULL;
+ // If sweeper threads are active, wait for them at that point and steal
+ // elements from their free-lists. Allocation may still fail here, which
+ // would indicate that there is not enough memory for the given allocation.
+ return WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
}
=======================================
--- /branches/3.27/src/spaces.h Tue Jun 17 08:22:36 2014 UTC
+++ /branches/3.27/src/spaces.h Mon Aug 4 14:18:05 2014 UTC
@@ -2003,8 +2003,11 @@
// address denoted by top in allocation_info_.
inline HeapObject* AllocateLinearly(int size_in_bytes);
+ MUST_USE_RESULT HeapObject*
+ WaitForSweeperThreadsAndRetryAllocation(int size_in_bytes);
+
// Slow path of AllocateRaw. This function is space-dependent.
- MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes);
+ MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
friend class PageIterator;
friend class MarkCompactCollector;
=======================================
--- /branches/3.27/src/version.cc Mon Aug 4 11:23:46 2014 UTC
+++ /branches/3.27/src/version.cc Mon Aug 4 14:18:05 2014 UTC
@@ -35,7 +35,7 @@
#define MAJOR_VERSION 3
#define MINOR_VERSION 27
#define BUILD_NUMBER 34
-#define PATCH_LEVEL 10
+#define PATCH_LEVEL 11
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
#define IS_CANDIDATE_VERSION 0
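
For quick reference, here is a rough standalone sketch of the allocation fallback order that PagedSpace::SlowAllocateRaw follows after this patch: steal from the sweeper's free lists when the old-generation limit is hit, then try to expand the space, and only then block on sweeping as a last resort. The types below (FreeList, Sweeper, PagedSpaceSketch) are simplified stand-ins for illustration, not V8's actual classes.

#include <cstdio>

// Simplified stand-in for a space's free list.
struct FreeList {
  int available = 0;
  // Returns a non-null placeholder "object" when enough memory is available.
  void* Allocate(int size_in_bytes) {
    if (size_in_bytes <= available) {
      available -= size_in_bytes;
      return &available;  // placeholder, not a real heap object
    }
    return nullptr;
  }
};

// Simplified stand-in for the concurrent sweeper threads.
struct Sweeper {
  bool in_progress = false;
  int reclaimable = 0;
  bool InProgress() const { return in_progress; }
  // Blocks until sweeping finishes; swept memory lands in the free list.
  void WaitUntilCompleted(FreeList* free_list) {
    free_list->available += reclaimable;
    reclaimable = 0;
    in_progress = false;
  }
};

struct PagedSpaceSketch {
  FreeList free_list;
  Sweeper sweeper;
  bool at_old_generation_limit = false;
  bool can_expand = false;

  // Last resort: wait for concurrent sweeping, then retry the free list.
  void* WaitForSweeperThreadsAndRetryAllocation(int size_in_bytes) {
    if (sweeper.InProgress()) {
      sweeper.WaitUntilCompleted(&free_list);
      // Sweeping may have produced new free-list entries.
      return free_list.Allocate(size_in_bytes);
    }
    return nullptr;
  }

  void* SlowAllocateRaw(int size_in_bytes) {
    // 1. At the old-generation limit, try stealing from the sweeper's
    //    free-list entries before anything else (the new early call).
    if (at_old_generation_limit) {
      void* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
      if (object != nullptr) return object;
    }
    // 2. Try to expand the space and allocate from the new page.
    if (can_expand) {
      free_list.available += size_in_bytes;  // model a freshly committed page
      return free_list.Allocate(size_in_bytes);
    }
    // 3. Finally, block on the sweeper threads; nullptr here means there is
    //    not enough memory for the given allocation.
    return WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
  }
};

int main() {
  PagedSpaceSketch space;
  space.sweeper.in_progress = true;   // concurrent sweeping still running
  space.sweeper.reclaimable = 64;     // bytes the sweeper will free
  void* object = space.SlowAllocateRaw(32);
  std::printf("allocation %s\n", object != nullptr ? "succeeded" : "failed");
  return 0;
}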