Reviewers: Hannes Payer,
Message:
Hannes, PTAL. This is the last incarnation of the deferred-free approach I was
tinkering with. As discussed, results varied across platforms, so I'm
interested to know whether it helps on your end.
Description:
Experimental support for deferring the release of memory chunks: instead of
being released immediately, freed chunks are queued and later released in
batches, with full batches handed to background threads. It is enabled when
--defer-free appears on the command line.
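
For embedders that do not forward command-line flags to V8, the same switch
can also be set through the public flag API. A minimal sketch; the helper
below is illustrative and not part of this patch:

  #include "v8.h"  // V8 public API header

  // Equivalent to passing --defer-free on the command line; call before
  // initializing V8 so the flag is seen during heap setup.
  void EnableDeferredFree() {
    static const char kFlags[] = "--defer-free";
    v8::V8::SetFlagsFromString(kFlags, static_cast<int>(sizeof(kFlags) - 1));
  }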
BUG=
LOG=NO
Please review this at https://codereview.chromium.org/1308433002/
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Affected files (+89, -2 lines):
M src/flag-definitions.h
M src/heap/heap.cc
M src/heap/spaces.h
M src/heap/spaces.cc
Index: src/flag-definitions.h
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 88ba0e402f314e6f2bc45e6680ea40b38c479835..8bb9546a5a12e7100215e542b6587c16842cb957 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -826,6 +826,10 @@ DEFINE_INT(external_allocation_limit_incremental_time, 1,
           "Time spent in incremental marking steps (in ms) once the external "
           "allocation limit is reached")
+// spaces.cc
+DEFINE_BOOL(defer_free, false, "batch release pages on multiple threads.")
+DEFINE_NEG_IMPLICATION(predictable, defer_free)
+
//
// Dev shell flags
//
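
A note on the implication above: DEFINE_NEG_IMPLICATION(predictable,
defer_free) forces defer_free back off whenever --predictable is set, so no
work moves to background threads in predictable mode. Roughly the following
effect (a sketch, not the macro's literal expansion):

  // Approximate effect of the NEG_IMPLICATION: --predictable wins and
  // keeps all releases on the calling thread.
  void ComputeFlagImplications(bool& predictable_flag, bool& defer_free_flag) {
    if (predictable_flag) defer_free_flag = false;
  }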
Index: src/heap/heap.cc
diff --git a/src/heap/heap.cc b/src/heap/heap.cc
index 19c668e5bfaf1e35f48d8e3cdc75d35297bf4d2b..003191d2de4a2cc8831926737d530a29f10a443e 100644
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
@@ -6704,10 +6704,14 @@ void Heap::FreeQueuedChunks() {
}
isolate_->heap()->store_buffer()->Compact();
isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
+
+ isolate_->memory_allocator()->EnterDeferredFreeMode();
for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
next = chunk->next_chunk();
isolate_->memory_allocator()->Free(chunk);
}
+ isolate_->memory_allocator()->LeaveDeferredFreeMode();
+
chunks_queued_for_free_ = NULL;
}
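
The Enter/Leave pair brackets the existing loop: with --defer-free, every
MemoryAllocator::Free() issued inside the bracket queues its chunk instead of
releasing it, and the queued chunks are dispatched when
LeaveDeferredFreeMode() runs (see the spaces.cc hunk below). For reference, a
minimal standalone sketch of that bracket pattern; the class and names here
are illustrative, not V8 code:

  #include <vector>

  // Illustrative only: while "deferred" mode is on, releases are queued;
  // leaving the mode flushes everything that was queued.
  class DeferredReleaser {
   public:
    void Enter() { deferring_ = true; }
    void Release(void* region) {
      if (deferring_) {
        pending_.push_back(region);  // queue, mirroring FreeMemory() below
      } else {
        ReleaseNow(region);
      }
    }
    void Leave() {
      for (void* region : pending_) ReleaseNow(region);
      pending_.clear();
      deferring_ = false;
    }

   private:
    static void ReleaseNow(void* /* region */) {
      // Platform release (munmap / VirtualFree) would go here.
    }
    bool deferring_ = false;
    std::vector<void*> pending_;
  };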
Index: src/heap/spaces.cc
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index e66fd3944cf57c91911013c137c6a91318edf243..105b4f84ef41fa841b9bf8ac8890fa85930a3391 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -11,6 +11,7 @@
#include "src/macro-assembler.h"
#include "src/msan.h"
#include "src/snapshot/snapshot.h"
+#include "src/v8.h"
namespace v8 {
namespace internal {
@@ -296,7 +297,9 @@ MemoryAllocator::MemoryAllocator(Isolate* isolate)
size_(0),
size_executable_(0),
lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
- highest_ever_allocated_(reinterpret_cast<void*>(0)) {}
+ highest_ever_allocated_(reinterpret_cast<void*>(0)),
+ deferred_free_mode_(false),
+ deferred_free_head_(NULL) {}
bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
@@ -353,7 +356,16 @@ void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
!isolate_->code_range()->valid() || size <= Page::kPageSize);
- reservation->Release();
+ if (deferred_free_mode_) {
+ MemoryChunk* chunk =
+ MemoryChunk::FromAddress((Address)reservation->address());
+ chunk->set_next_chunk(deferred_free_head_);
+ DCHECK(chunk->size() == reservation->size());
+ deferred_free_head_ = chunk;
+ reservation->Reset();
+ } else {
+ reservation->Release();
+ }
}
@@ -383,6 +395,63 @@ void MemoryAllocator::FreeMemory(Address base, size_t size,
}
+class BatchFreeTask : public v8::Task {
+ public:
+ explicit BatchFreeTask(MemoryChunk* head) : head_(head) {}
+ virtual ~BatchFreeTask() {}
+
+ static void BatchFree(MemoryChunk* head) {
+ while (head != NULL) {
+ MemoryChunk* next = head->next_chunk();
+ bool result =
+          base::VirtualMemory::ReleaseRegion(head->address(), head->size());
+ USE(result);
+ DCHECK(result);
+ head = next;
+ }
+ }
+
+ private:
+ void Run() override { BatchFree(head_); }
+
+ MemoryChunk* head_;
+
+ DISALLOW_COPY_AND_ASSIGN(BatchFreeTask);
+};
+
+
+void MemoryAllocator::EnterDeferredFreeMode() {
+ deferred_free_mode_ = FLAG_defer_free;
+}
+
+
+void MemoryAllocator::LeaveDeferredFreeMode() {
+ static const int kBatchSize = 4;
+
+ // Free batches on background threads.
+ MemoryChunk* tail = deferred_free_head_;
+ for (int count = 0; tail != NULL; count++) {
+ MemoryChunk* next = tail->next_chunk();
+ if ((count % kBatchSize) == kBatchSize - 1) {
+ tail->set_next_chunk(NULL);
+
+ V8::GetCurrentPlatform()->CallOnBackgroundThread(
+ new BatchFreeTask(deferred_free_head_),
+ v8::Platform::kShortRunningTask);
+
+ deferred_free_head_ = next;
+ }
+ tail = next;
+ }
+
+ // Free remnants on main thread.
+ BatchFreeTask::BatchFree(deferred_free_head_);
+
+ deferred_free_head_ = NULL;
+ deferred_free_mode_ = false;
+}
+
+
Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
                                              base::VirtualMemory* controller) {
  base::VirtualMemory reservation(size, alignment);
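
A note on the batching arithmetic in LeaveDeferredFreeMode(): with
kBatchSize == 4, every full group of four queued chunks is cut off the list
and handed to a background task, and the remainder of fewer than four chunks
is released synchronously by BatchFree() on the calling thread. For example,
ten queued chunks become two background batches of four plus two chunks freed
inline. A small self-contained sketch of the same split, using std::thread in
place of the platform task runner (purely illustrative):

  #include <cstdio>
  #include <thread>
  #include <vector>

  // Same shape as LeaveDeferredFreeMode(): full batches go to workers,
  // the tail of fewer than kBatchSize items is handled by the caller.
  void FreeInBatches(const std::vector<int>& chunk_ids) {
    static const size_t kBatchSize = 4;
    std::vector<std::thread> workers;
    size_t i = 0;
    for (; i + kBatchSize <= chunk_ids.size(); i += kBatchSize) {
      std::vector<int> batch(chunk_ids.begin() + i,
                             chunk_ids.begin() + i + kBatchSize);
      workers.emplace_back([batch]() {
        for (int id : batch) std::printf("background free chunk %d\n", id);
      });
    }
    for (; i < chunk_ids.size(); i++) {
      std::printf("caller-thread free chunk %d\n", chunk_ids[i]);
    }
    // V8's background tasks are fire-and-forget; joining here just keeps
    // the sketch self-contained.
    for (std::thread& worker : workers) worker.join();
  }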
Index: src/heap/spaces.h
diff --git a/src/heap/spaces.h b/src/heap/spaces.h
index 2ea2e909aa0c9b567da8e0d280a56a84e48cc977..fd425cda15b9ed75bdacb1332120ec925a712280 100644
--- a/src/heap/spaces.h
+++ b/src/heap/spaces.h
@@ -1131,6 +1131,11 @@ class MemoryAllocator {
void FreeMemory(base::VirtualMemory* reservation, Executability executable);
void FreeMemory(Address addr, size_t size, Executability executable);
+  // Spread costly region-free operations across worker threads if the
+  // batch of pages is large enough.
+ void EnterDeferredFreeMode();
+ void LeaveDeferredFreeMode();
+
// Commit a contiguous block of memory from the initial chunk. Assumes that
// the address is not NULL, the size is greater than zero, and that the
// block is contained in the initial chunk. Returns true if it succeeded
@@ -1200,6 +1205,11 @@ class MemoryAllocator {
void* lowest_ever_allocated_;
void* highest_ever_allocated_;
+  // True while memory releases are being deferred.
+  bool deferred_free_mode_;
+  // Head of the list of chunks whose release has been deferred.
+  MemoryChunk* deferred_free_head_;
+
struct MemoryAllocationCallbackRegistration {
MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
ObjectSpace space,
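
One design note on the new members: the deferred list is intrusive. It threads
through the chunks' own next_chunk() pointers, the same way
Heap::chunks_queued_for_free_ is walked in the heap.cc hunk, so queuing a
chunk needs no extra allocation while the allocator is in the middle of
releasing memory. A minimal sketch of that intrusive-list shape, with
illustrative types rather than MemoryChunk:

  #include <cstddef>

  // Each node carries its own link, as MemoryChunk does via
  // set_next_chunk()/next_chunk(); pushing is pointer surgery only.
  struct ChunkNode {
    ChunkNode* next = nullptr;
    size_t size = 0;
  };

  // Mirrors FreeMemory() queueing a chunk onto deferred_free_head_.
  void PushDeferred(ChunkNode** head, ChunkNode* chunk) {
    chunk->next = *head;
    *head = chunk;
  }

  // Mirrors LeaveDeferredFreeMode() taking ownership of the whole list.
  ChunkNode* TakeAll(ChunkNode** head) {
    ChunkNode* list = *head;
    *head = nullptr;
    return list;
  }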