Reviewers: Hannes Payer,
Message:
By setting the code range size to 32MB, I can easily trigger an OOM crash in Gmail
because of code fragmentation. With this patch applied, Gmail doesn't crash.
I've been thinking about adding a test, but that's a bit tricky since we would need
to simulate code space fragmentation. I'm not sure how to do that.
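For reference, here is a tiny standalone model (not V8 code; the page count, block
sizes and layout are made up) of the situation such a test would have to reproduce
inside the real code range: plenty of free space in total, but no contiguous run
large enough for a new page:

// Illustrative model only: a fixed "code range" carved into pages, where
// freeing every other page leaves half the range free but maximally
// fragmented, so a request for a contiguous multi-page block fails.
#include <cstdio>
#include <vector>

int main() {
  const int kPages = 32;                 // pretend the code range is 32 pages
  std::vector<bool> used(kPages, true);  // start fully allocated

  // Free every other page: lots of free space, no two adjacent free pages.
  for (int i = 0; i < kPages; i += 2) used[i] = false;

  // Look for a contiguous run of 2 free pages; with this layout none exists.
  const int kNeeded = 2;
  int run = 0;
  bool found = false;
  for (int i = 0; i < kPages; i++) {
    run = used[i] ? 0 : run + 1;
    if (run >= kNeeded) { found = true; break; }
  }

  int free_pages = 0;
  for (int i = 0; i < kPages; i++) free_pages += !used[i];
  std::printf("free pages: %d, contiguous %d-page block available: %s\n",
              free_pages, kNeeded, found ? "yes" : "no");
  return 0;
}
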
Description:
Reserve a code range block for evacuation.
If we run out of code range, the GC would not be able to compact the code space,
because it would not be able to allocate a new page. This can cause code space
fragmentation and OOM crashes.
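The idea mirrors PagedSpace's emergency memory: hold back one page-sized block up
front and only hand it back to the allocator when compaction needs a fresh page. A
minimal standalone sketch of that reserve/release pattern (illustrative only; the
names and sizes are invented and this is not the actual CodeRange interface):

// Minimal model of the pattern added by this patch: keep one block out of the
// free list so that, even when normal allocation has exhausted the range,
// compaction can still get one page to evacuate into.
#include <cstddef>
#include <cstdio>
#include <optional>
#include <vector>

struct Block { size_t start, size; };

class RangeModel {
 public:
  explicit RangeModel(size_t pages) {
    for (size_t i = 0; i < pages; i++) free_.push_back({i, 1});
  }
  std::optional<Block> Allocate() {
    if (free_.empty()) return std::nullopt;
    Block b = free_.back();
    free_.pop_back();
    return b;
  }
  void Release(Block b) { free_.push_back(b); }

  // Set aside one block for compaction, if we don't already hold one.
  void ReserveEmergencyBlock() {
    if (emergency_) return;
    emergency_ = Allocate();
  }
  // Make the emergency block allocatable again, right before compaction needs it.
  void ReleaseEmergencyBlock() {
    if (emergency_) { Release(*emergency_); emergency_.reset(); }
  }

 private:
  std::vector<Block> free_;
  std::optional<Block> emergency_;
};

int main() {
  RangeModel range(4);
  range.ReserveEmergencyBlock();  // done at heap setup / after sweeping
  while (range.Allocate()) {}     // the mutator exhausts the range
  std::printf("page for compaction without emergency block: %s\n",
              range.Allocate() ? "yes" : "no");
  range.ReleaseEmergencyBlock();  // done when creating emergency memory
  std::printf("page for compaction with emergency block: %s\n",
              range.Allocate() ? "yes" : "no");
  return 0;
}
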
BUG=chromium:430118
LOG=Y
Please review this at https://codereview.chromium.org/742733002/
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Affected files (+66, -22 lines):
M src/heap/heap.cc
M src/heap/mark-compact.cc
M src/heap/spaces.h
M src/heap/spaces.cc
Index: src/heap/heap.cc
diff --git a/src/heap/heap.cc b/src/heap/heap.cc
index 9785be0b91d59785a5b38a6da7ac38006ed8bfa2..4b9422e61c56eef6fa88fda84747d801a8c778cc 100644
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
@@ -5262,6 +5262,7 @@ bool Heap::SetUp() {
new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
if (code_space_ == NULL) return false;
if (!code_space_->SetUp()) return false;
+ isolate_->code_range()->ReserveEmergencyBlock();
// Initialize map space.
map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
Index: src/heap/mark-compact.cc
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index a206b1d12e4beb525a4ffb2235e37ee9571ec830..db24c84b8759cb53f25979fadcd6122a164f3498 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -4180,6 +4180,7 @@ void MarkCompactCollector::SweepSpaces() {
// Deallocate evacuated candidate pages.
ReleaseEvacuationCandidates();
+ heap()->isolate()->code_range()->ReserveEmergencyBlock();
if (FLAG_print_cumulative_gc_stat) {
heap_->tracer()->AddSweepingTime(base::OS::TimeCurrentMillis() -
Index: src/heap/spaces.cc
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index 30b141fcd32965637bd6ed1a2cc29284dff33750..42099759e3ed7d85ef0842f7003468cb725b51c8 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -93,7 +93,8 @@ CodeRange::CodeRange(Isolate* isolate)
code_range_(NULL),
free_list_(0),
allocation_list_(0),
- current_allocation_block_index_(0) {}
+ current_allocation_block_index_(0),
+ emergency_block_() {}
bool CodeRange::SetUp(size_t requested) {
@@ -202,35 +203,20 @@ Address CodeRange::AllocateRawMemory(const size_t requested_size,
const size_t commit_size,
size_t* allocated) {
DCHECK(commit_size <= requested_size);
- DCHECK(allocation_list_.length() == 0 ||
- current_allocation_block_index_ < allocation_list_.length());
- if (allocation_list_.length() == 0 ||
- requested_size > allocation_list_[current_allocation_block_index_].size) {
- // Find an allocation block large enough.
- if (!GetNextAllocationBlock(requested_size)) return NULL;
- }
- // Commit the requested memory at the start of the current allocation block.
- size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
- FreeBlock current = allocation_list_[current_allocation_block_index_];
- if (aligned_requested >= (current.size - Page::kPageSize)) {
- // Don't leave a small free block, useless for a large object or chunk.
- *allocated = current.size;
- } else {
- *allocated = aligned_requested;
+ FreeBlock current;
+ if (!ReserveBlock(requested_size, &current)) {
+ *allocated = 0;
+ return NULL;
}
+ *allocated = current.size;
DCHECK(*allocated <= current.size);
DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment));
if (!isolate_->memory_allocator()->CommitExecutableMemory(
code_range_, current.start, commit_size, *allocated)) {
*allocated = 0;
+ ReleaseBlock(&current);
return NULL;
}
- allocation_list_[current_allocation_block_index_].start += *allocated;
- allocation_list_[current_allocation_block_index_].size -= *allocated;
- if (*allocated == current.size) {
- // This block is used up, get the next one.
- GetNextAllocationBlock(0);
- }
return current.start;
}
@@ -260,6 +246,49 @@ void CodeRange::TearDown() {
}
+bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) {
+ DCHECK(allocation_list_.length() == 0 ||
+ current_allocation_block_index_ < allocation_list_.length());
+ if (allocation_list_.length() == 0 ||
+ requested_size > allocation_list_[current_allocation_block_index_].size) {
+ // Find an allocation block large enough.
+ if (!GetNextAllocationBlock(requested_size)) return false;
+ }
+ // Reserve the requested memory at the start of the current allocation block.
+ size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
+ *block = allocation_list_[current_allocation_block_index_];
+ // Don't leave a small free block, useless for a large object or chunk.
+ if (aligned_requested < (block->size - Page::kPageSize)) {
+ block->size = aligned_requested;
+ }
+ DCHECK(IsAddressAligned(block->start, MemoryChunk::kAlignment));
+ allocation_list_[current_allocation_block_index_].start += block->size;
+ allocation_list_[current_allocation_block_index_].size -= block->size;
+ return true;
+}
+
+
+void CodeRange::ReleaseBlock(const FreeBlock* block) { free_list_.Add(*block); }
+
+
+void CodeRange::ReserveEmergencyBlock() {
+ const size_t requested_size = MemoryAllocator::CodePageAreaSize();
+ if (emergency_block_.size == 0) {
+ ReserveBlock(requested_size, &emergency_block_);
+ } else {
+ DCHECK(emergency_block_.size >= requested_size);
+ }
+}
+
+
+void CodeRange::ReleaseEmergencyBlock() {
+ if (emergency_block_.size != 0) {
+ ReleaseBlock(&emergency_block_);
+ emergency_block_.size = 0;
+ }
+}
+
+
// -----------------------------------------------------------------------------
// MemoryAllocator
//
@@ -1106,6 +1135,11 @@ void PagedSpace::ReleasePage(Page* page) {
void PagedSpace::CreateEmergencyMemory() {
+ if (identity() == CODE_SPACE) {
+ // Make the emergency block available to the allocator.
+ heap()->isolate()->code_range()->ReleaseEmergencyBlock();
+ DCHECK(MemoryAllocator::CodePageAreaSize() == AreaSize());
+ }
emergency_memory_ = heap()->isolate()->memory_allocator()->AllocateChunk(
AreaSize(), AreaSize(), executable(), this);
}
Index: src/heap/spaces.h
diff --git a/src/heap/spaces.h b/src/heap/spaces.h
index ef294b243948b3cd4bbdc5101385baf295cb8a73..f3345f185cc1e68fb70cde85f0b4df5f08da0cad 100644
--- a/src/heap/spaces.h
+++ b/src/heap/spaces.h
@@ -900,6 +900,9 @@ class CodeRange {
bool UncommitRawMemory(Address start, size_t length);
void FreeRawMemory(Address buf, size_t length);
+ void ReserveEmergencyBlock();
+ void ReleaseEmergencyBlock();
+
private:
Isolate* isolate_;
@@ -908,6 +911,7 @@ class CodeRange {
// Plain old data class, just a struct plus a constructor.
class FreeBlock {
public:
+ FreeBlock() : start(0), size(0) {}
FreeBlock(Address start_arg, size_t size_arg)
: start(start_arg), size(size_arg) {
DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
@@ -932,6 +936,8 @@ class CodeRange {
List<FreeBlock> allocation_list_;
int current_allocation_block_index_;
+ FreeBlock emergency_block_;
+
// Finds a block on the allocation list that contains at least the
// requested amount of memory. If none is found, sorts and merges
// the existing free memory blocks, and searches again.
@@ -940,6 +946,8 @@ class CodeRange {
// Compares the start addresses of two free blocks.
static int CompareFreeBlockAddress(const FreeBlock* left,
const FreeBlock* right);
+ bool ReserveBlock(const size_t requested_size, FreeBlock* block);
+ void ReleaseBlock(const FreeBlock* block);
DISALLOW_COPY_AND_ASSIGN(CodeRange);
};