Reviewers: Michael Lippautz,
Description:
Synchronize on concurrent slot buffer entries during migration.
BUG=chromium:524425
LOG=n
Please review this at https://codereview.chromium.org/1314133004/
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Affected files (+76, -13 lines):
M src/heap/mark-compact.h
M src/heap/mark-compact.cc
M src/heap/store-buffer.h
M src/heap/store-buffer-inl.h
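
Note on the overall shape of the change: the single-threaded fast paths are
kept as-is, and mutex-guarded variants are added for use while parallel
compaction tasks run. Below is a minimal standalone sketch of the decision
structure RecordMigratedSlot ends up with; Target, Collector, and the two
record helpers are hypothetical stand-ins for illustration, not V8 API:

#include <mutex>

enum class Target { kNewSpace, kEvacuationCandidate, kOther };

struct Collector {
  bool parallel_compaction_in_progress = false;
  std::mutex store_buffer_mutex;     // guards store buffer marks
  std::mutex migration_slots_mutex;  // guards migration slots buffer adds

  void MarkStoreBuffer(void* slot) { (void)slot; /* record new-space pointer */ }
  void AddMigrationSlot(void* slot) { (void)slot; /* record old-to-old pointer */ }

  void RecordMigratedSlot(Target target, void* slot) {
    if (target == Target::kNewSpace) {
      if (parallel_compaction_in_progress) {
        // Mirrors StoreBuffer::SynchronizedMark: lock, then reuse the plain path.
        std::lock_guard<std::mutex> guard(store_buffer_mutex);
        MarkStoreBuffer(slot);
      } else {
        MarkStoreBuffer(slot);  // single-threaded fast path, no locking
      }
    } else if (target == Target::kEvacuationCandidate) {
      if (parallel_compaction_in_progress) {
        // Mirrors SlotsBuffer::SynchronizedAddTo.
        std::lock_guard<std::mutex> guard(migration_slots_mutex);
        AddMigrationSlot(slot);
      } else {
        AddMigrationSlot(slot);  // plain AddTo
      }
    }
  }
};
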
Index: src/heap/mark-compact.cc
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index b5f191094c9b96f532aa3c9749eb48cf475d87d7..2bad51e21ff7e3b2257b81e6f09099d023307dd3 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -2690,15 +2690,55 @@ void MarkCompactCollector::AbortWeakCells() {
void MarkCompactCollector::RecordMigratedSlot(Object* value, Address slot) {
+ // When parallel compaction is in progress, store and slots buffer entries
+ // require synchronization.
if (heap_->InNewSpace(value)) {
if (parallel_compaction_in_progress_) {
- heap_->store_buffer()->MarkSynchronized(slot);
+ heap_->store_buffer()->SynchronizedMark(slot);
} else {
heap_->store_buffer()->Mark(slot);
}
} else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
+ if (parallel_compaction_in_progress_) {
+ SlotsBuffer::SynchronizedAddTo(
+ &slots_buffer_allocator_, &migration_slots_buffer_,
+ &migration_slots_buffer_mutex_, reinterpret_cast<Object**>(slot),
+ SlotsBuffer::IGNORE_OVERFLOW);
+ } else {
+ SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
+ reinterpret_cast<Object**>(slot),
+ SlotsBuffer::IGNORE_OVERFLOW);
+ }
+ }
+}
+
+
+void MarkCompactCollector::RecordMigratedCodeEntrySlot(
+ Address code_entry, Address code_entry_slot) {
+ if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
+ if (parallel_compaction_in_progress_) {
+ SlotsBuffer::SynchronizedAddTo(
+ &slots_buffer_allocator_, &migration_slots_buffer_,
+ &migration_slots_buffer_mutex_, SlotsBuffer::CODE_ENTRY_SLOT,
+ code_entry_slot, SlotsBuffer::IGNORE_OVERFLOW);
+ } else {
+ SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
+ SlotsBuffer::CODE_ENTRY_SLOT, code_entry_slot,
+ SlotsBuffer::IGNORE_OVERFLOW);
+ }
+ }
+}
+
+
+void MarkCompactCollector::RecordMigratedCodeObjectSlot(Address code_object) {
+ if (parallel_compaction_in_progress_) {
+ SlotsBuffer::SynchronizedAddTo(
+ &slots_buffer_allocator_, &migration_slots_buffer_,
+ &migration_slots_buffer_mutex_, SlotsBuffer::RELOCATED_CODE_OBJECT,
+ code_object, SlotsBuffer::IGNORE_OVERFLOW);
+ } else {
SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
- reinterpret_cast<Object**>(slot),
+ SlotsBuffer::RELOCATED_CODE_OBJECT, code_object,
SlotsBuffer::IGNORE_OVERFLOW);
}
}
@@ -2743,19 +2783,12 @@ void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src,
if (compacting_ && dst->IsJSFunction()) {
Address code_entry_slot = dst->address() + JSFunction::kCodeEntryOffset;
Address code_entry = Memory::Address_at(code_entry_slot);
-
- if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
- SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
- SlotsBuffer::CODE_ENTRY_SLOT, code_entry_slot,
- SlotsBuffer::IGNORE_OVERFLOW);
- }
+ RecordMigratedCodeEntrySlot(code_entry, code_entry_slot);
}
} else if (dest == CODE_SPACE) {
PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr));
heap()->MoveBlock(dst_addr, src_addr, size);
- SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
- SlotsBuffer::RELOCATED_CODE_OBJECT, dst_addr,
- SlotsBuffer::IGNORE_OVERFLOW);
+ RecordMigratedCodeObjectSlot(dst_addr);
Code::cast(dst)->Relocate(dst_addr - src_addr);
} else {
DCHECK(dest == NEW_SPACE);
@@ -4487,6 +4520,15 @@ bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) {
}
+bool SlotsBuffer::SynchronizedAddTo(SlotsBufferAllocator* allocator,
+ SlotsBuffer** buffer_address,
+ base::Mutex* buffer_mutex, SlotType type,
+ Address addr, AdditionMode mode) {
+ base::LockGuard<base::Mutex> lock_guard(buffer_mutex);
+ return AddTo(allocator, buffer_address, type, addr, mode);
+}
+
+
bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
SlotsBuffer** buffer_address, SlotType type,
Address addr, AdditionMode mode) {
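
For reviewers wondering why plain AddTo is unsafe to call concurrently: when
the current buffer fills up, AddTo allocates a fresh buffer, chains it to the
old one, and swings the head pointer, so two racing tasks can each install a
new head and one buffer (with its slots) is silently lost. A hedged
standalone sketch of that hazard and the fix; Chunk and the free functions
below are illustrative stand-ins, not the real SlotsBuffer:

#include <cstddef>
#include <mutex>

struct Chunk {
  static const std::size_t kCapacity = 1024;
  Chunk* next = nullptr;  // chain of older, full chunks
  std::size_t used = 0;
  void* slots[kCapacity];

  bool Add(void* slot) {
    if (used == kCapacity) return false;  // full: caller must grow the chain
    slots[used++] = slot;
    return true;
  }
};

// Unsynchronized variant: correct only with a single writer.
void AddTo(Chunk** head, void* slot) {
  if (*head == nullptr || !(*head)->Add(slot)) {
    Chunk* grown = new Chunk();
    grown->next = *head;  // two racing writers both read the same old head...
    grown->Add(slot);
    *head = grown;        // ...and the second store here discards the first
  }
}

// Synchronized variant, mirroring SlotsBuffer::SynchronizedAddTo: the append
// and the head-pointer update are serialized under one mutex.
void SynchronizedAddTo(Chunk** head, std::mutex* mutex, void* slot) {
  std::lock_guard<std::mutex> guard(*mutex);
  AddTo(head, slot);
}
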
Index: src/heap/mark-compact.h
diff --git a/src/heap/mark-compact.h b/src/heap/mark-compact.h
index ffb54bb462cbd13dbb595d27b902a4c51eb38067..60857ed570ba8e4ea3340140a686f7e0500b7a8c 100644
--- a/src/heap/mark-compact.h
+++ b/src/heap/mark-compact.h
@@ -374,6 +374,14 @@ class SlotsBuffer {
return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold;
}
+ INLINE(static bool SynchronizedAddTo(SlotsBufferAllocator* allocator,
+ SlotsBuffer** buffer_address,
+ base::Mutex* buffer_mutex,
+ ObjectSlot slot, AdditionMode mode)) {
+ base::LockGuard<base::Mutex> lock_guard(buffer_mutex);
+ return AddTo(allocator, buffer_address, slot, mode);
+ }
+
INLINE(static bool AddTo(SlotsBufferAllocator* allocator,
SlotsBuffer** buffer_address, ObjectSlot slot,
AdditionMode mode)) {
@@ -392,6 +400,11 @@ class SlotsBuffer {
static bool IsTypedSlot(ObjectSlot slot);
+ static bool SynchronizedAddTo(SlotsBufferAllocator* allocator,
+ SlotsBuffer** buffer_address,
+ base::Mutex* buffer_mutex, SlotType type,
+ Address addr, AdditionMode mode);
+
static bool AddTo(SlotsBufferAllocator* allocator,
SlotsBuffer** buffer_address, SlotType type, Address addr,
AdditionMode mode);
@@ -722,6 +735,8 @@ class MarkCompactCollector {
SlotsBuffer* migration_slots_buffer_;
+ base::Mutex migration_slots_buffer_mutex_;
+
// Finishes GC, performs heap verification if enabled.
void Finish();
@@ -897,6 +912,12 @@ class MarkCompactCollector {
// Updates store buffer and slot buffer for a pointer in a migrating object.
void RecordMigratedSlot(Object* value, Address slot);
+ // Adds the code entry slot to the slots buffer.
+ void RecordMigratedCodeEntrySlot(Address code_entry, Address code_entry_slot);
+
+ // Adds the slot of a moved code object.
+ void RecordMigratedCodeObjectSlot(Address code_object);
+
#ifdef DEBUG
friend class MarkObjectVisitor;
static void VisitObject(HeapObject* obj);
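
A hedged usage sketch of the intended call pattern, with std::thread standing
in for V8's compaction tasks and a plain vector standing in for
migration_slots_buffer_; all names below are illustrative only:

#include <cstdint>
#include <mutex>
#include <thread>
#include <vector>

int main() {
  std::mutex buffer_mutex;  // plays the role of migration_slots_buffer_mutex_
  std::vector<std::uintptr_t> migration_slots;

  auto record = [&](std::uintptr_t base) {
    for (std::uintptr_t i = 0; i < 1000; i++) {
      // The SynchronizedAddTo discipline: every append from a parallel task
      // goes through the shared mutex.
      std::lock_guard<std::mutex> guard(buffer_mutex);
      migration_slots.push_back(base + i);
    }
  };

  std::thread task_a(record, std::uintptr_t(0x1000));
  std::thread task_b(record, std::uintptr_t(0x2000));
  task_a.join();
  task_b.join();

  // With the lock every entry survives; without it the racing push_backs
  // can corrupt the vector or drop entries.
  return migration_slots.size() == 2000 ? 0 : 1;
}
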
Index: src/heap/store-buffer-inl.h
diff --git a/src/heap/store-buffer-inl.h b/src/heap/store-buffer-inl.h
index 1f3dda21d223788360acddf44399548ea97bfe09..0fbca4b2fe64ffb68d9b4c3bb47965cdb16bac61 100644
--- a/src/heap/store-buffer-inl.h
+++ b/src/heap/store-buffer-inl.h
@@ -26,7 +26,7 @@ void StoreBuffer::Mark(Address addr) {
}
-inline void StoreBuffer::MarkSynchronized(Address addr) {
+inline void StoreBuffer::SynchronizedMark(Address addr) {
base::LockGuard<base::Mutex> lock_guard(&mutex_);
Mark(addr);
}
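
Design note: SynchronizedMark deliberately wraps the existing unsynchronized
Mark under a lock guard instead of duplicating its body, so the common
single-threaded path keeps paying no locking cost. A minimal sketch of the
same shape; StoreBufferSketch is a hypothetical stand-in, not the real
StoreBuffer:

#include <mutex>

class StoreBufferSketch {
 public:
  // Unsynchronized fast path: safe while only one thread records addresses.
  void Mark(void* addr) {
    if (count_ < kSize) slots_[count_++] = addr;
  }

  // Locked wrapper, mirroring StoreBuffer::SynchronizedMark: safe when
  // parallel compaction tasks may record addresses concurrently.
  void SynchronizedMark(void* addr) {
    std::lock_guard<std::mutex> guard(mutex_);
    Mark(addr);
  }

 private:
  static const int kSize = 64;
  void* slots_[kSize];
  int count_ = 0;
  std::mutex mutex_;
};
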
Index: src/heap/store-buffer.h
diff --git a/src/heap/store-buffer.h b/src/heap/store-buffer.h
index 37a78eb07528b212752828f885bba25d5358933b..0af40f0b9978201a9a65beac7cf6f52158742b2d 100644
--- a/src/heap/store-buffer.h
+++ b/src/heap/store-buffer.h
@@ -38,7 +38,7 @@ class StoreBuffer {
// This is used to add addresses to the store buffer when multiple threads
// may operate on the store buffer.
- inline void MarkSynchronized(Address addr);
+ inline void SynchronizedMark(Address addr);
// This is used by the heap traversal to enter the addresses into the store
// buffer that should still be in the store buffer after GC. It enters