Reviewers: Michael Lippautz,
Description:
Synchronize concurrent store buffer entries during parallel compaction.
BUG=chromium:524425
LOG=n
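
As background for reviewers: the pattern introduced here is a lock-guarded wrapper around the existing unsynchronized Mark(), used only while parallel compaction is in progress. Below is a minimal standalone sketch of that pattern; the class and member names are illustrative stand-ins, not V8's actual StoreBuffer.

#include <cstdint>
#include <mutex>
#include <vector>

class SimpleStoreBuffer {
 public:
  // Fast path: safe only while a single thread writes to the buffer
  // (the mutator, or the main thread during non-parallel evacuation).
  void Mark(uintptr_t slot) { entries_.push_back(slot); }

  // Slow path: takes a mutex so parallel compaction tasks can record
  // slots concurrently without corrupting the buffer.
  void MarkSynchronized(uintptr_t slot) {
    std::lock_guard<std::mutex> guard(mutex_);
    Mark(slot);
  }

 private:
  std::vector<uintptr_t> entries_;
  std::mutex mutex_;
};

Keeping the unsynchronized Mark() as the default means the common mutator path pays no locking cost; only the parallel-compaction path goes through the mutex.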
Please review this at https://codereview.chromium.org/1313313002/
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Affected files (+33, -3 lines):
M src/heap/mark-compact.h
M src/heap/mark-compact.cc
M src/heap/store-buffer.h
M src/heap/store-buffer-inl.h
Index: src/heap/mark-compact.cc
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index dc93a5a9a746549ce548a478a56b623dc34bc423..738e00b09ceb6fd0a4ae804dbe3b1e27aef125c0 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -47,6 +47,7 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap)
compacting_(false),
was_marked_incrementally_(false),
sweeping_in_progress_(false),
+ parallel_compaction_in_progress_(false),
pending_sweeper_jobs_semaphore_(0),
pending_compaction_jobs_semaphore_(0),
evacuation_(false),
@@ -2690,7 +2691,11 @@ void MarkCompactCollector::AbortWeakCells() {
void MarkCompactCollector::RecordMigratedSlot(Object* value, Address slot) {
if (heap_->InNewSpace(value)) {
- heap_->store_buffer()->Mark(slot);
+ if (parallel_compaction_in_progress_) {
+ heap_->store_buffer()->MarkSynchronized(slot);
+ } else {
+ heap_->store_buffer()->Mark(slot);
+ }
} else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
reinterpret_cast<Object**>(slot),
@@ -3313,11 +3318,18 @@ void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
void MarkCompactCollector::EvacuatePagesInParallel() {
+ parallel_compaction_in_progress_ = true;
V8::GetCurrentPlatform()->CallOnBackgroundThread(
new CompactionTask(heap()), v8::Platform::kShortRunningTask);
}
+void MarkCompactCollector::WaitUntilCompactionCompleted() {
+ pending_compaction_jobs_semaphore_.Wait();
+ parallel_compaction_in_progress_ = false;
+}
+
+
void MarkCompactCollector::EvacuatePages() {
int npages = evacuation_candidates_.length();
int abandoned_pages = 0;
@@ -3626,7 +3638,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
EvacuationScope evacuation_scope(this);
if (FLAG_parallel_compaction) {
EvacuatePagesInParallel();
- pending_compaction_jobs_semaphore_.Wait();
+ WaitUntilCompactionCompleted();
} else {
EvacuatePages();
}
Index: src/heap/mark-compact.h
diff --git a/src/heap/mark-compact.h b/src/heap/mark-compact.h
index d6ff1c96144232fc38bdeefa4d91001da58a6f7f..ffb54bb462cbd13dbb595d27b902a4c51eb38067 100644
--- a/src/heap/mark-compact.h
+++ b/src/heap/mark-compact.h
@@ -707,6 +707,9 @@ class MarkCompactCollector {
// True if concurrent or parallel sweeping is currently in progress.
bool sweeping_in_progress_;
+ // True if parallel compaction is currently in progress.
+ bool parallel_compaction_in_progress_;
+
// Synchronize sweeper threads.
base::Semaphore pending_sweeper_jobs_semaphore_;
@@ -873,6 +876,8 @@ class MarkCompactCollector {
void EvacuatePagesInParallel();
+ void WaitUntilCompactionCompleted();
+
void EvacuateNewSpaceAndCandidates();
void ReleaseEvacuationCandidates();
Index: src/heap/store-buffer-inl.h
diff --git a/src/heap/store-buffer-inl.h b/src/heap/store-buffer-inl.h
index 230384af7a34f74ce42281308512b74af0df68e7..1f3dda21d223788360acddf44399548ea97bfe09 100644
--- a/src/heap/store-buffer-inl.h
+++ b/src/heap/store-buffer-inl.h
@@ -26,6 +26,12 @@ void StoreBuffer::Mark(Address addr) {
}
+inline void StoreBuffer::MarkSynchronized(Address addr) {
+ base::LockGuard<base::Mutex> lock_guard(&mutex_);
+ Mark(addr);
+}
+
+
void StoreBuffer::EnterDirectlyIntoStoreBuffer(Address addr) {
if (store_buffer_rebuilding_enabled_) {
SLOW_DCHECK(!heap_->code_space()->Contains(addr) &&
Index: src/heap/store-buffer.h
diff --git a/src/heap/store-buffer.h b/src/heap/store-buffer.h
index cb96fa9720a64909fbd1dcf04917f0018f095a08..37a78eb07528b212752828f885bba25d5358933b 100644
--- a/src/heap/store-buffer.h
+++ b/src/heap/store-buffer.h
@@ -33,9 +33,13 @@ class StoreBuffer {
void SetUp();
void TearDown();
- // This is used by the mutator to enter addresses into the store buffer.
+ // This is used to add addresses to the store buffer non-concurrently.
inline void Mark(Address addr);
+ // This is used to add addresses to the store buffer when multiple threads
+ // may operate on the store buffer.
+ inline void MarkSynchronized(Address addr);
+
// This is used by the heap traversal to enter the addresses into the store
// buffer that should still be in the store buffer after GC. It enters
// addresses directly into the old buffer because the GC starts by wiping the
@@ -129,6 +133,9 @@ class StoreBuffer {
uintptr_t* hash_set_2_;
bool hash_sets_are_empty_;
+ // Used for synchronization of concurrent store buffer access.
+ base::Mutex mutex_;
+
void ClearFilteringHashSets();
bool SpaceAvailable(intptr_t space_needed);
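
For context on the EvacuatePagesInParallel() / WaitUntilCompactionCompleted() pairing above: the main thread sets parallel_compaction_in_progress_, posts the compaction task, blocks on the semaphore the task signals when it finishes, and only then clears the flag. Below is a rough standalone sketch of that hand-off using standard C++ primitives instead of v8::Platform and base::Semaphore; all names are illustrative.

#include <condition_variable>
#include <mutex>
#include <thread>

class CompactionCoordinator {
 public:
  // Counterpart of EvacuatePagesInParallel(): set the flag, then hand the
  // work to a background thread (standing in for the platform task runner).
  void StartParallelCompaction() {
    parallel_compaction_in_progress_ = true;
    worker_ = std::thread([this] {
      // ... evacuate pages, recording slots via the synchronized path ...
      std::lock_guard<std::mutex> guard(mutex_);
      done_ = true;
      cv_.notify_one();  // plays the role of the semaphore's Signal()
    });
  }

  // Counterpart of WaitUntilCompactionCompleted(): block until the task is
  // done, then clear the flag so the unsynchronized path can be used again.
  void WaitUntilCompactionCompleted() {
    {
      std::unique_lock<std::mutex> lock(mutex_);
      cv_.wait(lock, [this] { return done_; });  // semaphore Wait() equivalent
    }
    parallel_compaction_in_progress_ = false;
    worker_.join();
  }

 private:
  std::thread worker_;
  std::mutex mutex_;
  std::condition_variable cv_;
  bool done_ = false;
  bool parallel_compaction_in_progress_ = false;
};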