Diff
Modified: trunk/Source/JavaScriptCore/ChangeLog (207178 => 207179)
--- trunk/Source/JavaScriptCore/ChangeLog 2016-10-11 23:25:38 UTC (rev 207178)
+++ trunk/Source/JavaScriptCore/ChangeLog 2016-10-11 23:52:02 UTC (rev 207179)
@@ -1,3 +1,98 @@
+2016-10-06 Filip Pizlo <[email protected]>
+
+ MarkedBlock should know what objects are live during marking
+ https://bugs.webkit.org/show_bug.cgi?id=162309
+
+ Reviewed by Geoffrey Garen.
+
+ It used to be that we would forget which objects are live the moment we started collection.
+ That's because the flip at the beginning clears all mark bits.
+
+ But we already have a facility for tracking objects that are live-but-not-marked. It's called
+ newlyAllocated. So, instead of clearing mark bits, we want to just transfer them to
+ newlyAllocated. Then we want to clear all newlyAllocated after GC.
+
+ This implements such an approach, along with a versioning optimization for newlyAllocated.
+ Instead of walking the whole heap to clear newlyAllocated bits at the end of the GC, we bump
+ the newlyAllocatedVersion, which causes MarkedBlock to treat newlyAllocated as if it were
+ clear.
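A minimal sketch of that versioning trick, using simplified stand-in types rather than the real MarkedBlock/MarkedSpace interfaces (the actual changes are in the diff below): a bitmap whose stored version is stale is treated as all-clear, so "clearing" every block's bits is just a global version bump.

    #include <bitset>
    #include <cstdint>

    // Illustrative stand-ins only; names and types are simplified, not the JSC API.
    struct BlockSketch {
        std::bitset<64> newlyAllocated;       // per-block "live but not yet marked" bits
        uint32_t newlyAllocatedVersion { 0 }; // 0 plays the role of nullVersion (stale)
    };

    struct SpaceSketch {
        uint32_t newlyAllocatedVersion { 2 }; // starts at initialVersion; never wraps back to 0

        bool hasAnyNewlyAllocated(const BlockSketch& block) const
        {
            // A stale version means the bits are logically clear; nobody has to touch them.
            return block.newlyAllocatedVersion == newlyAllocatedVersion;
        }

        void setNewlyAllocated(BlockSketch& block, size_t atom)
        {
            if (block.newlyAllocatedVersion != newlyAllocatedVersion) {
                block.newlyAllocated.reset(); // lazily clear stale bits on first use
                block.newlyAllocatedVersion = newlyAllocatedVersion;
            }
            block.newlyAllocated.set(atom);
        }

        void clearAllNewlyAllocated()
        {
            // End of GC: rather than walking every block, make every block's bits stale.
            ++newlyAllocatedVersion;
        }
    };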
+
+ We could have even avoided allocating newlyAllocated in most cases, since empirically most
+ blocks are either completely empty or completely full. An earlier version of this patch did
+ this, but it was not better than this patch. In fact, it seemed to actually be worse for PLT
+ and membuster.
+
+ To validate this change, we now run the conservative scan after the beginMarking flip. And it
+ totally works!
+
+ This is a huge step towards concurrent GC. It means that we ought to be able to run the
+ allocator while marking. Since we already separately made it possible to run the barrier
+ while marking, this means that we're pretty much ready for some serious concurrency action.
+
+ This appears to be perf-neutral and space-neutral.
+
+ * JavaScriptCore.xcodeproj/project.pbxproj:
+ * bytecode/CodeBlock.cpp:
+ * bytecode/CodeBlock.h:
+ (JSC::CodeBlockSet::mark): Deleted.
+ * heap/CodeBlockSet.cpp:
+ (JSC::CodeBlockSet::writeBarrierCurrentlyExecuting):
+ (JSC::CodeBlockSet::clearCurrentlyExecuting):
+ (JSC::CodeBlockSet::writeBarrierCurrentlyExecutingCodeBlocks): Deleted.
+ * heap/CodeBlockSet.h:
+ * heap/CodeBlockSetInlines.h: Added.
+ (JSC::CodeBlockSet::mark):
+ * heap/ConservativeRoots.cpp:
+ * heap/Heap.cpp:
+ (JSC::Heap::markRoots):
+ (JSC::Heap::beginMarking):
+ (JSC::Heap::collectImpl):
+ (JSC::Heap::writeBarrierCurrentlyExecutingCodeBlocks):
+ (JSC::Heap::clearCurrentlyExecutingCodeBlocks):
+ * heap/Heap.h:
+ * heap/HeapUtil.h:
+ (JSC::HeapUtil::findGCObjectPointersForMarking):
+ * heap/MarkedAllocator.cpp:
+ (JSC::MarkedAllocator::isPagedOut):
+ * heap/MarkedBlock.cpp:
+ (JSC::MarkedBlock::Handle::Handle):
+ (JSC::MarkedBlock::Handle::sweepHelperSelectHasNewlyAllocated):
+ (JSC::MarkedBlock::Handle::stopAllocating):
+ (JSC::MarkedBlock::Handle::lastChanceToFinalize):
+ (JSC::MarkedBlock::Handle::resumeAllocating):
+ (JSC::MarkedBlock::aboutToMarkSlow):
+ (JSC::MarkedBlock::Handle::resetAllocated):
+ (JSC::MarkedBlock::resetMarks):
+ (JSC::MarkedBlock::setNeedsDestruction):
+ (JSC::MarkedBlock::Handle::didAddToAllocator):
+ (JSC::MarkedBlock::Handle::isLive):
+ (JSC::MarkedBlock::Handle::isLiveCell):
+ (JSC::MarkedBlock::clearMarks): Deleted.
+ * heap/MarkedBlock.h:
+ (JSC::MarkedBlock::Handle::newlyAllocatedVersion):
+ (JSC::MarkedBlock::Handle::hasAnyNewlyAllocated): Deleted.
+ (JSC::MarkedBlock::Handle::clearNewlyAllocated): Deleted.
+ * heap/MarkedBlockInlines.h:
+ (JSC::MarkedBlock::Handle::cellsPerBlock):
+ (JSC::MarkedBlock::Handle::isLive):
+ (JSC::MarkedBlock::Handle::isLiveCell):
+ (JSC::MarkedBlock::Handle::isNewlyAllocatedStale):
+ (JSC::MarkedBlock::Handle::hasAnyNewlyAllocatedWithSweep):
+ (JSC::MarkedBlock::Handle::hasAnyNewlyAllocated):
+ (JSC::MarkedBlock::heap):
+ (JSC::MarkedBlock::space):
+ (JSC::MarkedBlock::Handle::space):
+ (JSC::MarkedBlock::resetMarkingVersion): Deleted.
+ * heap/MarkedSpace.cpp:
+ (JSC::MarkedSpace::beginMarking):
+ (JSC::MarkedSpace::endMarking):
+ (JSC::MarkedSpace::clearNewlyAllocated): Deleted.
+ * heap/MarkedSpace.h:
+ (JSC::MarkedSpace::nextVersion):
+ (JSC::MarkedSpace::newlyAllocatedVersion):
+ (JSC::MarkedSpace::markingVersion): Deleted.
+ * runtime/SamplingProfiler.cpp:
+
2016-10-11 Mark Lam <[email protected]>
Array.prototype.concat should not modify frozen objects.
Modified: trunk/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj (207178 => 207179)
--- trunk/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj 2016-10-11 23:25:38 UTC (rev 207178)
+++ trunk/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj 2016-10-11 23:52:02 UTC (rev 207179)
@@ -445,6 +445,7 @@
0F64B2791A7957B2006E4E66 /* CallEdge.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F64B2771A7957B2006E4E66 /* CallEdge.cpp */; };
0F64B27A1A7957B2006E4E66 /* CallEdge.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F64B2781A7957B2006E4E66 /* CallEdge.h */; settings = {ATTRIBUTES = (Private, ); }; };
0F64EAF31C4ECD0600621E9B /* AirArgInlines.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F64EAF21C4ECD0600621E9B /* AirArgInlines.h */; };
+ 0F664CE81DA304EF00B00A11 /* CodeBlockSetInlines.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F664CE71DA304ED00B00A11 /* CodeBlockSetInlines.h */; };
0F666EC0183566F900D017F1 /* BytecodeLivenessAnalysisInlines.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F666EBE183566F900D017F1 /* BytecodeLivenessAnalysisInlines.h */; settings = {ATTRIBUTES = (Private, ); }; };
0F666EC1183566F900D017F1 /* FullBytecodeLiveness.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F666EBF183566F900D017F1 /* FullBytecodeLiveness.h */; settings = {ATTRIBUTES = (Private, ); }; };
0F666EC61835672B00D017F1 /* DFGAvailability.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F666EC21835672B00D017F1 /* DFGAvailability.cpp */; };
@@ -2709,6 +2710,7 @@
0F64B2771A7957B2006E4E66 /* CallEdge.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CallEdge.cpp; sourceTree = "<group>"; };
0F64B2781A7957B2006E4E66 /* CallEdge.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CallEdge.h; sourceTree = "<group>"; };
0F64EAF21C4ECD0600621E9B /* AirArgInlines.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = AirArgInlines.h; path = b3/air/AirArgInlines.h; sourceTree = "<group>"; };
+ 0F664CE71DA304ED00B00A11 /* CodeBlockSetInlines.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CodeBlockSetInlines.h; sourceTree = "<group>"; };
0F666EBE183566F900D017F1 /* BytecodeLivenessAnalysisInlines.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = BytecodeLivenessAnalysisInlines.h; sourceTree = "<group>"; };
0F666EBF183566F900D017F1 /* FullBytecodeLiveness.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = FullBytecodeLiveness.h; sourceTree = "<group>"; };
0F666EC21835672B00D017F1 /* DFGAvailability.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DFGAvailability.cpp; path = dfg/DFGAvailability.cpp; sourceTree = "<group>"; };
@@ -5325,6 +5327,7 @@
0F1C3DD91BBCE09E00E523E4 /* CellState.h */,
0FD8A31117D4326C00CA2C40 /* CodeBlockSet.cpp */,
0FD8A31217D4326C00CA2C40 /* CodeBlockSet.h */,
+ 0F664CE71DA304ED00B00A11 /* CodeBlockSetInlines.h */,
146B14DB12EB5B12001BEC1B /* ConservativeRoots.cpp */,
149DAAF212EB559D0083B12B /* ConservativeRoots.h */,
2A7A58EE1808A4C40020BDF7 /* DeferGC.cpp */,
@@ -7960,6 +7963,7 @@
BC18C41C0E16F5CD00B34460 /* JSCallbackObjectFunctions.h in Headers */,
657CF45919BF6662004ACBF2 /* JSCallee.h in Headers */,
A7D801A91880D6A80026C39B /* JSCBuiltins.h in Headers */,
+ 0F664CE81DA304EF00B00A11 /* CodeBlockSetInlines.h in Headers */,
BC1167DA0E19BCC9008066DD /* JSCell.h in Headers */,
0F9749711687ADE400A4FF6A /* JSCellInlines.h in Headers */,
0F1DD84A18A945BE0026F3FA /* JSCInlines.h in Headers */,
Modified: trunk/Source/JavaScriptCore/bytecode/CodeBlock.cpp (207178 => 207179)
--- trunk/Source/JavaScriptCore/bytecode/CodeBlock.cpp 2016-10-11 23:25:38 UTC (rev 207178)
+++ trunk/Source/JavaScriptCore/bytecode/CodeBlock.cpp 2016-10-11 23:52:02 UTC (rev 207179)
@@ -36,6 +36,7 @@
#include "BytecodeLivenessAnalysis.h"
#include "BytecodeUseDef.h"
#include "CallLinkStatus.h"
+#include "CodeBlockSet.h"
#include "DFGCapabilities.h"
#include "DFGCommon.h"
#include "DFGDriver.h"
Modified: trunk/Source/JavaScriptCore/bytecode/CodeBlock.h (207178 => 207179)
--- trunk/Source/JavaScriptCore/bytecode/CodeBlock.h 2016-10-11 23:25:38 UTC (rev 207178)
+++ trunk/Source/JavaScriptCore/bytecode/CodeBlock.h 2016-10-11 23:52:02 UTC (rev 207179)
@@ -35,7 +35,6 @@
#include "CallLinkInfo.h"
#include "CallReturnOffsetToBytecodeOffset.h"
#include "CodeBlockHash.h"
-#include "CodeBlockSet.h"
#include "CodeOrigin.h"
#include "CodeType.h"
#include "CompactJITCodeMap.h"
@@ -76,6 +75,7 @@
namespace JSC {
class BytecodeLivenessAnalysis;
+class CodeBlockSet;
class ExecState;
class JSModuleEnvironment;
class LLIntOffsetsExtractor;
@@ -1311,38 +1311,6 @@
m_visitWeaklyHasBeenCalled.store(false, std::memory_order_relaxed);
}
-inline void CodeBlockSet::mark(const LockHolder& locker, void* candidateCodeBlock)
-{
- ASSERT(m_lock.isLocked());
- // We have to check for 0 and -1 because those are used by the HashMap as markers.
- uintptr_t value = reinterpret_cast<uintptr_t>(candidateCodeBlock);
-
- // This checks for both of those nasty cases in one go.
- // 0 + 1 = 1
- // -1 + 1 = 0
- if (value + 1 <= 1)
- return;
-
- CodeBlock* codeBlock = static_cast<CodeBlock*>(candidateCodeBlock);
- if (!m_oldCodeBlocks.contains(codeBlock) && !m_newCodeBlocks.contains(codeBlock))
- return;
-
- mark(locker, codeBlock);
-}
-
-inline void CodeBlockSet::mark(const LockHolder&, CodeBlock* codeBlock)
-{
- if (!codeBlock)
- return;
-
- // Try to recover gracefully if we forget to execute a barrier for a
- // CodeBlock that does value profiling. This is probably overkill, but we
- // have always done it.
- Heap::heap(codeBlock)->writeBarrier(codeBlock);
-
- m_currentlyExecuting.add(codeBlock);
-}
-
template <typename Functor> inline void ScriptExecutable::forEachCodeBlock(Functor&& functor)
{
switch (type()) {
Modified: trunk/Source/JavaScriptCore/heap/CodeBlockSet.cpp (207178 => 207179)
--- trunk/Source/JavaScriptCore/heap/CodeBlockSet.cpp 2016-10-11 23:25:38 UTC (rev 207178)
+++ trunk/Source/JavaScriptCore/heap/CodeBlockSet.cpp 2016-10-11 23:52:02 UTC (rev 207179)
@@ -107,7 +107,7 @@
return m_oldCodeBlocks.contains(codeBlock) || m_newCodeBlocks.contains(codeBlock) || m_currentlyExecuting.contains(codeBlock);
}
-void CodeBlockSet::writeBarrierCurrentlyExecutingCodeBlocks(Heap* heap)
+void CodeBlockSet::writeBarrierCurrentlyExecuting(Heap* heap)
{
LockHolder locker(&m_lock);
if (verbose)
@@ -114,9 +114,10 @@
dataLog("Remembering ", m_currentlyExecuting.size(), " code blocks.\n");
for (CodeBlock* codeBlock : m_currentlyExecuting)
heap->writeBarrier(codeBlock);
+}
- // It's safe to clear this set because we won't delete the CodeBlocks
- // in it until the next GC, and we'll recompute it at that time.
+void CodeBlockSet::clearCurrentlyExecuting()
+{
m_currentlyExecuting.clear();
}
Modified: trunk/Source/JavaScriptCore/heap/CodeBlockSet.h (207178 => 207179)
--- trunk/Source/JavaScriptCore/heap/CodeBlockSet.h 2016-10-11 23:25:38 UTC (rev 207178)
+++ trunk/Source/JavaScriptCore/heap/CodeBlockSet.h 2016-10-11 23:52:02 UTC (rev 207179)
@@ -71,8 +71,10 @@
// Add all currently executing CodeBlocks to the remembered set to be
// re-scanned during the next collection.
- void writeBarrierCurrentlyExecutingCodeBlocks(Heap*);
+ void writeBarrierCurrentlyExecuting(Heap*);
+ void clearCurrentlyExecuting();
+
bool contains(const LockHolder&, void* candidateCodeBlock);
Lock& getLock() { return m_lock; }
Added: trunk/Source/JavaScriptCore/heap/CodeBlockSetInlines.h (0 => 207179)
--- trunk/Source/JavaScriptCore/heap/CodeBlockSetInlines.h (rev 0)
+++ trunk/Source/JavaScriptCore/heap/CodeBlockSetInlines.h 2016-10-11 23:52:02 UTC (rev 207179)
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "CodeBlock.h"
+#include "CodeBlockSet.h"
+
+namespace JSC {
+
+inline void CodeBlockSet::mark(const LockHolder& locker, void* candidateCodeBlock)
+{
+ ASSERT(m_lock.isLocked());
+ // We have to check for 0 and -1 because those are used by the HashMap as markers.
+ uintptr_t value = reinterpret_cast<uintptr_t>(candidateCodeBlock);
+
+ // This checks for both of those nasty cases in one go.
+ // 0 + 1 = 1
+ // -1 + 1 = 0
+ if (value + 1 <= 1)
+ return;
+
+ CodeBlock* codeBlock = static_cast<CodeBlock*>(candidateCodeBlock);
+ if (!m_oldCodeBlocks.contains(codeBlock) && !m_newCodeBlocks.contains(codeBlock))
+ return;
+
+ mark(locker, codeBlock);
+}
+
+inline void CodeBlockSet::mark(const LockHolder&, CodeBlock* codeBlock)
+{
+ if (!codeBlock)
+ return;
+
+ m_currentlyExecuting.add(codeBlock);
+}
+
+} // namespace JSC
+
Modified: trunk/Source/JavaScriptCore/heap/ConservativeRoots.cpp (207178 => 207179)
--- trunk/Source/JavaScriptCore/heap/ConservativeRoots.cpp 2016-10-11 23:25:38 UTC (rev 207178)
+++ trunk/Source/JavaScriptCore/heap/ConservativeRoots.cpp 2016-10-11 23:52:02 UTC (rev 207179)
@@ -27,7 +27,7 @@
#include "ConservativeRoots.h"
#include "CodeBlock.h"
-#include "CodeBlockSet.h"
+#include "CodeBlockSetInlines.h"
#include "HeapInlines.h"
#include "HeapUtil.h"
#include "JITStubRoutineSet.h"
Modified: trunk/Source/JavaScriptCore/heap/Heap.cpp (207178 => 207179)
--- trunk/Source/JavaScriptCore/heap/Heap.cpp 2016-10-11 23:25:38 UTC (rev 207178)
+++ trunk/Source/JavaScriptCore/heap/Heap.cpp 2016-10-11 23:52:02 UTC (rev 207179)
@@ -22,6 +22,7 @@
#include "Heap.h"
#include "CodeBlock.h"
+#include "CodeBlockSet.h"
#include "ConservativeRoots.h"
#include "DFGWorklist.h"
#include "EdenGCActivityCallback.h"
@@ -388,18 +389,8 @@
HeapRootVisitor heapRootVisitor(m_slotVisitor);
- ConservativeRoots conservativeRoots(*this);
{
TimingScope preConvergenceTimingScope(*this, "Heap::markRoots before convergence");
- // We gather conservative roots before clearing mark bits because conservative
- // gathering uses the mark bits to determine whether a reference is valid.
- {
- TimingScope preConvergenceTimingScope(*this, "Heap::markRoots conservative scan");
- SuperSamplerScope superSamplerScope(false);
- gatherStackRoots(conservativeRoots, stackOrigin, stackTop, calleeSavedRegisters);
- gatherJSStackRoots(conservativeRoots);
- gatherScratchBufferRoots(conservativeRoots);
- }
#if ENABLE(DFG_JIT)
DFG::rememberCodeBlocks(*m_vm);
@@ -463,9 +454,24 @@
ParallelModeEnabler enabler(m_slotVisitor);
m_slotVisitor.donateAndDrain();
+
+ {
+ TimingScope preConvergenceTimingScope(*this, "Heap::markRoots conservative scan");
+ ConservativeRoots conservativeRoots(*this);
+ SuperSamplerScope superSamplerScope(false);
+ gatherStackRoots(conservativeRoots, stackOrigin, stackTop, calleeSavedRegisters);
+ gatherJSStackRoots(conservativeRoots);
+ gatherScratchBufferRoots(conservativeRoots);
+ visitConservativeRoots(conservativeRoots);
+
+ // We want to do this to conservatively ensure that we rescan any code blocks that are
+ // running right now. However, we need to be sure to do it *after* we mark the code block
+ // so that we know for sure if it really needs a barrier.
+ m_codeBlocks->writeBarrierCurrentlyExecuting(this);
+ }
+
visitExternalRememberedSet();
visitSmallStrings();
- visitConservativeRoots(conservativeRoots);
visitProtectedObjects(heapRootVisitor);
visitArgumentBuffers(heapRootVisitor);
visitException(heapRootVisitor);
@@ -524,11 +530,6 @@
m_codeBlocks->clearMarksForFullCollection();
{
- TimingScope clearNewlyAllocatedTimingScope(*this, "m_objectSpace.clearNewlyAllocated");
- m_objectSpace.clearNewlyAllocated();
- }
-
- {
TimingScope clearMarksTimingScope(*this, "m_objectSpace.beginMarking");
m_objectSpace.beginMarking();
}
@@ -1059,7 +1060,8 @@
deleteSourceProviderCaches();
notifyIncrementalSweeper();
- writeBarrierCurrentlyExecutingCodeBlocks();
+ m_codeBlocks->writeBarrierCurrentlyExecuting(this);
+ m_codeBlocks->clearCurrentlyExecuting();
prepareForAllocation();
updateAllocationLimits();
@@ -1201,11 +1203,6 @@
m_sweeper->startSweeping();
}
-void Heap::writeBarrierCurrentlyExecutingCodeBlocks()
-{
- m_codeBlocks->writeBarrierCurrentlyExecutingCodeBlocks(this);
-}
-
void Heap::prepareForAllocation()
{
m_objectSpace.prepareForAllocation();
Modified: trunk/Source/JavaScriptCore/heap/Heap.h (207178 => 207179)
--- trunk/Source/JavaScriptCore/heap/Heap.h 2016-10-11 23:25:38 UTC (rev 207178)
+++ trunk/Source/JavaScriptCore/heap/Heap.h 2016-10-11 23:52:02 UTC (rev 207179)
@@ -349,7 +349,6 @@
void snapshotUnswept();
void deleteSourceProviderCaches();
void notifyIncrementalSweeper();
- void writeBarrierCurrentlyExecutingCodeBlocks();
void prepareForAllocation();
void harvestWeakReferences();
void finalizeUnconditionalFinalizers();
Modified: trunk/Source/JavaScriptCore/heap/HeapUtil.h (207178 => 207179)
--- trunk/Source/JavaScriptCore/heap/HeapUtil.h 2016-10-11 23:25:38 UTC (rev 207178)
+++ trunk/Source/JavaScriptCore/heap/HeapUtil.h 2016-10-11 23:52:02 UTC (rev 207179)
@@ -51,6 +51,9 @@
{
const HashSet<MarkedBlock*>& set = heap.objectSpace().blocks().set();
+ ASSERT(heap.objectSpace().isMarking());
+ static const bool isMarking = true;
+
char* pointer = static_cast<char*>(passedPointer);
// It could point to a large allocation.
@@ -85,7 +88,7 @@
&& set.contains(previousCandidate)
&& previousCandidate->handle().cellKind() == HeapCell::Auxiliary) {
previousPointer = static_cast<char*>(previousCandidate->handle().cellAlign(previousPointer));
- if (previousCandidate->handle().isLiveCell(markingVersion, previousPointer))
+ if (previousCandidate->handle().isLiveCell(markingVersion, isMarking, previousPointer))
func(previousPointer);
}
}
@@ -99,7 +102,7 @@
return;
auto tryPointer = [&] (void* pointer) {
- if (candidate->handle().isLiveCell(markingVersion, pointer))
+ if (candidate->handle().isLiveCell(markingVersion, isMarking, pointer))
func(pointer);
};
Modified: trunk/Source/JavaScriptCore/heap/MarkedAllocator.cpp (207178 => 207179)
--- trunk/Source/JavaScriptCore/heap/MarkedAllocator.cpp 2016-10-11 23:25:38 UTC (rev 207178)
+++ trunk/Source/JavaScriptCore/heap/MarkedAllocator.cpp 2016-10-11 23:52:02 UTC (rev 207179)
@@ -53,11 +53,8 @@
unsigned itersSinceLastTimeCheck = 0;
for (size_t index = 0; index < m_blocks.size(); ++index) {
MarkedBlock::Handle* block = m_blocks[index];
- if (block) {
- // Forces us to touch the memory of the block, but has no semantic effect.
- if (block->areMarksStale())
- block->block().resetMarkingVersion();
- }
+ if (block)
+ block->block().updateNeedsDestruction();
++itersSinceLastTimeCheck;
if (itersSinceLastTimeCheck >= Heap::s_timeCheckResolution) {
double currentTime = WTF::monotonicallyIncreasingTime();
@@ -436,7 +433,8 @@
{
m_unswept.forEachSetBit(
[&] (size_t index) {
- m_blocks[index]->sweep();
+ MarkedBlock::Handle* block = m_blocks[index];
+ block->sweep();
});
}
Modified: trunk/Source/JavaScriptCore/heap/MarkedBlock.cpp (207178 => 207179)
--- trunk/Source/JavaScriptCore/heap/MarkedBlock.cpp 2016-10-11 23:25:38 UTC (rev 207178)
+++ trunk/Source/JavaScriptCore/heap/MarkedBlock.cpp 2016-10-11 23:52:02 UTC (rev 207179)
@@ -54,6 +54,7 @@
MarkedBlock::Handle::Handle(Heap& heap, void* blockSpace)
: m_weakSet(heap.vm(), CellContainer())
+ , m_newlyAllocatedVersion(MarkedSpace::nullVersion)
{
m_block = new (NotNull, blockSpace) MarkedBlock(*heap.vm(), *this);
@@ -125,7 +126,7 @@
for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
if (emptyMode == NotEmpty
&& ((marksMode == MarksNotStale && block.m_marks.get(i))
- || (newlyAllocatedMode == HasNewlyAllocated && m_newlyAllocated->get(i)))) {
+ || (newlyAllocatedMode == HasNewlyAllocated && m_newlyAllocated.get(i)))) {
isEmpty = false;
continue;
}
@@ -148,7 +149,7 @@
// We only want to discard the newlyAllocated bits if we're creating a FreeList,
// otherwise we would lose information on what's currently alive.
if (sweepMode == SweepToFreeList && newlyAllocatedMode == HasNewlyAllocated)
- m_newlyAllocated = nullptr;
+ m_newlyAllocatedVersion = MarkedSpace::nullVersion;
FreeList result = FreeList::list(head, count * cellSize());
if (sweepMode == SweepToFreeList)
@@ -207,7 +208,7 @@
template<MarkedBlock::Handle::EmptyMode emptyMode, DestructionMode destructionMode, MarkedBlock::Handle::ScribbleMode scribbleMode>
FreeList MarkedBlock::Handle::sweepHelperSelectHasNewlyAllocated(SweepMode sweepMode)
{
- if (m_newlyAllocated)
+ if (hasAnyNewlyAllocated())
return sweepHelperSelectSweepMode<emptyMode, destructionMode, scribbleMode, HasNewlyAllocated>(sweepMode);
return sweepHelperSelectSweepMode<emptyMode, destructionMode, scribbleMode, DoesNotHaveNewlyAllocated>(sweepMode);
}
@@ -275,8 +276,8 @@
// allocated from our free list are not currently marked, so we need another
// way to tell what's live vs dead.
- ASSERT(!m_newlyAllocated);
- m_newlyAllocated = std::make_unique<WTF::Bitmap<atomsPerBlock>>();
+ m_newlyAllocated.clearAll();
+ m_newlyAllocatedVersion = heap()->objectSpace().newlyAllocatedVersion();
SetNewlyAllocatedFunctor functor(this);
forEachCell(functor);
@@ -295,10 +296,12 @@
void MarkedBlock::Handle::lastChanceToFinalize()
{
allocator()->setIsAllocated(this, false);
- m_block->clearMarks();
+ m_block->m_marks.clearAll();
+ m_block->clearHasAnyMarked();
+ m_block->m_markingVersion = heap()->objectSpace().markingVersion();
m_weakSet.lastChanceToFinalize();
-
- clearNewlyAllocated();
+ m_newlyAllocated.clearAll();
+ m_newlyAllocatedVersion = heap()->objectSpace().newlyAllocatedVersion();
sweep();
}
@@ -307,7 +310,7 @@
ASSERT(!allocator()->isAllocated(this));
ASSERT(!isFreeListed());
- if (!m_newlyAllocated) {
+ if (!hasAnyNewlyAllocated()) {
// This means we had already exhausted the block when we stopped allocation.
return FreeList();
}
@@ -346,24 +349,56 @@
{
ASSERT(vm()->heap.objectSpace().isMarking());
LockHolder locker(m_lock);
- if (areMarksStale(markingVersion)) {
- clearMarks(markingVersion);
- // This means we're the first ones to mark any object in this block.
- handle().allocator()->atomicSetAndCheckIsMarkingNotEmpty(&handle(), true);
+ if (!areMarksStale(markingVersion))
+ return;
+
+ if (handle().allocator()->isAllocated(&handle())
+ || !marksConveyLivenessDuringMarking(markingVersion)) {
+ // We already know that the block is full and is already recognized as such, or that the
+ // block did not survive the previous GC. So, we can clear mark bits the old fashioned
+ // way. Note that it's possible for such a block to have newlyAllocated with an up-to-
+ // date version! If it does, then we want to leave the newlyAllocated alone, since that
+ // means that we had allocated in this previously empty block but did not fill it up, so
+ // we created a newlyAllocated.
+ m_marks.clearAll();
+ } else {
+ HeapVersion newlyAllocatedVersion = space()->newlyAllocatedVersion();
+ if (handle().m_newlyAllocatedVersion == newlyAllocatedVersion) {
+ // Merge the contents of marked into newlyAllocated. If we get the full set of bits
+ // then invalidate newlyAllocated and set allocated.
+ handle().m_newlyAllocated.mergeAndClear(m_marks);
+ } else {
+ // Replace the contents of newlyAllocated with marked. If we get the full set of
+ // bits then invalidate newlyAllocated and set allocated.
+ handle().m_newlyAllocated.setAndClear(m_marks);
+ }
+ handle().m_newlyAllocatedVersion = newlyAllocatedVersion;
}
+ clearHasAnyMarked();
+ WTF::storeStoreFence();
+ m_markingVersion = markingVersion;
+
+ // This means we're the first ones to mark any object in this block.
+ handle().allocator()->atomicSetAndCheckIsMarkingNotEmpty(&handle(), true);
}
-void MarkedBlock::clearMarks()
+void MarkedBlock::Handle::resetAllocated()
{
- clearMarks(vm()->heap.objectSpace().markingVersion());
+ m_newlyAllocated.clearAll();
+ m_newlyAllocatedVersion = MarkedSpace::nullVersion;
}
-void MarkedBlock::clearMarks(HeapVersion markingVersion)
+void MarkedBlock::resetMarks()
{
- m_marks.clearAll();
- clearHasAnyMarked();
- WTF::storeStoreFence();
- m_markingVersion = markingVersion;
+ // We want aboutToMarkSlow() to see what the mark bits were after the last collection. It uses
+ // the version number to distinguish between the marks having already been stale before
+ // beginMarking(), or just stale now that beginMarking() bumped the version. If we have a version
+ // wraparound, then we will call this method before resetting the version to null. When the
+ // version is null, aboutToMarkSlow() will assume that the marks were not stale as of before
+ // beginMarking(). Hence the need to whip the marks into shape.
+ if (areMarksStale())
+ m_marks.clearAll();
+ m_markingVersion = MarkedSpace::nullVersion;
}
#if !ASSERT_DISABLED
@@ -425,6 +460,11 @@
m_allocator->removeBlock(this);
}
+void MarkedBlock::updateNeedsDestruction()
+{
+ m_needsDestruction = handle().needsDestruction();
+}
+
void MarkedBlock::Handle::didAddToAllocator(MarkedAllocator* allocator, size_t index)
{
ASSERT(m_index == std::numeric_limits<size_t>::max());
@@ -442,10 +482,9 @@
if (m_attributes.cellKind != HeapCell::JSCell)
RELEASE_ASSERT(m_attributes.destruction == DoesNotNeedDestruction);
- block().m_needsDestruction = needsDestruction();
+ block().updateNeedsDestruction();
- unsigned cellsPerBlock = MarkedSpace::blockPayload / cellSize;
- double markCountBias = -(Options::minMarkedBlockUtilization() * cellsPerBlock);
+ double markCountBias = -(Options::minMarkedBlockUtilization() * cellsPerBlock());
// The mark count bias should be comfortably within this range.
RELEASE_ASSERT(markCountBias > static_cast<double>(std::numeric_limits<int16_t>::min()));
@@ -466,12 +505,12 @@
bool MarkedBlock::Handle::isLive(const HeapCell* cell)
{
- return isLive(vm()->heap.objectSpace().markingVersion(), cell);
+ return isLive(space()->markingVersion(), space()->isMarking(), cell);
}
bool MarkedBlock::Handle::isLiveCell(const void* p)
{
- return isLiveCell(vm()->heap.objectSpace().markingVersion(), p);
+ return isLiveCell(space()->markingVersion(), space()->isMarking(), p);
}
} // namespace JSC
Modified: trunk/Source/JavaScriptCore/heap/MarkedBlock.h (207178 => 207179)
--- trunk/Source/JavaScriptCore/heap/MarkedBlock.h 2016-10-11 23:25:38 UTC (rev 207178)
+++ trunk/Source/JavaScriptCore/heap/MarkedBlock.h 2016-10-11 23:52:02 UTC (rev 207179)
@@ -40,6 +40,7 @@
class Heap;
class JSCell;
class MarkedAllocator;
+class MarkedSpace;
typedef uintptr_t Bits;
typedef uint32_t HeapVersion;
@@ -110,6 +111,7 @@
MarkedAllocator* allocator() const;
Heap* heap() const;
+ inline MarkedSpace* space() const;
VM* vm() const;
WeakSet& weakSet();
@@ -141,11 +143,9 @@
void stopAllocating(const FreeList&);
FreeList resumeAllocating(); // Call this if you canonicalized a block for some non-collection related purpose.
- // Returns true if the "newly allocated" bitmap was non-null
- // and was successfully cleared and false otherwise.
- bool clearNewlyAllocated();
-
size_t cellSize();
+ inline unsigned cellsPerBlock();
+
const AllocatorAttributes& attributes() const;
DestructionMode destruction() const;
bool needsDestruction() const;
@@ -153,9 +153,9 @@
size_t markCount();
size_t size();
-
- inline bool isLive(HeapVersion markingVersion, const HeapCell*);
- inline bool isLiveCell(HeapVersion markingVersion, const void*);
+
+ inline bool isLive(HeapVersion markingVersion, bool isMarking, const HeapCell*);
+ inline bool isLiveCell(HeapVersion markingVersion, bool isMarking, const void*);
bool isLive(const HeapCell*);
bool isLiveCell(const void*);
@@ -164,8 +164,13 @@
void setNewlyAllocated(const void*);
void clearNewlyAllocated(const void*);
- bool hasAnyNewlyAllocated() const { return !!m_newlyAllocated; }
-
+ HeapVersion newlyAllocatedVersion() const { return m_newlyAllocatedVersion; }
+
+ inline bool isNewlyAllocatedStale() const;
+
+ inline bool hasAnyNewlyAllocated();
+ void resetAllocated();
+
template <typename Functor> IterationStatus forEachCell(const Functor&);
template <typename Functor> inline IterationStatus forEachLiveCell(const Functor&);
template <typename Functor> inline IterationStatus forEachDeadCell(const Functor&);
@@ -185,7 +190,7 @@
private:
Handle(Heap&, void*);
-
+
template<DestructionMode>
FreeList sweepHelperSelectScribbleMode(SweepMode = SweepOnly);
@@ -223,7 +228,7 @@
size_t m_atomsPerCell { std::numeric_limits<size_t>::max() };
size_t m_endAtom { std::numeric_limits<size_t>::max() }; // This is a fuzzy end. Always test for < m_endAtom.
- std::unique_ptr<WTF::Bitmap<atomsPerBlock>> m_newlyAllocated;
+ WTF::Bitmap<atomsPerBlock> m_newlyAllocated;
AllocatorAttributes m_attributes;
bool m_isFreeListed { false };
@@ -231,6 +236,8 @@
MarkedAllocator* m_allocator { nullptr };
size_t m_index { std::numeric_limits<size_t>::max() };
WeakSet m_weakSet;
+
+ HeapVersion m_newlyAllocatedVersion;
MarkedBlock* m_block { nullptr };
};
@@ -240,6 +247,8 @@
Handle& handle();
VM* vm() const;
+ inline Heap* heap() const;
+ inline MarkedSpace* space() const;
static bool isAtomAligned(const void*);
static MarkedBlock* blockFor(const void*);
@@ -278,8 +287,11 @@
bool needsDestruction() const { return m_needsDestruction; }
- inline void resetMarkingVersion();
+ // This is usually a no-op, and we use it as a no-op that touches the page in isPagedOut().
+ void updateNeedsDestruction();
+ void resetMarks();
+
private:
static const size_t atomAlignmentMask = atomSize - 1;
@@ -289,13 +301,13 @@
Atom* atoms();
void aboutToMarkSlow(HeapVersion markingVersion);
- void clearMarks();
- void clearMarks(HeapVersion markingVersion);
void clearHasAnyMarked();
void noteMarkedSlow();
+
+ inline bool marksConveyLivenessDuringMarking(HeapVersion markingVersion);
- WTF::Bitmap<atomsPerBlock, WTF::BitmapAtomic, uint8_t> m_marks;
+ WTF::Bitmap<atomsPerBlock> m_marks;
bool m_needsDestruction;
Lock m_lock;
@@ -521,28 +533,19 @@
inline bool MarkedBlock::Handle::isNewlyAllocated(const void* p)
{
- return m_newlyAllocated->get(m_block->atomNumber(p));
+ return m_newlyAllocated.get(m_block->atomNumber(p));
}
inline void MarkedBlock::Handle::setNewlyAllocated(const void* p)
{
- m_newlyAllocated->set(m_block->atomNumber(p));
+ m_newlyAllocated.set(m_block->atomNumber(p));
}
inline void MarkedBlock::Handle::clearNewlyAllocated(const void* p)
{
- m_newlyAllocated->clear(m_block->atomNumber(p));
+ m_newlyAllocated.clear(m_block->atomNumber(p));
}
-inline bool MarkedBlock::Handle::clearNewlyAllocated()
-{
- if (m_newlyAllocated) {
- m_newlyAllocated = nullptr;
- return true;
- }
- return false;
-}
-
inline bool MarkedBlock::isAtom(const void* p)
{
ASSERT(MarkedBlock::isAtomAligned(p));
Modified: trunk/Source/JavaScriptCore/heap/MarkedBlockInlines.h (207178 => 207179)
--- trunk/Source/JavaScriptCore/heap/MarkedBlockInlines.h 2016-10-11 23:25:38 UTC (rev 207178)
+++ trunk/Source/JavaScriptCore/heap/MarkedBlockInlines.h 2016-10-11 23:52:02 UTC (rev 207179)
@@ -27,11 +27,36 @@
#include "MarkedAllocator.h"
#include "MarkedBlock.h"
+#include "MarkedSpace.h"
namespace JSC {
-inline bool MarkedBlock::Handle::isLive(HeapVersion markingVersion, const HeapCell* cell)
+inline unsigned MarkedBlock::Handle::cellsPerBlock()
{
+ return MarkedSpace::blockPayload / cellSize();
+}
+
+inline bool MarkedBlock::marksConveyLivenessDuringMarking(HeapVersion markingVersion)
+{
+ // This returns true if any of these is true:
+ // - We just created the block and so the bits are clear already.
+ // - This block has objects marked during the last GC, and so its version was up-to-date just
+ // before the current collection did beginMarking(). This means that any objects that have
+ // their mark bit set are valid objects that were never deleted, and so are candidates for
+ // marking in any conservative scan. Using our jargon, they are "live".
+ // - We did ~2^32 collections and rotated the version back to null, so we needed to hard-reset
+ // everything. If the marks had been stale, we would have cleared them. So, we can be sure that
+ // any set mark bit reflects objects marked during last GC, i.e. "live" objects.
+ // It would be absurd to use this method when not collecting, since this special "one version
+ // back" state only makes sense when we're in a concurrent collection and have to be
+ // conservative.
+ ASSERT(space()->isMarking());
+ return m_markingVersion == MarkedSpace::nullVersion
+ || MarkedSpace::nextVersion(m_markingVersion) == markingVersion;
+}
+
+inline bool MarkedBlock::Handle::isLive(HeapVersion markingVersion, bool isMarking, const HeapCell* cell)
+{
ASSERT(!isFreeListed());
if (UNLIKELY(hasAnyNewlyAllocated())) {
@@ -39,24 +64,38 @@
return true;
}
- MarkedBlock& block = this->block();
-
if (allocator()->isAllocated(this))
return true;
- if (block.areMarksStale(markingVersion))
- return false;
+ MarkedBlock& block = this->block();
+
+ if (block.areMarksStale()) {
+ if (!isMarking)
+ return false;
+ if (!block.marksConveyLivenessDuringMarking(markingVersion))
+ return false;
+ }
- return block.isMarked(cell);
+ return block.m_marks.get(block.atomNumber(cell));
}
-inline bool MarkedBlock::Handle::isLiveCell(HeapVersion markingVersion, const void* p)
+inline bool MarkedBlock::Handle::isLiveCell(HeapVersion markingVersion, bool isMarking, const void* p)
{
if (!m_block->isAtom(p))
return false;
- return isLive(markingVersion, static_cast<const HeapCell*>(p));
+ return isLive(markingVersion, isMarking, static_cast<const HeapCell*>(p));
}
+inline bool MarkedBlock::Handle::isNewlyAllocatedStale() const
+{
+ return m_newlyAllocatedVersion != space()->newlyAllocatedVersion();
+}
+
+inline bool MarkedBlock::Handle::hasAnyNewlyAllocated()
+{
+ return !isNewlyAllocatedStale();
+}
+
template <typename Functor>
inline IterationStatus MarkedBlock::Handle::forEachLiveCell(const Functor& functor)
{
@@ -87,10 +126,20 @@
return IterationStatus::Continue;
}
-inline void MarkedBlock::resetMarkingVersion()
+inline Heap* MarkedBlock::heap() const
{
- m_markingVersion = MarkedSpace::nullVersion;
+ return &vm()->heap;
}
+inline MarkedSpace* MarkedBlock::space() const
+{
+ return &heap()->objectSpace();
+}
+
+inline MarkedSpace* MarkedBlock::Handle::space() const
+{
+ return &heap()->objectSpace();
+}
+
} // namespace JSC
Modified: trunk/Source/JavaScriptCore/heap/MarkedSpace.cpp (207178 => 207179)
--- trunk/Source/JavaScriptCore/heap/MarkedSpace.cpp 2016-10-11 23:25:38 UTC (rev 207178)
+++ trunk/Source/JavaScriptCore/heap/MarkedSpace.cpp 2016-10-11 23:52:02 UTC (rev 207179)
@@ -451,29 +451,6 @@
});
}
-void MarkedSpace::clearNewlyAllocated()
-{
- forEachAllocator(
- [&] (MarkedAllocator& allocator) -> IterationStatus {
- if (MarkedBlock::Handle* block = allocator.takeLastActiveBlock())
- block->clearNewlyAllocated();
- return IterationStatus::Continue;
- });
-
- for (unsigned i = m_largeAllocationsOffsetForThisCollection; i < m_largeAllocations.size(); ++i)
- m_largeAllocations[i]->clearNewlyAllocated();
-
- if (!ASSERT_DISABLED) {
- forEachBlock(
- [&] (MarkedBlock::Handle* block) {
- ASSERT_UNUSED(block, !block->clearNewlyAllocated());
- });
-
- for (LargeAllocation* allocation : m_largeAllocations)
- ASSERT_UNUSED(allocation, !allocation->isNewlyAllocated());
- }
-}
-
void MarkedSpace::beginMarking()
{
if (m_heap->operationInProgress() == FullCollection) {
@@ -483,16 +460,15 @@
return IterationStatus::Continue;
});
- m_markingVersion = nextVersion(m_markingVersion);
-
- if (UNLIKELY(m_markingVersion == initialVersion)) {
- // Oh no! Version wrap-around! We handle this by setting all block versions to null.
+ if (UNLIKELY(nextVersion(m_markingVersion) == initialVersion)) {
forEachBlock(
[&] (MarkedBlock::Handle* handle) {
- handle->block().resetMarkingVersion();
+ handle->block().resetMarks();
});
}
+ m_markingVersion = nextVersion(m_markingVersion);
+
for (LargeAllocation* allocation : m_largeAllocations)
allocation->flip();
}
@@ -511,6 +487,23 @@
void MarkedSpace::endMarking()
{
+ if (UNLIKELY(nextVersion(m_newlyAllocatedVersion) == initialVersion)) {
+ forEachBlock(
+ [&] (MarkedBlock::Handle* handle) {
+ handle->resetAllocated();
+ });
+ }
+
+ m_newlyAllocatedVersion = nextVersion(m_newlyAllocatedVersion);
+
+ for (unsigned i = m_largeAllocationsOffsetForThisCollection; i < m_largeAllocations.size(); ++i)
+ m_largeAllocations[i]->clearNewlyAllocated();
+
+ if (!ASSERT_DISABLED) {
+ for (LargeAllocation* allocation : m_largeAllocations)
+ ASSERT_UNUSED(allocation, !allocation->isNewlyAllocated());
+ }
+
forEachAllocator(
[&] (MarkedAllocator& allocator) -> IterationStatus {
allocator.endMarking();
Modified: trunk/Source/JavaScriptCore/heap/MarkedSpace.h (207178 => 207179)
--- trunk/Source/JavaScriptCore/heap/MarkedSpace.h 2016-10-11 23:25:38 UTC (rev 207178)
+++ trunk/Source/JavaScriptCore/heap/MarkedSpace.h 2016-10-11 23:52:02 UTC (rev 207179)
@@ -65,13 +65,13 @@
static const size_t numSizeClasses = largeCutoff / sizeStep;
static const HeapVersion nullVersion = 0; // The version of freshly allocated blocks.
- static const HeapVersion initialVersion = 1; // The version that the heap starts out with.
+ static const HeapVersion initialVersion = 2; // The version that the heap starts out with. Set to make sure that nextVersion(nullVersion) != initialVersion.
- HeapVersion nextVersion(HeapVersion version)
+ static HeapVersion nextVersion(HeapVersion version)
{
version++;
if (version == nullVersion)
- version++;
+ version = initialVersion;
return version;
}
@@ -171,6 +171,7 @@
bool isPagedOut(double deadline);
HeapVersion markingVersion() const { return m_markingVersion; }
+ HeapVersion newlyAllocatedVersion() const { return m_newlyAllocatedVersion; }
const Vector<LargeAllocation*>& largeAllocations() const { return m_largeAllocations; }
unsigned largeAllocationsNurseryOffset() const { return m_largeAllocationsNurseryOffset; }
@@ -217,6 +218,7 @@
Heap* m_heap;
HeapVersion m_markingVersion { initialVersion };
+ HeapVersion m_newlyAllocatedVersion { initialVersion };
size_t m_capacity;
bool m_isIterating;
bool m_isMarking { false };
Modified: trunk/Source/JavaScriptCore/heap/SlotVisitor.cpp (207178 => 207179)
--- trunk/Source/JavaScriptCore/heap/SlotVisitor.cpp 2016-10-11 23:25:38 UTC (rev 207178)
+++ trunk/Source/JavaScriptCore/heap/SlotVisitor.cpp 2016-10-11 23:52:02 UTC (rev 207179)
@@ -191,8 +191,6 @@
validate(cell);
#endif
- //dataLog(" Marking ", RawPointer(cell), "\n");
-
if (cell->isLargeAllocation())
setMarkedAndAppendToMarkStack(cell->largeAllocation(), cell);
else
Modified: trunk/Source/JavaScriptCore/runtime/SamplingProfiler.cpp (207178 => 207179)
--- trunk/Source/JavaScriptCore/runtime/SamplingProfiler.cpp 2016-10-11 23:25:38 UTC (rev 207178)
+++ trunk/Source/JavaScriptCore/runtime/SamplingProfiler.cpp 2016-10-11 23:52:02 UTC (rev 207179)
@@ -30,6 +30,7 @@
#include "CallFrame.h"
#include "CodeBlock.h"
+#include "CodeBlockSet.h"
#include "Executable.h"
#include "HeapInlines.h"
#include "HeapIterationScope.h"
Modified: trunk/Source/WTF/ChangeLog (207178 => 207179)
--- trunk/Source/WTF/ChangeLog 2016-10-11 23:25:38 UTC (rev 207178)
+++ trunk/Source/WTF/ChangeLog 2016-10-11 23:52:02 UTC (rev 207179)
@@ -1,3 +1,45 @@
+2016-10-08 Filip Pizlo <[email protected]>
+
+ MarkedBlock should know what objects are live during marking
+ https://bugs.webkit.org/show_bug.cgi?id=162309
+
+ Reviewed by Geoffrey Garen.
+
+ This removes the atomicity mode, because it's not really used: it only affects the
+ concurrent methods (concurrentTestAndSet and concurrentTestAndClear), and their only users
+ turn on atomicity. Removing it is useful because previously, some binary Bitmap methods
+ (like merge(const Bitmap&)) couldn't be used effectively in the GC: some of the GC's
+ bitmaps set the atomic mode and some didn't, so they were different template
+ instantiations. Removing this useless mode is the simplest solution.
+
+ Also added some new binary Bitmap methods: mergeAndClear(Bitmap& other) and
+ setAndClear(Bitmap& other). They perform their action on 'this' (either merge or set,
+ respectively) while also clearing the contents of 'other'. This is great for one of the GC
+ hot paths.
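A small usage sketch of the two new operations, assuming a WTF build environment (the implementations are in the Bitmap.h diff below):

    #include <wtf/Bitmap.h>

    static void bitmapTransferSketch()
    {
        WTF::Bitmap<64> marks;
        WTF::Bitmap<64> newlyAllocated;

        marks.set(3);
        newlyAllocated.set(5);

        // Merge 'marks' into 'newlyAllocated' and clear 'marks', in one pass over the words.
        newlyAllocated.mergeAndClear(marks);
        // newlyAllocated now has bits 3 and 5 set; marks is empty.

        // setAndClear overwrites the destination instead of merging into it.
        WTF::Bitmap<64> scratch;
        scratch.set(7);
        scratch.setAndClear(newlyAllocated);
        // scratch now has exactly bits 3 and 5 set (bit 7 is gone); newlyAllocated is empty.
    }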
+
+ * wtf/Bitmap.h:
+ (WTF::WordType>::Bitmap):
+ (WTF::WordType>::get):
+ (WTF::WordType>::set):
+ (WTF::WordType>::testAndSet):
+ (WTF::WordType>::testAndClear):
+ (WTF::WordType>::concurrentTestAndSet):
+ (WTF::WordType>::concurrentTestAndClear):
+ (WTF::WordType>::clear):
+ (WTF::WordType>::clearAll):
+ (WTF::WordType>::nextPossiblyUnset):
+ (WTF::WordType>::findRunOfZeros):
+ (WTF::WordType>::count):
+ (WTF::WordType>::isEmpty):
+ (WTF::WordType>::isFull):
+ (WTF::WordType>::merge):
+ (WTF::WordType>::filter):
+ (WTF::WordType>::exclude):
+ (WTF::WordType>::forEachSetBit):
+ (WTF::WordType>::mergeAndClear):
+ (WTF::WordType>::setAndClear):
+ (WTF::=):
+ (WTF::WordType>::hash):
+
2016-10-11 Said Abou-Hallawa <[email protected]>
Add SynchronizedFixedQueue class
Modified: trunk/Source/WTF/wtf/Bitmap.h (207178 => 207179)
--- trunk/Source/WTF/wtf/Bitmap.h 2016-10-11 23:25:38 UTC (rev 207178)
+++ trunk/Source/WTF/wtf/Bitmap.h 2016-10-11 23:52:02 UTC (rev 207179)
@@ -27,16 +27,7 @@
namespace WTF {
-enum BitmapAtomicMode {
- // This makes concurrentTestAndSet behave just like testAndSet.
- BitmapNotAtomic,
-
- // This makes concurrentTestAndSet use compareAndSwap, so that it's
- // atomic even when used concurrently.
- BitmapAtomic
-};
-
-template<size_t bitmapSize, BitmapAtomicMode atomicMode = BitmapNotAtomic, typename WordType = uint32_t>
+template<size_t bitmapSize, typename WordType = uint32_t>
class Bitmap {
WTF_MAKE_FAST_ALLOCATED;
@@ -59,8 +50,8 @@
size_t nextPossiblyUnset(size_t) const;
void clear(size_t);
void clearAll();
- int64_t findRunOfZeros(size_t) const;
- size_t count(size_t = 0) const;
+ int64_t findRunOfZeros(size_t runLength) const;
+ size_t count(size_t start = 0) const;
size_t isEmpty() const;
size_t isFull() const;
@@ -71,6 +62,9 @@
template<typename Func>
void forEachSetBit(const Func&) const;
+ void mergeAndClear(Bitmap&);
+ void setAndClear(Bitmap&);
+
bool operator==(const Bitmap&) const;
bool operator!=(const Bitmap&) const;
@@ -90,26 +84,26 @@
std::array<WordType, words> bits;
};
-template<size_t bitmapSize, BitmapAtomicMode atomicMode, typename WordType>
-inline Bitmap<bitmapSize, atomicMode, WordType>::Bitmap()
+template<size_t bitmapSize, typename WordType>
+inline Bitmap<bitmapSize, WordType>::Bitmap()
{
clearAll();
}
-template<size_t bitmapSize, BitmapAtomicMode atomicMode, typename WordType>
-inline bool Bitmap<bitmapSize, atomicMode, WordType>::get(size_t n) const
+template<size_t bitmapSize, typename WordType>
+inline bool Bitmap<bitmapSize, WordType>::get(size_t n) const
{
return !!(bits[n / wordSize] & (one << (n % wordSize)));
}
-template<size_t bitmapSize, BitmapAtomicMode atomicMode, typename WordType>
-inline void Bitmap<bitmapSize, atomicMode, WordType>::set(size_t n)
+template<size_t bitmapSize, typename WordType>
+inline void Bitmap<bitmapSize, WordType>::set(size_t n)
{
bits[n / wordSize] |= (one << (n % wordSize));
}
-template<size_t bitmapSize, BitmapAtomicMode atomicMode, typename WordType>
-inline void Bitmap<bitmapSize, atomicMode, WordType>::set(size_t n, bool value)
+template<size_t bitmapSize, typename WordType>
+inline void Bitmap<bitmapSize, WordType>::set(size_t n, bool value)
{
if (value)
set(n);
@@ -117,8 +111,8 @@
clear(n);
}
-template<size_t bitmapSize, BitmapAtomicMode atomicMode, typename WordType>
-inline bool Bitmap<bitmapSize, atomicMode, WordType>::testAndSet(size_t n)
+template<size_t bitmapSize, typename WordType>
+inline bool Bitmap<bitmapSize, WordType>::testAndSet(size_t n)
{
WordType mask = one << (n % wordSize);
size_t index = n / wordSize;
@@ -127,8 +121,8 @@
return result;
}
-template<size_t bitmapSize, BitmapAtomicMode atomicMode, typename WordType>
-inline bool Bitmap<bitmapSize, atomicMode, WordType>::testAndClear(size_t n)
+template<size_t bitmapSize, typename WordType>
+inline bool Bitmap<bitmapSize, WordType>::testAndClear(size_t n)
{
WordType mask = one << (n % wordSize);
size_t index = n / wordSize;
@@ -137,14 +131,9 @@
return result;
}
-template<size_t bitmapSize, BitmapAtomicMode atomicMode, typename WordType>
-inline bool Bitmap<bitmapSize, atomicMode, WordType>::concurrentTestAndSet(size_t n)
+template<size_t bitmapSize, typename WordType>
+inline bool Bitmap<bitmapSize, WordType>::concurrentTestAndSet(size_t n)
{
- if (atomicMode == BitmapNotAtomic)
- return testAndSet(n);
-
- ASSERT(atomicMode == BitmapAtomic);
-
WordType mask = one << (n % wordSize);
size_t index = n / wordSize;
WordType* wordPtr = bits.data() + index;
@@ -157,14 +146,9 @@
return false;
}
-template<size_t bitmapSize, BitmapAtomicMode atomicMode, typename WordType>
-inline bool Bitmap<bitmapSize, atomicMode, WordType>::concurrentTestAndClear(size_t n)
+template<size_t bitmapSize, typename WordType>
+inline bool Bitmap<bitmapSize, WordType>::concurrentTestAndClear(size_t n)
{
- if (atomicMode == BitmapNotAtomic)
- return testAndClear(n);
-
- ASSERT(atomicMode == BitmapAtomic);
-
WordType mask = one << (n % wordSize);
size_t index = n / wordSize;
WordType* wordPtr = bits.data() + index;
@@ -177,20 +161,20 @@
return true;
}
-template<size_t bitmapSize, BitmapAtomicMode atomicMode, typename WordType>
-inline void Bitmap<bitmapSize, atomicMode, WordType>::clear(size_t n)
+template<size_t bitmapSize, typename WordType>
+inline void Bitmap<bitmapSize, WordType>::clear(size_t n)
{
bits[n / wordSize] &= ~(one << (n % wordSize));
}
-template<size_t bitmapSize, BitmapAtomicMode atomicMode, typename WordType>
-inline void Bitmap<bitmapSize, atomicMode, WordType>::clearAll()
+template<size_t bitmapSize, typename WordType>
+inline void Bitmap<bitmapSize, WordType>::clearAll()
{
memset(bits.data(), 0, sizeof(bits));
}
-template<size_t bitmapSize, BitmapAtomicMode atomicMode, typename WordType>
-inline size_t Bitmap<bitmapSize, atomicMode, WordType>::nextPossiblyUnset(size_t start) const
+template<size_t bitmapSize, typename WordType>
+inline size_t Bitmap<bitmapSize, WordType>::nextPossiblyUnset(size_t start) const
{
if (!~bits[start / wordSize])
return ((start / wordSize) + 1) * wordSize;
@@ -197,8 +181,8 @@
return start + 1;
}
-template<size_t bitmapSize, BitmapAtomicMode atomicMode, typename WordType>
-inline int64_t Bitmap<bitmapSize, atomicMode, WordType>::findRunOfZeros(size_t runLength) const
+template<size_t bitmapSize, typename WordType>
+inline int64_t Bitmap<bitmapSize, WordType>::findRunOfZeros(size_t runLength) const
{
if (!runLength)
runLength = 1;
@@ -217,8 +201,8 @@
return -1;
}
-template<size_t bitmapSize, BitmapAtomicMode atomicMode, typename WordType>
-inline size_t Bitmap<bitmapSize, atomicMode, WordType>::count(size_t start) const
+template<size_t bitmapSize, typename WordType>
+inline size_t Bitmap<bitmapSize, WordType>::count(size_t start) const
{
size_t result = 0;
for ( ; (start % wordSize); ++start) {
@@ -230,8 +214,8 @@
return result;
}
-template<size_t bitmapSize, BitmapAtomicMode atomicMode, typename WordType>
-inline size_t Bitmap<bitmapSize, atomicMode, WordType>::isEmpty() const
+template<size_t bitmapSize, typename WordType>
+inline size_t Bitmap<bitmapSize, WordType>::isEmpty() const
{
for (size_t i = 0; i < words; ++i)
if (bits[i])
@@ -239,8 +223,8 @@
return true;
}
-template<size_t bitmapSize, BitmapAtomicMode atomicMode, typename WordType>
-inline size_t Bitmap<bitmapSize, atomicMode, WordType>::isFull() const
+template<size_t bitmapSize, typename WordType>
+inline size_t Bitmap<bitmapSize, WordType>::isFull() const
{
for (size_t i = 0; i < words; ++i)
if (~bits[i])
@@ -248,30 +232,30 @@
return true;
}
-template<size_t bitmapSize, BitmapAtomicMode atomicMode, typename WordType>
-inline void Bitmap<bitmapSize, atomicMode, WordType>::merge(const Bitmap& other)
+template<size_t bitmapSize, typename WordType>
+inline void Bitmap<bitmapSize, WordType>::merge(const Bitmap& other)
{
for (size_t i = 0; i < words; ++i)
bits[i] |= other.bits[i];
}
-template<size_t bitmapSize, BitmapAtomicMode atomicMode, typename WordType>
-inline void Bitmap<bitmapSize, atomicMode, WordType>::filter(const Bitmap& other)
+template<size_t bitmapSize, typename WordType>
+inline void Bitmap<bitmapSize, WordType>::filter(const Bitmap& other)
{
for (size_t i = 0; i < words; ++i)
bits[i] &= other.bits[i];
}
-template<size_t bitmapSize, BitmapAtomicMode atomicMode, typename WordType>
-inline void Bitmap<bitmapSize, atomicMode, WordType>::exclude(const Bitmap& other)
+template<size_t bitmapSize, typename WordType>
+inline void Bitmap<bitmapSize, WordType>::exclude(const Bitmap& other)
{
for (size_t i = 0; i < words; ++i)
bits[i] &= ~other.bits[i];
}
-template<size_t bitmapSize, BitmapAtomicMode atomicMode, typename WordType>
+template<size_t bitmapSize, typename WordType>
template<typename Func>
-inline void Bitmap<bitmapSize, atomicMode, WordType>::forEachSetBit(const Func& func) const
+inline void Bitmap<bitmapSize, WordType>::forEachSetBit(const Func& func) const
{
for (size_t i = 0; i < words; ++i) {
WordType word = bits[i];
@@ -286,10 +270,28 @@
}
}
-template<size_t bitmapSize, BitmapAtomicMode atomicMode, typename WordType>
-inline bool Bitmap<bitmapSize, atomicMode, WordType>::operator==(const Bitmap& other) const
+template<size_t bitmapSize, typename WordType>
+inline void Bitmap<bitmapSize, WordType>::mergeAndClear(Bitmap& other)
{
for (size_t i = 0; i < words; ++i) {
+ bits[i] |= other.bits[i];
+ other.bits[i] = 0;
+ }
+}
+
+template<size_t bitmapSize, typename WordType>
+inline void Bitmap<bitmapSize, WordType>::setAndClear(Bitmap& other)
+{
+ for (size_t i = 0; i < words; ++i) {
+ bits[i] = other.bits[i];
+ other.bits[i] = 0;
+ }
+}
+
+template<size_t bitmapSize, typename WordType>
+inline bool Bitmap<bitmapSize, WordType>::operator==(const Bitmap& other) const
+{
+ for (size_t i = 0; i < words; ++i) {
if (bits[i] != other.bits[i])
return false;
}
@@ -296,14 +298,14 @@
return true;
}
-template<size_t bitmapSize, BitmapAtomicMode atomicMode, typename WordType>
-inline bool Bitmap<bitmapSize, atomicMode, WordType>::operator!=(const Bitmap& other) const
+template<size_t bitmapSize, typename WordType>
+inline bool Bitmap<bitmapSize, WordType>::operator!=(const Bitmap& other) const
{
return !(*this == other);
}
-template<size_t bitmapSize, BitmapAtomicMode atomicMode, typename WordType>
-inline unsigned Bitmap<bitmapSize, atomicMode, WordType>::hash() const
+template<size_t bitmapSize, typename WordType>
+inline unsigned Bitmap<bitmapSize, WordType>::hash() const
{
unsigned result = 0;
for (size_t i = 0; i < words; ++i)