Title: [254708] trunk
Revision: 254708
Author: [email protected]
Date: 2020-01-16 14:05:00 -0800 (Thu, 16 Jan 2020)

Log Message

[bmalloc] IsoHeap's initial setup should be small
https://bugs.webkit.org/show_bug.cgi?id=206214

Reviewed by Michael Saboff.

Source/bmalloc:

Keep IsoHeap-related data structures small by using the Packed technique. We are starting to use IsoHeap for many
classes, so it is important to keep the IsoHeap metadata small, because this data exists persistently.

1. We pass IsoHeapImpl<> as a parameter instead of holding it unnecessarily.
2. We make some of the pointers Packed so that we can keep sizeof(IsoHeapImpl<Config>) small (a sketch follows below).
3. One drawback of PackedPtr is that its loads and stores are not atomic. So we pass `const std::lock_guard<Mutex>&`
   to functions that must be called with the lock held, ensuring that PackedPtr fields are accessed only while the
   lock is held correctly.
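
As a rough illustration of the Packed technique, here is a minimal sketch; it is not the Packed.h added by this
patch (whose body is not reproduced below), and the name ExamplePackedPtr is hypothetical. It assumes a 48-bit
effective address width and a little-endian 64-bit target, so a pointer can be stored in 6 bytes instead of 8.

    // Minimal sketch, assuming BOS_EFFECTIVE_ADDRESS_WIDTH == 48 and little-endian byte order.
    #include <array>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    template<typename T>
    class ExamplePackedPtr {
    public:
        static constexpr unsigned effectiveAddressWidth = 48; // assumption; the real value is platform-dependent
        static constexpr size_t storageBytes = (effectiveAddressWidth + 7) / 8; // 6 bytes instead of 8

        ExamplePackedPtr() = default;
        ExamplePackedPtr(T* pointer) { set(pointer); }

        T* get() const
        {
            uint64_t raw = 0;
            std::memcpy(&raw, m_storage.data(), storageBytes); // low bytes hold the value on little-endian
            return reinterpret_cast<T*>(raw);
        }

        void set(T* pointer)
        {
            uint64_t raw = reinterpret_cast<uint64_t>(pointer);
            std::memcpy(m_storage.data(), &raw, storageBytes); // drop the unused high bytes
        }

        ExamplePackedPtr& operator=(T* pointer) { set(pointer); return *this; }

    private:
        std::array<uint8_t, storageBytes> m_storage { };
    };

Because such a load or store is a multi-byte memcpy rather than a single atomic access, the patch threads the lock
through the call sites: for example, IsoDirectory::didBecome() and takeFirstEligible() now take a
const std::lock_guard<Mutex>& argument, so packed fields are only touched by code that provably holds the heap lock.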

* CMakeLists.txt:
* bmalloc.xcodeproj/project.pbxproj:
* bmalloc/Algorithm.h:
(bmalloc::ctzConstexpr):
(bmalloc::getLSBSetNonZeroConstexpr):
* bmalloc/BPlatform.h:
* bmalloc/DebugHeap.cpp:
(bmalloc::DebugHeap::DebugHeap):
* bmalloc/DebugHeap.h:
* bmalloc/DeferredTrigger.h:
* bmalloc/DeferredTriggerInlines.h:
(bmalloc::DeferredTrigger<trigger>::didBecome):
(bmalloc::DeferredTrigger<trigger>::handleDeferral):
* bmalloc/Environment.cpp:
(bmalloc::Environment::Environment):
* bmalloc/Environment.h:
* bmalloc/Gigacage.cpp:
(bmalloc::PrimitiveDisableCallbacks::PrimitiveDisableCallbacks):
* bmalloc/Heap.cpp:
(bmalloc::Heap::freeableMemory):
(bmalloc::Heap::markAllLargeAsEligibile):
(bmalloc::Heap::decommitLargeRange):
(bmalloc::Heap::scavenge):
(bmalloc::Heap::scavengeToHighWatermark):
* bmalloc/Heap.h:
* bmalloc/HeapConstants.cpp:
(bmalloc::HeapConstants::HeapConstants):
* bmalloc/HeapConstants.h:
* bmalloc/IsoAllocator.h:
* bmalloc/IsoAllocatorInlines.h:
(bmalloc::IsoAllocator<Config>::IsoAllocator):
(bmalloc::IsoAllocator<Config>::allocate):
(bmalloc::IsoAllocator<Config>::allocateSlow):
(bmalloc::IsoAllocator<Config>::scavenge):
* bmalloc/IsoDeallocatorInlines.h:
(bmalloc::IsoDeallocator<Config>::scavenge):
* bmalloc/IsoDirectory.h:
* bmalloc/IsoDirectoryInlines.h:
(bmalloc::passedNumPages>::IsoDirectory):
(bmalloc::passedNumPages>::takeFirstEligible):
(bmalloc::passedNumPages>::didBecome):
(bmalloc::passedNumPages>::didDecommit):
(bmalloc::passedNumPages>::scavengePage):
(bmalloc::passedNumPages>::scavenge):
(bmalloc::passedNumPages>::scavengeToHighWatermark):
(bmalloc::passedNumPages>::forEachCommittedPage):
* bmalloc/IsoHeapImpl.cpp:
(bmalloc::IsoHeapImplBase::IsoHeapImplBase):
* bmalloc/IsoHeapImpl.h:
* bmalloc/IsoHeapImplInlines.h:
(bmalloc::IsoHeapImpl<Config>::IsoHeapImpl):
(bmalloc::IsoHeapImpl<Config>::takeFirstEligible):
(bmalloc::IsoHeapImpl<Config>::didBecomeEligibleOrDecommited):
(bmalloc::IsoHeapImpl<Config>::scavenge):
(bmalloc::IsoHeapImpl<Config>::scavengeToHighWatermark):
(bmalloc::IsoHeapImplBase::freeableMemory):
(bmalloc::IsoHeapImpl<Config>::numLiveObjects):
(bmalloc::IsoHeapImpl<Config>::numCommittedPages):
(bmalloc::IsoHeapImpl<Config>::forEachDirectory):
(bmalloc::IsoHeapImpl<Config>::forEachCommittedPage):
(bmalloc::IsoHeapImpl<Config>::forEachLiveObject):
(bmalloc::IsoHeapImplBase::footprint):
(bmalloc::IsoHeapImplBase::didCommit):
(bmalloc::IsoHeapImplBase::didDecommit):
(bmalloc::IsoHeapImplBase::isNowFreeable):
(bmalloc::IsoHeapImplBase::isNoLongerFreeable):
(bmalloc::IsoHeapImpl<Config>::allocateFromShared):
(bmalloc::IsoHeapImpl<Config>::freeableMemory): Deleted.
(bmalloc::IsoHeapImpl<Config>::footprint): Deleted.
(bmalloc::IsoHeapImpl<Config>::didCommit): Deleted.
(bmalloc::IsoHeapImpl<Config>::didDecommit): Deleted.
(bmalloc::IsoHeapImpl<Config>::isNowFreeable): Deleted.
(bmalloc::IsoHeapImpl<Config>::isNoLongerFreeable): Deleted.
* bmalloc/IsoPage.h:
(bmalloc::IsoPageBase::IsoPageBase):
* bmalloc/IsoPageInlines.h:
(bmalloc::IsoPage<Config>::IsoPage):
(bmalloc::IsoPage<Config>::free):
(bmalloc::IsoPage<Config>::startAllocating):
(bmalloc::IsoPage<Config>::stopAllocating):
(bmalloc::IsoPage<Config>::forEachLiveObject):
* bmalloc/IsoSharedHeap.h:
(bmalloc::IsoSharedHeap::IsoSharedHeap):
* bmalloc/IsoSharedHeapInlines.h:
(bmalloc::IsoSharedHeap::allocateNew):
(bmalloc::IsoSharedHeap::allocateSlow):
* bmalloc/IsoSharedPage.h:
* bmalloc/IsoSharedPageInlines.h:
(bmalloc::IsoSharedPage::free):
(bmalloc::IsoSharedPage::startAllocating):
(bmalloc::IsoSharedPage::stopAllocating):
* bmalloc/IsoTLS.h:
* bmalloc/IsoTLSAllocatorEntry.h:
* bmalloc/IsoTLSAllocatorEntryInlines.h:
(bmalloc::IsoTLSAllocatorEntry<Config>::scavenge):
* bmalloc/IsoTLSDeallocatorEntry.h:
* bmalloc/IsoTLSDeallocatorEntryInlines.h:
(bmalloc::IsoTLSDeallocatorEntry<Config>::scavenge):
* bmalloc/IsoTLSEntry.cpp:
(bmalloc::IsoTLSEntry::IsoTLSEntry):
* bmalloc/IsoTLSEntry.h:
* bmalloc/IsoTLSEntryInlines.h:
(bmalloc::DefaultIsoTLSEntry<EntryType>::DefaultIsoTLSEntry):
(bmalloc::DefaultIsoTLSEntry<EntryType>::~DefaultIsoTLSEntry): Deleted.
(bmalloc::DefaultIsoTLSEntry<EntryType>::scavenge): Deleted.
* bmalloc/IsoTLSInlines.h:
(bmalloc::IsoTLS::scavenge):
(bmalloc::IsoTLS::allocateImpl):
(bmalloc::IsoTLS::allocateFast):
(bmalloc::IsoTLS::allocateSlow):
* bmalloc/IsoTLSLayout.cpp:
(bmalloc::IsoTLSLayout::add):
* bmalloc/Packed.h: Added.
(bmalloc::Packed::Packed):
(bmalloc::Packed::get const):
(bmalloc::Packed::set):
(bmalloc::Packed::operator=):
(bmalloc::Packed::exchange):
(bmalloc::Packed::swap):
(bmalloc::alignof):
(bmalloc::PackedPtrTraits::exchange):
(bmalloc::PackedPtrTraits::swap):
(bmalloc::PackedPtrTraits::unwrap):
* bmalloc/Scavenger.cpp:
(bmalloc::Scavenger::Scavenger):
* bmalloc/Scavenger.h:
* bmalloc/VMHeap.cpp:
(bmalloc::VMHeap::VMHeap):
* bmalloc/VMHeap.h:
* bmalloc/Zone.cpp:
(bmalloc::Zone::Zone):
* bmalloc/Zone.h:

Tools:

* TestWebKitAPI/Tests/WTF/bmalloc/IsoHeap.cpp:
(assertHasObjects):
(assertHasOnlyObjects):
(assertClean):
(TEST):

Diff

Modified: trunk/Source/bmalloc/CMakeLists.txt (254707 => 254708)


--- trunk/Source/bmalloc/CMakeLists.txt	2020-01-16 22:04:48 UTC (rev 254707)
+++ trunk/Source/bmalloc/CMakeLists.txt	2020-01-16 22:05:00 UTC (rev 254708)
@@ -112,6 +112,7 @@
     bmalloc/Mutex.h
     bmalloc/Object.h
     bmalloc/ObjectType.h
+    bmalloc/Packed.h
     bmalloc/PerHeapKind.h
     bmalloc/PerProcess.h
     bmalloc/PerThread.h

Modified: trunk/Source/bmalloc/ChangeLog (254707 => 254708)


--- trunk/Source/bmalloc/ChangeLog	2020-01-16 22:04:48 UTC (rev 254707)
+++ trunk/Source/bmalloc/ChangeLog	2020-01-16 22:05:00 UTC (rev 254708)
@@ -1,3 +1,152 @@
+2020-01-16  Yusuke Suzuki  <[email protected]>
+
+        [bmalloc] IsoHeap's initial setup should be small
+        https://bugs.webkit.org/show_bug.cgi?id=206214
+
+        Reviewed by Michael Saboff.
+
+        Keep IsoHeap-related data structures small by using the Packed technique. We are starting to use IsoHeap for
+        many classes, so it is important to keep the IsoHeap metadata small, because this data exists persistently.
+
+        1. We pass IsoHeapImpl<> as a parameter instead of holding it unnecessarily.
+        2. We make some of the pointers Packed so that we can keep sizeof(IsoHeapImpl<Config>) small.
+        3. One drawback of PackedPtr is that its loads and stores are not atomic. So we pass `const std::lock_guard<Mutex>&`
+           to functions that must be called with the lock held, ensuring that PackedPtr fields are accessed only while
+           the lock is held correctly.
+
+        * CMakeLists.txt:
+        * bmalloc.xcodeproj/project.pbxproj:
+        * bmalloc/Algorithm.h:
+        (bmalloc::ctzConstexpr):
+        (bmalloc::getLSBSetNonZeroConstexpr):
+        * bmalloc/BPlatform.h:
+        * bmalloc/DebugHeap.cpp:
+        (bmalloc::DebugHeap::DebugHeap):
+        * bmalloc/DebugHeap.h:
+        * bmalloc/DeferredTrigger.h:
+        * bmalloc/DeferredTriggerInlines.h:
+        (bmalloc::DeferredTrigger<trigger>::didBecome):
+        (bmalloc::DeferredTrigger<trigger>::handleDeferral):
+        * bmalloc/Environment.cpp:
+        (bmalloc::Environment::Environment):
+        * bmalloc/Environment.h:
+        * bmalloc/Gigacage.cpp:
+        (bmalloc::PrimitiveDisableCallbacks::PrimitiveDisableCallbacks):
+        * bmalloc/Heap.cpp:
+        (bmalloc::Heap::freeableMemory):
+        (bmalloc::Heap::markAllLargeAsEligibile):
+        (bmalloc::Heap::decommitLargeRange):
+        (bmalloc::Heap::scavenge):
+        (bmalloc::Heap::scavengeToHighWatermark):
+        * bmalloc/Heap.h:
+        * bmalloc/HeapConstants.cpp:
+        (bmalloc::HeapConstants::HeapConstants):
+        * bmalloc/HeapConstants.h:
+        * bmalloc/IsoAllocator.h:
+        * bmalloc/IsoAllocatorInlines.h:
+        (bmalloc::IsoAllocator<Config>::IsoAllocator):
+        (bmalloc::IsoAllocator<Config>::allocate):
+        (bmalloc::IsoAllocator<Config>::allocateSlow):
+        (bmalloc::IsoAllocator<Config>::scavenge):
+        * bmalloc/IsoDeallocatorInlines.h:
+        (bmalloc::IsoDeallocator<Config>::scavenge):
+        * bmalloc/IsoDirectory.h:
+        * bmalloc/IsoDirectoryInlines.h:
+        (bmalloc::passedNumPages>::IsoDirectory):
+        (bmalloc::passedNumPages>::takeFirstEligible):
+        (bmalloc::passedNumPages>::didBecome):
+        (bmalloc::passedNumPages>::didDecommit):
+        (bmalloc::passedNumPages>::scavengePage):
+        (bmalloc::passedNumPages>::scavenge):
+        (bmalloc::passedNumPages>::scavengeToHighWatermark):
+        (bmalloc::passedNumPages>::forEachCommittedPage):
+        * bmalloc/IsoHeapImpl.cpp:
+        (bmalloc::IsoHeapImplBase::IsoHeapImplBase):
+        * bmalloc/IsoHeapImpl.h:
+        * bmalloc/IsoHeapImplInlines.h:
+        (bmalloc::IsoHeapImpl<Config>::IsoHeapImpl):
+        (bmalloc::IsoHeapImpl<Config>::takeFirstEligible):
+        (bmalloc::IsoHeapImpl<Config>::didBecomeEligibleOrDecommited):
+        (bmalloc::IsoHeapImpl<Config>::scavenge):
+        (bmalloc::IsoHeapImpl<Config>::scavengeToHighWatermark):
+        (bmalloc::IsoHeapImplBase::freeableMemory):
+        (bmalloc::IsoHeapImpl<Config>::numLiveObjects):
+        (bmalloc::IsoHeapImpl<Config>::numCommittedPages):
+        (bmalloc::IsoHeapImpl<Config>::forEachDirectory):
+        (bmalloc::IsoHeapImpl<Config>::forEachCommittedPage):
+        (bmalloc::IsoHeapImpl<Config>::forEachLiveObject):
+        (bmalloc::IsoHeapImplBase::footprint):
+        (bmalloc::IsoHeapImplBase::didCommit):
+        (bmalloc::IsoHeapImplBase::didDecommit):
+        (bmalloc::IsoHeapImplBase::isNowFreeable):
+        (bmalloc::IsoHeapImplBase::isNoLongerFreeable):
+        (bmalloc::IsoHeapImpl<Config>::allocateFromShared):
+        (bmalloc::IsoHeapImpl<Config>::freeableMemory): Deleted.
+        (bmalloc::IsoHeapImpl<Config>::footprint): Deleted.
+        (bmalloc::IsoHeapImpl<Config>::didCommit): Deleted.
+        (bmalloc::IsoHeapImpl<Config>::didDecommit): Deleted.
+        (bmalloc::IsoHeapImpl<Config>::isNowFreeable): Deleted.
+        (bmalloc::IsoHeapImpl<Config>::isNoLongerFreeable): Deleted.
+        * bmalloc/IsoPage.h:
+        (bmalloc::IsoPageBase::IsoPageBase):
+        * bmalloc/IsoPageInlines.h:
+        (bmalloc::IsoPage<Config>::IsoPage):
+        (bmalloc::IsoPage<Config>::free):
+        (bmalloc::IsoPage<Config>::startAllocating):
+        (bmalloc::IsoPage<Config>::stopAllocating):
+        (bmalloc::IsoPage<Config>::forEachLiveObject):
+        * bmalloc/IsoSharedHeap.h:
+        (bmalloc::IsoSharedHeap::IsoSharedHeap):
+        * bmalloc/IsoSharedHeapInlines.h:
+        (bmalloc::IsoSharedHeap::allocateNew):
+        (bmalloc::IsoSharedHeap::allocateSlow):
+        * bmalloc/IsoSharedPage.h:
+        * bmalloc/IsoSharedPageInlines.h:
+        (bmalloc::IsoSharedPage::free):
+        (bmalloc::IsoSharedPage::startAllocating):
+        (bmalloc::IsoSharedPage::stopAllocating):
+        * bmalloc/IsoTLS.h:
+        * bmalloc/IsoTLSAllocatorEntry.h:
+        * bmalloc/IsoTLSAllocatorEntryInlines.h:
+        (bmalloc::IsoTLSAllocatorEntry<Config>::scavenge):
+        * bmalloc/IsoTLSDeallocatorEntry.h:
+        * bmalloc/IsoTLSDeallocatorEntryInlines.h:
+        (bmalloc::IsoTLSDeallocatorEntry<Config>::scavenge):
+        * bmalloc/IsoTLSEntry.cpp:
+        (bmalloc::IsoTLSEntry::IsoTLSEntry):
+        * bmalloc/IsoTLSEntry.h:
+        * bmalloc/IsoTLSEntryInlines.h:
+        (bmalloc::DefaultIsoTLSEntry<EntryType>::DefaultIsoTLSEntry):
+        (bmalloc::DefaultIsoTLSEntry<EntryType>::~DefaultIsoTLSEntry): Deleted.
+        (bmalloc::DefaultIsoTLSEntry<EntryType>::scavenge): Deleted.
+        * bmalloc/IsoTLSInlines.h:
+        (bmalloc::IsoTLS::scavenge):
+        (bmalloc::IsoTLS::allocateImpl):
+        (bmalloc::IsoTLS::allocateFast):
+        (bmalloc::IsoTLS::allocateSlow):
+        * bmalloc/IsoTLSLayout.cpp:
+        (bmalloc::IsoTLSLayout::add):
+        * bmalloc/Packed.h: Added.
+        (bmalloc::Packed::Packed):
+        (bmalloc::Packed::get const):
+        (bmalloc::Packed::set):
+        (bmalloc::Packed::operator=):
+        (bmalloc::Packed::exchange):
+        (bmalloc::Packed::swap):
+        (bmalloc::alignof):
+        (bmalloc::PackedPtrTraits::exchange):
+        (bmalloc::PackedPtrTraits::swap):
+        (bmalloc::PackedPtrTraits::unwrap):
+        * bmalloc/Scavenger.cpp:
+        (bmalloc::Scavenger::Scavenger):
+        * bmalloc/Scavenger.h:
+        * bmalloc/VMHeap.cpp:
+        (bmalloc::VMHeap::VMHeap):
+        * bmalloc/VMHeap.h:
+        * bmalloc/Zone.cpp:
+        (bmalloc::Zone::Zone):
+        * bmalloc/Zone.h:
+
 2020-01-14  Basuke Suzuki  <[email protected]>
 
         [bmalloc] Calculate LineMetadata for specific VM page size in compile time

Modified: trunk/Source/bmalloc/bmalloc/Algorithm.h (254707 => 254708)


--- trunk/Source/bmalloc/bmalloc/Algorithm.h	2020-01-16 22:04:48 UTC (rev 254707)
+++ trunk/Source/bmalloc/bmalloc/Algorithm.h	2020-01-16 22:05:00 UTC (rev 254708)
@@ -28,6 +28,7 @@
 
 #include "BAssert.h"
 #include <algorithm>
+#include <climits>
 #include <cstdint>
 #include <cstddef>
 #include <limits>
@@ -193,6 +194,31 @@
     return false;
 }
 
+template <typename T>
+constexpr unsigned ctzConstexpr(T value)
+{
+    constexpr unsigned bitSize = sizeof(T) * CHAR_BIT;
+
+    using UT = typename std::make_unsigned<T>::type;
+    UT uValue = value;
+
+    unsigned zeroCount = 0;
+    for (unsigned i = 0; i < bitSize; i++) {
+        if (uValue & 1)
+            break;
+
+        zeroCount++;
+        uValue >>= 1;
+    }
+    return zeroCount;
+}
+
+template<typename T>
+constexpr unsigned getLSBSetNonZeroConstexpr(T t)
+{
+    return ctzConstexpr(t);
+}
+
 } // namespace bmalloc
 
 #endif // Algorithm_h
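
A hedged usage note on the new constexpr helpers (how Packed.h consumes them is an assumption, since its body is not
reproduced in this diff): they compute trailing-zero counts at compile time, which is what allows an aligned pointer
to drop its known-zero low bits in addition to the unused high address bits.

    // Assumed usage sketch, not taken from Packed.h:
    static_assert(bmalloc::ctzConstexpr(8u) == 3, "8 == 0b1000 has three trailing zero bits");
    static_assert(bmalloc::getLSBSetNonZeroConstexpr(4096u) == 12, "a 4096-byte-aligned pointer has 12 known-zero low bits");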

Modified: trunk/Source/bmalloc/bmalloc/BPlatform.h (254707 => 254708)


--- trunk/Source/bmalloc/bmalloc/BPlatform.h	2020-01-16 22:04:48 UTC (rev 254707)
+++ trunk/Source/bmalloc/bmalloc/BPlatform.h	2020-01-16 22:05:00 UTC (rev 254708)
@@ -230,6 +230,45 @@
 
 #endif /* ARM */
 
+
+#if BCOMPILER(GCC_COMPATIBLE)
+/* __LP64__ is not defined on 64bit Windows since it uses LLP64. Using __SIZEOF_POINTER__ is simpler. */
+#if __SIZEOF_POINTER__ == 8
+#define BCPU_ADDRESS64 1
+#elif __SIZEOF_POINTER__ == 4
+#define BCPU_ADDRESS32 1
+#else
+#error "Unsupported pointer width"
+#endif
+#else
+#error "Unsupported compiler for bmalloc"
+#endif
+
+#if BCOMPILER(GCC_COMPATIBLE)
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#define BCPU_BIG_ENDIAN 1
+#elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define BCPU_LITTLE_ENDIAN 1
+#elif __BYTE_ORDER__ == __ORDER_PDP_ENDIAN__
+#define BCPU_MIDDLE_ENDIAN 1
+#else
+#error "Unknown endian"
+#endif
+#else
+#error "Unsupported compiler for bmalloc"
+#endif
+
+#if BCPU(ADDRESS64)
+#if BOS(DARWIN) && BCPU(ARM64)
+#define BOS_EFFECTIVE_ADDRESS_WIDTH 36
+#else
+/* We strongly assume that effective address width is <= 48 in 64bit architectures (e.g. NaN boxing). */
+#define BOS_EFFECTIVE_ADDRESS_WIDTH 48
+#endif
+#else
+#define BOS_EFFECTIVE_ADDRESS_WIDTH 32
+#endif
+
 #define BATTRIBUTE_PRINTF(formatStringArgument, extraArguments) __attribute__((__format__(printf, formatStringArgument, extraArguments)))
 
 #if BPLATFORM(MAC) || BPLATFORM(IOS_FAMILY)

Modified: trunk/Source/bmalloc/bmalloc/DebugHeap.cpp (254707 => 254708)


--- trunk/Source/bmalloc/bmalloc/DebugHeap.cpp	2020-01-16 22:04:48 UTC (rev 254707)
+++ trunk/Source/bmalloc/bmalloc/DebugHeap.cpp	2020-01-16 22:05:00 UTC (rev 254708)
@@ -40,7 +40,7 @@
 
 #if BOS(DARWIN)
 
-DebugHeap::DebugHeap(std::lock_guard<Mutex>&)
+DebugHeap::DebugHeap(const std::lock_guard<Mutex>&)
     : m_zone(malloc_create_zone(0, 0))
     , m_pageSize(vmPageSize())
 {
@@ -88,7 +88,7 @@
 
 #else
 
-DebugHeap::DebugHeap(std::lock_guard<Mutex>&)
+DebugHeap::DebugHeap(const std::lock_guard<Mutex>&)
     : m_pageSize(vmPageSize())
 {
 }

Modified: trunk/Source/bmalloc/bmalloc/DebugHeap.h (254707 => 254708)


--- trunk/Source/bmalloc/bmalloc/DebugHeap.h	2020-01-16 22:04:48 UTC (rev 254707)
+++ trunk/Source/bmalloc/bmalloc/DebugHeap.h	2020-01-16 22:05:00 UTC (rev 254708)
@@ -40,7 +40,7 @@
     
 class DebugHeap : private StaticPerProcess<DebugHeap> {
 public:
-    DebugHeap(std::lock_guard<Mutex>&);
+    DebugHeap(const std::lock_guard<Mutex>&);
     
     void* malloc(size_t, FailureAction);
     void* memalign(size_t alignment, size_t, FailureAction);

Modified: trunk/Source/bmalloc/bmalloc/DeferredTrigger.h (254707 => 254708)


--- trunk/Source/bmalloc/bmalloc/DeferredTrigger.h	2020-01-16 22:04:48 UTC (rev 254707)
+++ trunk/Source/bmalloc/bmalloc/DeferredTrigger.h	2020-01-16 22:05:00 UTC (rev 254708)
@@ -26,6 +26,8 @@
 #pragma once
 
 #include "IsoPageTrigger.h"
+#include "Mutex.h"
+#include <mutex>
 
 namespace bmalloc {
 
@@ -37,10 +39,10 @@
     DeferredTrigger() { }
     
     template<typename Config>
-    void didBecome(IsoPage<Config>&);
+    void didBecome(const std::lock_guard<Mutex>&, IsoPage<Config>&);
     
     template<typename Config>
-    void handleDeferral(IsoPage<Config>&);
+    void handleDeferral(const std::lock_guard<Mutex>&, IsoPage<Config>&);
     
 private:
     bool m_hasBeenDeferred { false };

Modified: trunk/Source/bmalloc/bmalloc/DeferredTriggerInlines.h (254707 => 254708)


--- trunk/Source/bmalloc/bmalloc/DeferredTriggerInlines.h	2020-01-16 22:04:48 UTC (rev 254707)
+++ trunk/Source/bmalloc/bmalloc/DeferredTriggerInlines.h	2020-01-16 22:05:00 UTC (rev 254708)
@@ -32,22 +32,22 @@
 
 template<IsoPageTrigger trigger>
 template<typename Config>
-void DeferredTrigger<trigger>::didBecome(IsoPage<Config>& page)
+void DeferredTrigger<trigger>::didBecome(const std::lock_guard<Mutex>& locker, IsoPage<Config>& page)
 {
     if (page.isInUseForAllocation())
         m_hasBeenDeferred = true;
     else
-        page.directory().didBecome(&page, trigger);
+        page.directory().didBecome(locker, &page, trigger);
 }
 
 template<IsoPageTrigger trigger>
 template<typename Config>
-void DeferredTrigger<trigger>::handleDeferral(IsoPage<Config>& page)
+void DeferredTrigger<trigger>::handleDeferral(const std::lock_guard<Mutex>& locker, IsoPage<Config>& page)
 {
     RELEASE_BASSERT(!page.isInUseForAllocation());
     
     if (m_hasBeenDeferred) {
-        page.directory().didBecome(&page, trigger);
+        page.directory().didBecome(locker, &page, trigger);
         m_hasBeenDeferred = false;
     }
 }

Modified: trunk/Source/bmalloc/bmalloc/Environment.cpp (254707 => 254708)


--- trunk/Source/bmalloc/bmalloc/Environment.cpp	2020-01-16 22:04:48 UTC (rev 254707)
+++ trunk/Source/bmalloc/bmalloc/Environment.cpp	2020-01-16 22:05:00 UTC (rev 254708)
@@ -127,7 +127,7 @@
 
 DEFINE_STATIC_PER_PROCESS_STORAGE(Environment);
 
-Environment::Environment(std::lock_guard<Mutex>&)
+Environment::Environment(const std::lock_guard<Mutex>&)
     : m_isDebugHeapEnabled(computeIsDebugHeapEnabled())
 {
 }

Modified: trunk/Source/bmalloc/bmalloc/Environment.h (254707 => 254708)


--- trunk/Source/bmalloc/bmalloc/Environment.h	2020-01-16 22:04:48 UTC (rev 254707)
+++ trunk/Source/bmalloc/bmalloc/Environment.h	2020-01-16 22:05:00 UTC (rev 254708)
@@ -33,7 +33,7 @@
 
 class Environment : public StaticPerProcess<Environment> {
 public:
-    BEXPORT Environment(std::lock_guard<Mutex>&);
+    BEXPORT Environment(const std::lock_guard<Mutex>&);
     
     bool isDebugHeapEnabled() { return m_isDebugHeapEnabled; }
 

Modified: trunk/Source/bmalloc/bmalloc/Gigacage.cpp (254707 => 254708)


--- trunk/Source/bmalloc/bmalloc/Gigacage.cpp	2020-01-16 22:04:48 UTC (rev 254707)
+++ trunk/Source/bmalloc/bmalloc/Gigacage.cpp	2020-01-16 22:05:00 UTC (rev 254708)
@@ -61,7 +61,7 @@
 namespace bmalloc {
 
 struct PrimitiveDisableCallbacks : public StaticPerProcess<PrimitiveDisableCallbacks> {
-    PrimitiveDisableCallbacks(std::lock_guard<Mutex>&) { }
+    PrimitiveDisableCallbacks(const std::lock_guard<Mutex>&) { }
     
     Vector<Gigacage::Callback> callbacks;
 };

Modified: trunk/Source/bmalloc/bmalloc/Heap.cpp (254707 => 254708)


--- trunk/Source/bmalloc/bmalloc/Heap.cpp	2020-01-16 22:04:48 UTC (rev 254707)
+++ trunk/Source/bmalloc/bmalloc/Heap.cpp	2020-01-16 22:05:00 UTC (rev 254708)
@@ -81,7 +81,7 @@
     return Gigacage::size(gigacageKind(m_kind));
 }
 
-size_t Heap::freeableMemory(std::lock_guard<Mutex>&)
+size_t Heap::freeableMemory(const std::lock_guard<Mutex>&)
 {
     return m_freeableMemory;
 }
@@ -91,7 +91,7 @@
     return m_footprint;
 }
 
-void Heap::markAllLargeAsEligibile(std::lock_guard<Mutex>&)
+void Heap::markAllLargeAsEligibile(const std::lock_guard<Mutex>&)
 {
     m_largeFree.markAllAsEligibile();
     m_hasPendingDecommits = false;
@@ -98,7 +98,7 @@
     m_condition.notify_all();
 }
 
-void Heap::decommitLargeRange(std::lock_guard<Mutex>&, LargeRange& range, BulkDecommit& decommitter)
+void Heap::decommitLargeRange(const std::lock_guard<Mutex>&, LargeRange& range, BulkDecommit& decommitter)
 {
     m_footprint -= range.totalPhysicalSize();
     m_freeableMemory -= range.totalPhysicalSize();
@@ -114,9 +114,9 @@
 }
 
 #if BUSE(PARTIAL_SCAVENGE)
-void Heap::scavenge(std::lock_guard<Mutex>& lock, BulkDecommit& decommitter)
+void Heap::scavenge(const std::lock_guard<Mutex>& lock, BulkDecommit& decommitter)
 #else
-void Heap::scavenge(std::lock_guard<Mutex>& lock, BulkDecommit& decommitter, size_t& deferredDecommits)
+void Heap::scavenge(const std::lock_guard<Mutex>& lock, BulkDecommit& decommitter, size_t& deferredDecommits)
 #endif
 {
     for (auto& list : m_freePages) {
@@ -169,7 +169,7 @@
 }
 
 #if BUSE(PARTIAL_SCAVENGE)
-void Heap::scavengeToHighWatermark(std::lock_guard<Mutex>& lock, BulkDecommit& decommitter)
+void Heap::scavengeToHighWatermark(const std::lock_guard<Mutex>& lock, BulkDecommit& decommitter)
 {
     void* newHighWaterMark = nullptr;
     for (LargeRange& range : m_largeFree) {

Modified: trunk/Source/bmalloc/bmalloc/Heap.h (254707 => 254708)


--- trunk/Source/bmalloc/bmalloc/Heap.h	2020-01-16 22:04:48 UTC (rev 254707)
+++ trunk/Source/bmalloc/bmalloc/Heap.h	2020-01-16 22:05:00 UTC (rev 254708)
@@ -76,14 +76,14 @@
     void shrinkLarge(std::unique_lock<Mutex>&, const Range&, size_t);
 
 #if BUSE(PARTIAL_SCAVENGE)
-    void scavengeToHighWatermark(std::lock_guard<Mutex>&, BulkDecommit&);
-    void scavenge(std::lock_guard<Mutex>&, BulkDecommit&);
+    void scavengeToHighWatermark(const std::lock_guard<Mutex>&, BulkDecommit&);
+    void scavenge(const std::lock_guard<Mutex>&, BulkDecommit&);
 #else
-    void scavenge(std::lock_guard<Mutex>&, BulkDecommit&, size_t& deferredDecommits);
+    void scavenge(const std::lock_guard<Mutex>&, BulkDecommit&, size_t& deferredDecommits);
 #endif
-    void scavenge(std::lock_guard<Mutex>&, BulkDecommit&, size_t& freed, size_t goal);
+    void scavenge(const std::lock_guard<Mutex>&, BulkDecommit&, size_t& freed, size_t goal);
 
-    size_t freeableMemory(std::lock_guard<Mutex>&);
+    size_t freeableMemory(const std::lock_guard<Mutex>&);
     size_t footprint();
 
     void externalDecommit(void* ptr, size_t);
@@ -91,10 +91,10 @@
     void externalCommit(void* ptr, size_t);
     void externalCommit(std::unique_lock<Mutex>&, void* ptr, size_t);
 
-    void markAllLargeAsEligibile(std::lock_guard<Mutex>&);
+    void markAllLargeAsEligibile(const std::lock_guard<Mutex>&);
 
 private:
-    void decommitLargeRange(std::lock_guard<Mutex>&, LargeRange&, BulkDecommit&);
+    void decommitLargeRange(const std::lock_guard<Mutex>&, LargeRange&, BulkDecommit&);
 
     struct LargeObjectHash {
         static unsigned hash(void* key)

Modified: trunk/Source/bmalloc/bmalloc/HeapConstants.cpp (254707 => 254708)


--- trunk/Source/bmalloc/bmalloc/HeapConstants.cpp	2020-01-16 22:04:48 UTC (rev 254707)
+++ trunk/Source/bmalloc/bmalloc/HeapConstants.cpp	2020-01-16 22:05:00 UTC (rev 254708)
@@ -30,7 +30,7 @@
 
 DEFINE_STATIC_PER_PROCESS_STORAGE(HeapConstants);
 
-HeapConstants::HeapConstants(std::lock_guard<Mutex>&)
+HeapConstants::HeapConstants(const std::lock_guard<Mutex>&)
     : m_vmPageSizePhysical { vmPageSizePhysical() }
 {
     RELEASE_BASSERT(m_vmPageSizePhysical >= smallPageSize);

Modified: trunk/Source/bmalloc/bmalloc/HeapConstants.h (254707 => 254708)


--- trunk/Source/bmalloc/bmalloc/HeapConstants.h	2020-01-16 22:04:48 UTC (rev 254707)
+++ trunk/Source/bmalloc/bmalloc/HeapConstants.h	2020-01-16 22:05:00 UTC (rev 254708)
@@ -37,7 +37,7 @@
 
 class HeapConstants : public StaticPerProcess<HeapConstants> {
 public:
-    HeapConstants(std::lock_guard<Mutex>&);
+    HeapConstants(const std::lock_guard<Mutex>&);
     ~HeapConstants() = delete;
 
     inline size_t pageClass(size_t sizeClass) const { return m_pageClasses[sizeClass]; }

Modified: trunk/Source/bmalloc/bmalloc/IsoAllocator.h (254707 => 254708)


--- trunk/Source/bmalloc/bmalloc/IsoAllocator.h	2020-01-16 22:04:48 UTC (rev 254707)
+++ trunk/Source/bmalloc/bmalloc/IsoAllocator.h	2020-01-16 22:05:00 UTC (rev 254708)
@@ -40,13 +40,12 @@
     IsoAllocator(IsoHeapImpl<Config>&);
     ~IsoAllocator();
     
-    void* allocate(bool abortOnFailure);
-    void scavenge();
+    void* allocate(IsoHeapImpl<Config>&, bool abortOnFailure);
+    void scavenge(IsoHeapImpl<Config>&);
     
 private:
-    void* allocateSlow(bool abortOnFailure);
+    void* allocateSlow(IsoHeapImpl<Config>&, bool abortOnFailure);
     
-    IsoHeapImpl<Config>* m_heap { nullptr };
     FreeList m_freeList;
     IsoPage<Config>* m_currentPage { nullptr };
 };

Modified: trunk/Source/bmalloc/bmalloc/IsoAllocatorInlines.h (254707 => 254708)


--- trunk/Source/bmalloc/bmalloc/IsoAllocatorInlines.h	2020-01-16 22:04:48 UTC (rev 254707)
+++ trunk/Source/bmalloc/bmalloc/IsoAllocatorInlines.h	2020-01-16 22:05:00 UTC (rev 254708)
@@ -34,8 +34,7 @@
 namespace bmalloc {
 
 template<typename Config>
-IsoAllocator<Config>::IsoAllocator(IsoHeapImpl<Config>& heap)
-    : m_heap(&heap)
+IsoAllocator<Config>::IsoAllocator(IsoHeapImpl<Config>&)
 {
 }
 
@@ -45,36 +44,36 @@
 }
 
 template<typename Config>
-void* IsoAllocator<Config>::allocate(bool abortOnFailure)
+void* IsoAllocator<Config>::allocate(IsoHeapImpl<Config>& heap, bool abortOnFailure)
 {
     static constexpr bool verbose = false;
     void* result = m_freeList.allocate<Config>(
         [&] () -> void* {
-            return allocateSlow(abortOnFailure);
+            return allocateSlow(heap, abortOnFailure);
         });
     if (verbose)
-        fprintf(stderr, "%p: allocated %p of size %u\n", m_heap, result, Config::objectSize);
+        fprintf(stderr, "%p: allocated %p of size %u\n", &heap, result, Config::objectSize);
     return result;
 }
 
 template<typename Config>
-BNO_INLINE void* IsoAllocator<Config>::allocateSlow(bool abortOnFailure)
+BNO_INLINE void* IsoAllocator<Config>::allocateSlow(IsoHeapImpl<Config>& heap, bool abortOnFailure)
 {
-    std::lock_guard<Mutex> locker(m_heap->lock);
+    std::lock_guard<Mutex> locker(heap.lock);
 
-    AllocationMode allocationMode = m_heap->updateAllocationMode();
+    AllocationMode allocationMode = heap.updateAllocationMode();
     if (allocationMode == AllocationMode::Shared) {
         if (m_currentPage) {
-            m_currentPage->stopAllocating(m_freeList);
+            m_currentPage->stopAllocating(locker, m_freeList);
             m_currentPage = nullptr;
             m_freeList.clear();
         }
-        return m_heap->allocateFromShared(locker, abortOnFailure);
+        return heap.allocateFromShared(locker, abortOnFailure);
     }
 
     BASSERT(allocationMode == AllocationMode::Fast);
     
-    EligibilityResult<Config> result = m_heap->takeFirstEligible();
+    EligibilityResult<Config> result = heap.takeFirstEligible(locker);
     if (result.kind != EligibilityKind::Success) {
         RELEASE_BASSERT(result.kind == EligibilityKind::OutOfMemory);
         RELEASE_BASSERT(!abortOnFailure);
@@ -82,20 +81,20 @@
     }
     
     if (m_currentPage)
-        m_currentPage->stopAllocating(m_freeList);
+        m_currentPage->stopAllocating(locker, m_freeList);
     
     m_currentPage = result.page;
-    m_freeList = m_currentPage->startAllocating();
+    m_freeList = m_currentPage->startAllocating(locker);
     
     return m_freeList.allocate<Config>([] () { BCRASH(); return nullptr; });
 }
 
 template<typename Config>
-void IsoAllocator<Config>::scavenge()
+void IsoAllocator<Config>::scavenge(IsoHeapImpl<Config>& heap)
 {
     if (m_currentPage) {
-        std::lock_guard<Mutex> locker(m_heap->lock);
-        m_currentPage->stopAllocating(m_freeList);
+        std::lock_guard<Mutex> locker(heap.lock);
+        m_currentPage->stopAllocating(locker, m_freeList);
         m_currentPage = nullptr;
         m_freeList.clear();
     }

Modified: trunk/Source/bmalloc/bmalloc/IsoDeallocatorInlines.h (254707 => 254708)


--- trunk/Source/bmalloc/bmalloc/IsoDeallocatorInlines.h	2020-01-16 22:04:48 UTC (rev 254707)
+++ trunk/Source/bmalloc/bmalloc/IsoDeallocatorInlines.h	2020-01-16 22:05:00 UTC (rev 254708)
@@ -76,7 +76,7 @@
     std::lock_guard<Mutex> locker(*m_lock);
     
     for (void* ptr : m_objectLog)
-        IsoPage<Config>::pageFor(ptr)->free(ptr);
+        IsoPage<Config>::pageFor(ptr)->free(locker, ptr);
     m_objectLog.clear();
 }
 

Modified: trunk/Source/bmalloc/bmalloc/IsoDirectory.h (254707 => 254708)


--- trunk/Source/bmalloc/bmalloc/IsoDirectory.h	2020-01-16 22:04:48 UTC (rev 254707)
+++ trunk/Source/bmalloc/bmalloc/IsoDirectory.h	2020-01-16 22:05:00 UTC (rev 254708)
@@ -28,6 +28,7 @@
 #include "Bits.h"
 #include "EligibilityResult.h"
 #include "IsoPage.h"
+#include "Packed.h"
 #include "Vector.h"
 
 namespace bmalloc {
@@ -49,7 +50,7 @@
     
     IsoHeapImpl<Config>& heap() { return m_heap; }
     
-    virtual void didBecome(IsoPage<Config>*, IsoPageTrigger) = 0;
+    virtual void didBecome(const std::lock_guard<Mutex>&, IsoPage<Config>*, IsoPageTrigger) = 0;
     
 protected:
     IsoHeapImpl<Config>& m_heap;
@@ -64,9 +65,9 @@
     
     // Find the first page that is eligible for allocation and return it. May return null if there is no
     // such thing. May allocate a new page if we have an uncommitted page.
-    EligibilityResult<Config> takeFirstEligible();
+    EligibilityResult<Config> takeFirstEligible(const std::lock_guard<Mutex>&);
     
-    void didBecome(IsoPage<Config>*, IsoPageTrigger) override;
+    void didBecome(const std::lock_guard<Mutex>&, IsoPage<Config>*, IsoPageTrigger) override;
     
     // This gets called from a bulk decommit function in the Scavenger, so no locks are held. This function
     // needs to get the heap lock.
@@ -74,23 +75,23 @@
     
     // Iterate over all empty and committed pages, and put them into the vector. This also records the
     // pages as being decommitted. It's the caller's job to do the actual decommitting.
-    void scavenge(Vector<DeferredDecommit>&);
+    void scavenge(const std::lock_guard<Mutex>&, Vector<DeferredDecommit>&);
 #if BUSE(PARTIAL_SCAVENGE)
-    void scavengeToHighWatermark(Vector<DeferredDecommit>&);
+    void scavengeToHighWatermark(const std::lock_guard<Mutex>&, Vector<DeferredDecommit>&);
 #endif
 
     template<typename Func>
-    void forEachCommittedPage(const Func&);
+    void forEachCommittedPage(const std::lock_guard<Mutex>&, const Func&);
     
 private:
-    void scavengePage(size_t, Vector<DeferredDecommit>&);
+    void scavengePage(const std::lock_guard<Mutex>&, size_t, Vector<DeferredDecommit>&);
 
+    std::array<PackedAlignedPtr<IsoPage<Config>, IsoPage<Config>::pageSize>, numPages> m_pages { };
     // NOTE: I suppose that this could be two bitvectors. But from working on the GC, I found that the
     // number of bitvectors does not matter as much as whether or not they make intuitive sense.
     Bits<numPages> m_eligible;
     Bits<numPages> m_empty;
     Bits<numPages> m_committed;
-    std::array<IsoPage<Config>*, numPages> m_pages;
     unsigned m_firstEligibleOrDecommitted { 0 };
 #if BUSE(PARTIAL_SCAVENGE)
     unsigned m_highWatermark { 0 };

Modified: trunk/Source/bmalloc/bmalloc/IsoDirectoryInlines.h (254707 => 254708)


--- trunk/Source/bmalloc/bmalloc/IsoDirectoryInlines.h	2020-01-16 22:04:48 UTC (rev 254707)
+++ trunk/Source/bmalloc/bmalloc/IsoDirectoryInlines.h	2020-01-16 22:05:00 UTC (rev 254708)
@@ -39,12 +39,10 @@
 IsoDirectory<Config, passedNumPages>::IsoDirectory(IsoHeapImpl<Config>& heap)
     : IsoDirectoryBase<Config>(heap)
 {
-    for (unsigned i = numPages; i--;)
-        m_pages[i] = nullptr;
 }
 
 template<typename Config, unsigned passedNumPages>
-EligibilityResult<Config> IsoDirectory<Config, passedNumPages>::takeFirstEligible()
+EligibilityResult<Config> IsoDirectory<Config, passedNumPages>::takeFirstEligible(const std::lock_guard<Mutex>&)
 {
     unsigned pageIndex = (m_eligible | ~m_committed).findBit(m_firstEligibleOrDecommitted, true);
     m_firstEligibleOrDecommitted = pageIndex;
@@ -59,7 +57,7 @@
     Scavenger& scavenger = *Scavenger::get();
     scavenger.didStartGrowing();
     
-    IsoPage<Config>* page = m_pages[pageIndex];
+    IsoPage<Config>* page = m_pages[pageIndex].get();
     
     if (!m_committed[pageIndex]) {
         scavenger.scheduleIfUnderMemoryPressure(IsoPageBase::pageSize);
@@ -93,7 +91,7 @@
 }
 
 template<typename Config, unsigned passedNumPages>
-void IsoDirectory<Config, passedNumPages>::didBecome(IsoPage<Config>* page, IsoPageTrigger trigger)
+void IsoDirectory<Config, passedNumPages>::didBecome(const std::lock_guard<Mutex>& locker, IsoPage<Config>* page, IsoPageTrigger trigger)
 {
     static constexpr bool verbose = false;
     unsigned pageIndex = page->index();
@@ -103,7 +101,7 @@
             fprintf(stderr, "%p: %p did become eligible.\n", this, page);
         m_eligible[pageIndex] = true;
         m_firstEligibleOrDecommitted = std::min(m_firstEligibleOrDecommitted, pageIndex);
-        this->m_heap.didBecomeEligibleOrDecommited(this);
+        this->m_heap.didBecomeEligibleOrDecommited(locker, this);
         return;
     case IsoPageTrigger::Empty:
         if (verbose)
@@ -125,28 +123,28 @@
     // syscall itself (which has to do many hard things).
     std::lock_guard<Mutex> locker(this->m_heap.lock);
     BASSERT(!!m_committed[index]);
-    this->m_heap.isNoLongerFreeable(m_pages[index], IsoPageBase::pageSize);
+    this->m_heap.isNoLongerFreeable(m_pages[index].get(), IsoPageBase::pageSize);
     m_committed[index] = false;
     m_firstEligibleOrDecommitted = std::min(m_firstEligibleOrDecommitted, index);
-    this->m_heap.didBecomeEligibleOrDecommited(this);
-    this->m_heap.didDecommit(m_pages[index], IsoPageBase::pageSize);
+    this->m_heap.didBecomeEligibleOrDecommited(locker, this);
+    this->m_heap.didDecommit(m_pages[index].get(), IsoPageBase::pageSize);
 }
 
 template<typename Config, unsigned passedNumPages>
-void IsoDirectory<Config, passedNumPages>::scavengePage(size_t index, Vector<DeferredDecommit>& decommits)
+void IsoDirectory<Config, passedNumPages>::scavengePage(const std::lock_guard<Mutex>&, size_t index, Vector<DeferredDecommit>& decommits)
 {
     // Make sure that this page is now off limits.
     m_empty[index] = false;
     m_eligible[index] = false;
-    decommits.push(DeferredDecommit(this, m_pages[index], index));
+    decommits.push(DeferredDecommit(this, m_pages[index].get(), index));
 }
 
 template<typename Config, unsigned passedNumPages>
-void IsoDirectory<Config, passedNumPages>::scavenge(Vector<DeferredDecommit>& decommits)
+void IsoDirectory<Config, passedNumPages>::scavenge(const std::lock_guard<Mutex>& locker, Vector<DeferredDecommit>& decommits)
 {
     (m_empty & m_committed).forEachSetBit(
         [&] (size_t index) {
-            scavengePage(index, decommits);
+            scavengePage(locker, index, decommits);
         });
 #if BUSE(PARTIAL_SCAVENGE)
     m_highWatermark = 0;
@@ -155,12 +153,12 @@
 
 #if BUSE(PARTIAL_SCAVENGE)
 template<typename Config, unsigned passedNumPages>
-void IsoDirectory<Config, passedNumPages>::scavengeToHighWatermark(Vector<DeferredDecommit>& decommits)
+void IsoDirectory<Config, passedNumPages>::scavengeToHighWatermark(const std::lock_guard<Mutex>& locker, Vector<DeferredDecommit>& decommits)
 {
     (m_empty & m_committed).forEachSetBit(
         [&] (size_t index) {
             if (index > m_highWatermark)
-                scavengePage(index, decommits);
+                scavengePage(locker, index, decommits);
         });
     m_highWatermark = 0;
 }
@@ -168,11 +166,11 @@
 
 template<typename Config, unsigned passedNumPages>
 template<typename Func>
-void IsoDirectory<Config, passedNumPages>::forEachCommittedPage(const Func& func)
+void IsoDirectory<Config, passedNumPages>::forEachCommittedPage(const std::lock_guard<Mutex>&, const Func& func)
 {
     m_committed.forEachSetBit(
         [&] (size_t index) {
-            func(*m_pages[index]);
+            func(*(m_pages[index].get()));
         });
 }
     

Modified: trunk/Source/bmalloc/bmalloc/IsoHeapImpl.cpp (254707 => 254708)


--- trunk/Source/bmalloc/bmalloc/IsoHeapImpl.cpp	2020-01-16 22:04:48 UTC (rev 254707)
+++ trunk/Source/bmalloc/bmalloc/IsoHeapImpl.cpp	2020-01-16 22:05:00 UTC (rev 254708)
@@ -31,7 +31,8 @@
 
 namespace bmalloc {
 
-IsoHeapImplBase::IsoHeapImplBase()
+IsoHeapImplBase::IsoHeapImplBase(Mutex& lock)
+    : lock(lock)
 {
 }
 

Modified: trunk/Source/bmalloc/bmalloc/IsoHeapImpl.h (254707 => 254708)


--- trunk/Source/bmalloc/bmalloc/IsoHeapImpl.h	2020-01-16 22:04:48 UTC (rev 254707)
+++ trunk/Source/bmalloc/bmalloc/IsoHeapImpl.h	2020-01-16 22:05:00 UTC (rev 254708)
@@ -29,6 +29,7 @@
 #include "IsoAllocator.h"
 #include "IsoDirectoryPage.h"
 #include "IsoTLSAllocatorEntry.h"
+#include "Packed.h"
 #include "PhysicalPageMap.h"
 
 namespace bmalloc {
@@ -51,27 +52,48 @@
 #if BUSE(PARTIAL_SCAVENGE)
     virtual void scavengeToHighWatermark(Vector<DeferredDecommit>&) = 0;
 #endif
-    virtual size_t freeableMemory() = 0;
-    virtual size_t footprint() = 0;
     
     void scavengeNow();
     static void finishScavenging(Vector<DeferredDecommit>&);
 
+    void didCommit(void* ptr, size_t bytes);
+    void didDecommit(void* ptr, size_t bytes);
+
+    void isNowFreeable(void* ptr, size_t bytes);
+    void isNoLongerFreeable(void* ptr, size_t bytes);
+
+    size_t freeableMemory();
+    size_t footprint();
+
     void addToAllIsoHeaps();
 
 protected:
-    IsoHeapImplBase();
+    IsoHeapImplBase(Mutex&);
 
     friend class IsoSharedPage;
     friend class AllIsoHeaps;
     
+public:
+    // It's almost always the caller's responsibility to grab the lock. This lock comes from the
+    // (*PerProcess<IsoTLSEntryHolder<IsoTLSDeallocatorEntry<Config>>>::get())->lock. That's pretty weird, and we don't
+    // try to disguise the fact that it's weird. We only do that because heaps in the same size class
+    // share the same deallocator log, so it makes sense for them to also share the same lock to
+    // amortize lock acquisition costs.
+    Mutex& lock;
+protected:
     IsoHeapImplBase* m_next { nullptr };
     std::chrono::steady_clock::time_point m_lastSlowPathTime;
-    std::array<void*, maxAllocationFromShared> m_sharedCells { };
+    size_t m_footprint { 0 };
+    size_t m_freeableMemory { 0 };
+#if ENABLE_PHYSICAL_PAGE_MAP
+    PhysicalPageMap m_physicalPageMap;
+#endif
+    std::array<PackedAlignedPtr<uint8_t, bmalloc::alignment>, maxAllocationFromShared> m_sharedCells { };
+protected:
     unsigned m_numberOfAllocationsFromSharedInOneCycle { 0 };
     unsigned m_availableShared { maxAllocationFromSharedMask };
     AllocationMode m_allocationMode { AllocationMode::Init };
-    
+    bool m_isInlineDirectoryEligibleOrDecommitted { true };
     static_assert(sizeof(m_availableShared) * 8 >= maxAllocationFromShared, "");
 };
 
@@ -83,11 +105,11 @@
 public:
     IsoHeapImpl();
     
-    EligibilityResult<Config> takeFirstEligible();
+    EligibilityResult<Config> takeFirstEligible(const std::lock_guard<Mutex>&);
     
     // Callbacks from directory.
-    void didBecomeEligibleOrDecommited(IsoDirectory<Config, numPagesInInlineDirectory>*);
-    void didBecomeEligibleOrDecommited(IsoDirectory<Config, IsoDirectoryPage<Config>::numPages>*);
+    void didBecomeEligibleOrDecommited(const std::lock_guard<Mutex>&, IsoDirectory<Config, numPagesInInlineDirectory>*);
+    void didBecomeEligibleOrDecommited(const std::lock_guard<Mutex>&, IsoDirectory<Config, IsoDirectoryPage<Config>::numPages>*);
     
     void scavenge(Vector<DeferredDecommit>&) override;
 #if BUSE(PARTIAL_SCAVENGE)
@@ -94,10 +116,6 @@
     void scavengeToHighWatermark(Vector<DeferredDecommit>&) override;
 #endif
 
-    size_t freeableMemory() override;
-
-    size_t footprint() override;
-    
     unsigned allocatorOffset();
     unsigned deallocatorOffset();
 
@@ -106,46 +124,25 @@
     unsigned numCommittedPages();
     
     template<typename Func>
-    void forEachDirectory(const Func&);
+    void forEachDirectory(const std::lock_guard<Mutex>&, const Func&);
     
     template<typename Func>
-    void forEachCommittedPage(const Func&);
+    void forEachCommittedPage(const std::lock_guard<Mutex>&, const Func&);
     
     // This is only accurate when all threads are scavenged. Otherwise it will overestimate.
     template<typename Func>
-    void forEachLiveObject(const Func&);
+    void forEachLiveObject(const std::lock_guard<Mutex>&, const Func&);
 
-    void didCommit(void* ptr, size_t bytes);
-    void didDecommit(void* ptr, size_t bytes);
-
-    void isNowFreeable(void* ptr, size_t bytes);
-    void isNoLongerFreeable(void* ptr, size_t bytes);
-
     AllocationMode updateAllocationMode();
     void* allocateFromShared(const std::lock_guard<Mutex>&, bool abortOnFailure);
-    
-    // It's almost always the caller's responsibility to grab the lock. This lock comes from the
-    // (*PerProcess<IsoTLSEntryHolder<IsoTLSDeallocatorEntry<Config>>>::get())->lock. That's pretty weird, and we don't
-    // try to disguise the fact that it's weird. We only do that because heaps in the same size class
-    // share the same deallocator log, so it makes sense for them to also share the same lock to
-    // amortize lock acquisition costs.
-    Mutex& lock;
 
 private:
+    PackedPtr<IsoDirectoryPage<Config>> m_headDirectory { nullptr };
+    PackedPtr<IsoDirectoryPage<Config>> m_tailDirectory { nullptr };
+    PackedPtr<IsoDirectoryPage<Config>> m_firstEligibleOrDecommitedDirectory { nullptr };
     IsoDirectory<Config, numPagesInInlineDirectory> m_inlineDirectory;
-    IsoDirectoryPage<Config>* m_headDirectory { nullptr };
-    IsoDirectoryPage<Config>* m_tailDirectory { nullptr };
-    size_t m_footprint { 0 };
-    size_t m_freeableMemory { 0 };
-#if ENABLE_PHYSICAL_PAGE_MAP
-    PhysicalPageMap m_physicalPageMap;
-#endif
     unsigned m_nextDirectoryPageIndex { 1 }; // We start at 1 so that the high water mark being zero means we've only allocated in the inline directory since the last scavenge.
     unsigned m_directoryHighWatermark { 0 };
-    
-    bool m_isInlineDirectoryEligibleOrDecommitted { true };
-    IsoDirectoryPage<Config>* m_firstEligibleOrDecommitedDirectory { nullptr };
-    
     IsoTLSEntryHolder<IsoTLSAllocatorEntry<Config>> m_allocator;
 };
 

Modified: trunk/Source/bmalloc/bmalloc/IsoHeapImplInlines.h (254707 => 254708)


--- trunk/Source/bmalloc/bmalloc/IsoHeapImplInlines.h	2020-01-16 22:04:48 UTC (rev 254707)
+++ trunk/Source/bmalloc/bmalloc/IsoHeapImplInlines.h	2020-01-16 22:05:00 UTC (rev 254708)
@@ -34,7 +34,7 @@
 
 template<typename Config>
 IsoHeapImpl<Config>::IsoHeapImpl()
-    : lock((*PerProcess<IsoTLSEntryHolder<IsoTLSDeallocatorEntry<Config>>>::get())->lock)
+    : IsoHeapImplBase((*PerProcess<IsoTLSEntryHolder<IsoTLSDeallocatorEntry<Config>>>::get())->lock)
     , m_inlineDirectory(*this)
     , m_allocator(*this)
 {
@@ -41,10 +41,10 @@
 }
 
 template<typename Config>
-EligibilityResult<Config> IsoHeapImpl<Config>::takeFirstEligible()
+EligibilityResult<Config> IsoHeapImpl<Config>::takeFirstEligible(const std::lock_guard<Mutex>& locker)
 {
     if (m_isInlineDirectoryEligibleOrDecommitted) {
-        EligibilityResult<Config> result = m_inlineDirectory.takeFirstEligible();
+        EligibilityResult<Config> result = m_inlineDirectory.takeFirstEligible(locker);
         if (result.kind == EligibilityKind::Full)
             m_isInlineDirectoryEligibleOrDecommitted = false;
         else
@@ -51,40 +51,46 @@
             return result;
     }
     
-    if (!m_firstEligibleOrDecommitedDirectory) {
-        // If nothing is eligible, it can only be because we have no directories. It wouldn't be the end
-        // of the world if we broke this invariant. It would only mean that didBecomeEligibleOrDecommited() would need
-        // a null check.
-        RELEASE_BASSERT(!m_headDirectory);
-        RELEASE_BASSERT(!m_tailDirectory);
-    }
-    
-    for (; m_firstEligibleOrDecommitedDirectory; m_firstEligibleOrDecommitedDirectory = m_firstEligibleOrDecommitedDirectory->next) {
-        EligibilityResult<Config> result = m_firstEligibleOrDecommitedDirectory->payload.takeFirstEligible();
-        if (result.kind != EligibilityKind::Full) {
-            m_directoryHighWatermark = std::max(m_directoryHighWatermark, m_firstEligibleOrDecommitedDirectory->index());
-            return result;
+    {
+        auto* cursor = m_firstEligibleOrDecommitedDirectory.get();
+        if (!cursor) {
+            // If nothing is eligible, it can only be because we have no directories. It wouldn't be the end
+            // of the world if we broke this invariant. It would only mean that didBecomeEligibleOrDecommited() would need
+            // a null check.
+            RELEASE_BASSERT(!m_headDirectory.get());
+            RELEASE_BASSERT(!m_tailDirectory.get());
+        } else {
+            for (; cursor; cursor = cursor->next) {
+                EligibilityResult<Config> result = cursor->payload.takeFirstEligible(locker);
+                ASSERT(m_firstEligibleOrDecommitedDirectory.get() == cursor);
+                if (result.kind != EligibilityKind::Full) {
+                    m_directoryHighWatermark = std::max(m_directoryHighWatermark, cursor->index());
+                    m_firstEligibleOrDecommitedDirectory = cursor;
+                    return result;
+                }
+            }
+            m_firstEligibleOrDecommitedDirectory = nullptr;
         }
     }
     
     auto* newDirectory = new IsoDirectoryPage<Config>(*this, m_nextDirectoryPageIndex++);
-    if (m_headDirectory) {
+    if (m_headDirectory.get()) {
         m_tailDirectory->next = newDirectory;
         m_tailDirectory = newDirectory;
     } else {
-        RELEASE_BASSERT(!m_tailDirectory);
+        RELEASE_BASSERT(!m_tailDirectory.get());
         m_headDirectory = newDirectory;
         m_tailDirectory = newDirectory;
     }
     m_directoryHighWatermark = newDirectory->index();
     m_firstEligibleOrDecommitedDirectory = newDirectory;
-    EligibilityResult<Config> result = newDirectory->payload.takeFirstEligible();
+    EligibilityResult<Config> result = newDirectory->payload.takeFirstEligible(locker);
     RELEASE_BASSERT(result.kind != EligibilityKind::Full);
     return result;
 }
 
 template<typename Config>
-void IsoHeapImpl<Config>::didBecomeEligibleOrDecommited(IsoDirectory<Config, numPagesInInlineDirectory>* directory)
+void IsoHeapImpl<Config>::didBecomeEligibleOrDecommited(const std::lock_guard<Mutex>&, IsoDirectory<Config, numPagesInInlineDirectory>* directory)
 {
     RELEASE_BASSERT(directory == &m_inlineDirectory);
     m_isInlineDirectoryEligibleOrDecommitted = true;
@@ -91,7 +97,7 @@
 }
 
 template<typename Config>
-void IsoHeapImpl<Config>::didBecomeEligibleOrDecommited(IsoDirectory<Config, IsoDirectoryPage<Config>::numPages>* directory)
+void IsoHeapImpl<Config>::didBecomeEligibleOrDecommited(const std::lock_guard<Mutex>&, IsoDirectory<Config, IsoDirectoryPage<Config>::numPages>* directory)
 {
     RELEASE_BASSERT(m_firstEligibleOrDecommitedDirectory);
     auto* directoryPage = IsoDirectoryPage<Config>::pageFor(directory);
@@ -104,8 +110,9 @@
 {
     std::lock_guard<Mutex> locker(this->lock);
     forEachDirectory(
+        locker,
         [&] (auto& directory) {
-            directory.scavenge(decommits);
+            directory.scavenge(locker, decommits);
         });
     m_directoryHighWatermark = 0;
 }
@@ -116,17 +123,16 @@
 {
     std::lock_guard<Mutex> locker(this->lock);
     if (!m_directoryHighWatermark)
-        m_inlineDirectory.scavengeToHighWatermark(decommits);
-    for (IsoDirectoryPage<Config>* page = m_headDirectory; page; page = page->next) {
+        m_inlineDirectory.scavengeToHighWatermark(locker, decommits);
+    for (IsoDirectoryPage<Config>* page = m_headDirectory.get(); page; page = page->next) {
         if (page->index() >= m_directoryHighWatermark)
-            page->payload.scavengeToHighWatermark(decommits);
+            page->payload.scavengeToHighWatermark(locker, decommits);
     }
     m_directoryHighWatermark = 0;
 }
 #endif
 
-template<typename Config>
-size_t IsoHeapImpl<Config>::freeableMemory()
+inline size_t IsoHeapImplBase::freeableMemory()
 {
     return m_freeableMemory;
 }
@@ -146,8 +152,10 @@
 template<typename Config>
 unsigned IsoHeapImpl<Config>::numLiveObjects()
 {
+    std::lock_guard<Mutex> locker(this->lock);
     unsigned result = 0;
     forEachLiveObject(
+        locker,
         [&] (void*) {
             result++;
         });
@@ -157,8 +165,10 @@
 template<typename Config>
 unsigned IsoHeapImpl<Config>::numCommittedPages()
 {
+    std::lock_guard<Mutex> locker(this->lock);
     unsigned result = 0;
     forEachCommittedPage(
+        locker,
         [&] (IsoPage<Config>&) {
             result++;
         });
@@ -167,40 +177,41 @@
 
 template<typename Config>
 template<typename Func>
-void IsoHeapImpl<Config>::forEachDirectory(const Func& func)
+void IsoHeapImpl<Config>::forEachDirectory(const std::lock_guard<Mutex>&, const Func& func)
 {
     func(m_inlineDirectory);
-    for (IsoDirectoryPage<Config>* page = m_headDirectory; page; page = page->next)
+    for (IsoDirectoryPage<Config>* page = m_headDirectory.get(); page; page = page->next)
         func(page->payload);
 }
 
 template<typename Config>
 template<typename Func>
-void IsoHeapImpl<Config>::forEachCommittedPage(const Func& func)
+void IsoHeapImpl<Config>::forEachCommittedPage(const std::lock_guard<Mutex>& locker, const Func& func)
 {
     forEachDirectory(
+        locker,
         [&] (auto& directory) {
-            directory.forEachCommittedPage(func);
+            directory.forEachCommittedPage(locker, func);
         });
 }
 
 template<typename Config>
 template<typename Func>
-void IsoHeapImpl<Config>::forEachLiveObject(const Func& func)
+void IsoHeapImpl<Config>::forEachLiveObject(const std::lock_guard<Mutex>& locker, const Func& func)
 {
     forEachCommittedPage(
+        locker,
         [&] (IsoPage<Config>& page) {
-            page.forEachLiveObject(func);
+            page.forEachLiveObject(locker, func);
         });
     for (unsigned index = 0; index < maxAllocationFromShared; ++index) {
-        void* pointer = m_sharedCells[index];
+        void* pointer = m_sharedCells[index].get();
         if (pointer && !(m_availableShared & (1U << index)))
             func(pointer);
     }
 }
 
-template<typename Config>
-size_t IsoHeapImpl<Config>::footprint()
+inline size_t IsoHeapImplBase::footprint()
 {
 #if ENABLE_PHYSICAL_PAGE_MAP
     RELEASE_BASSERT(m_footprint == m_physicalPageMap.footprint());
@@ -208,8 +219,7 @@
     return m_footprint;
 }
 
-template<typename Config>
-void IsoHeapImpl<Config>::didCommit(void* ptr, size_t bytes)
+inline void IsoHeapImplBase::didCommit(void* ptr, size_t bytes)
 {
     BUNUSED_PARAM(ptr);
     m_footprint += bytes;
@@ -218,8 +228,7 @@
 #endif
 }
 
-template<typename Config>
-void IsoHeapImpl<Config>::didDecommit(void* ptr, size_t bytes)
+inline void IsoHeapImplBase::didDecommit(void* ptr, size_t bytes)
 {
     BUNUSED_PARAM(ptr);
     m_footprint -= bytes;
@@ -228,15 +237,13 @@
 #endif
 }
 
-template<typename Config>
-void IsoHeapImpl<Config>::isNowFreeable(void* ptr, size_t bytes)
+inline void IsoHeapImplBase::isNowFreeable(void* ptr, size_t bytes)
 {
     BUNUSED_PARAM(ptr);
     m_freeableMemory += bytes;
 }
 
-template<typename Config>
-void IsoHeapImpl<Config>::isNoLongerFreeable(void* ptr, size_t bytes)
+inline void IsoHeapImplBase::isNoLongerFreeable(void* ptr, size_t bytes)
 {
     BUNUSED_PARAM(ptr);
     m_freeableMemory -= bytes;
@@ -301,7 +308,7 @@
     unsigned indexPlusOne = __builtin_ffs(m_availableShared);
     BASSERT(indexPlusOne);
     unsigned index = indexPlusOne - 1;
-    void* result = m_sharedCells[index];
+    void* result = m_sharedCells[index].get();
     if (result) {
         if (verbose)
             fprintf(stderr, "%p: allocated %p from shared again of size %u\n", this, result, Config::objectSize);
@@ -314,7 +321,7 @@
             fprintf(stderr, "%p: allocated %p from shared of size %u\n", this, result, Config::objectSize);
         BASSERT(index < IsoHeapImplBase::maxAllocationFromShared);
         *indexSlotFor<Config>(result) = index;
-        m_sharedCells[index] = result;
+        m_sharedCells[index] = bitwise_cast<uint8_t*>(result);
     }
     BASSERT(result);
     m_availableShared &= ~(1U << index);

Modified: trunk/Source/bmalloc/bmalloc/IsoPage.h (254707 => 254708)


--- trunk/Source/bmalloc/bmalloc/IsoPage.h	2020-01-16 22:04:48 UTC (rev 254707)
+++ trunk/Source/bmalloc/bmalloc/IsoPage.h	2020-01-16 22:05:00 UTC (rev 254708)
@@ -28,7 +28,9 @@
 #include "Bits.h"
 #include "DeferredTrigger.h"
 #include "FreeList.h"
+#include "Mutex.h"
 #include <climits>
+#include <mutex>
 
 namespace bmalloc {
 
@@ -42,6 +44,8 @@
 
     explicit IsoPageBase(bool isShared)
         : m_isShared(isShared)
+        , m_eligibilityHasBeenNoted(true)
+        , m_isInUseForAllocation(false)
     {
     }
 
@@ -52,7 +56,9 @@
 protected:
     BEXPORT static void* allocatePageMemory();
 
-    bool m_isShared { false };
+    bool m_isShared : 1;
+    bool m_eligibilityHasBeenNoted : 1;
+    bool m_isInUseForAllocation : 1;
 };
 
 template<typename Config>
@@ -71,19 +77,19 @@
 
     unsigned index() const { return m_index; }
     
-    void free(void*);
+    void free(const std::lock_guard<Mutex>&, void*);
 
     // Called after this page is already selected for allocation.
-    FreeList startAllocating();
+    FreeList startAllocating(const std::lock_guard<Mutex>&);
     
     // Called after the allocator picks another page to replace this one.
-    void stopAllocating(FreeList freeList);
+    void stopAllocating(const std::lock_guard<Mutex>&, FreeList);
 
     IsoDirectoryBase<Config>& directory() { return m_directory; }
     bool isInUseForAllocation() const { return m_isInUseForAllocation; }
     
     template<typename Func>
-    void forEachLiveObject(const Func&);
+    void forEachLiveObject(const std::lock_guard<Mutex>&, const Func&);
     
     IsoHeapImpl<Config>& heap();
     
@@ -111,16 +117,13 @@
 
     // This must have a trivial destructor.
 
-    bool m_eligibilityHasBeenNoted { true };
-    bool m_isInUseForAllocation { false };
     DeferredTrigger<IsoPageTrigger::Eligible> m_eligibilityTrigger;
     DeferredTrigger<IsoPageTrigger::Empty> m_emptyTrigger;
-
+    uint8_t m_numNonEmptyWords { 0 };
+    static_assert(bitsArrayLength(numObjects) <= UINT8_MAX);
+    unsigned m_index { UINT_MAX };
     IsoDirectoryBase<Config>& m_directory;
-    unsigned m_index { UINT_MAX };
-    
     unsigned m_allocBits[bitsArrayLength(numObjects)];
-    unsigned m_numNonEmptyWords { 0 };
 };
 
 } // namespace bmalloc

Modified: trunk/Source/bmalloc/bmalloc/IsoPageInlines.h (254707 => 254708)


--- trunk/Source/bmalloc/bmalloc/IsoPageInlines.h	2020-01-16 22:04:48 UTC (rev 254707)
+++ trunk/Source/bmalloc/bmalloc/IsoPageInlines.h	2020-01-16 22:05:00 UTC (rev 254708)
@@ -47,8 +47,8 @@
 template<typename Config>
 IsoPage<Config>::IsoPage(IsoDirectoryBase<Config>& directory, unsigned index)
     : IsoPageBase(false)
+    , m_index(index)
     , m_directory(directory)
-    , m_index(index)
 {
     memset(m_allocBits, 0, sizeof(m_allocBits));
 }
@@ -65,7 +65,7 @@
 }
 
 template<typename Config>
-void IsoPage<Config>::free(void* passedPtr)
+void IsoPage<Config>::free(const std::lock_guard<Mutex>& locker, void* passedPtr)
 {
     BASSERT(!m_isShared);
     unsigned offset = static_cast<char*>(passedPtr) - reinterpret_cast<char*>(this);
@@ -72,7 +72,7 @@
     unsigned index = offset / Config::objectSize;
     
     if (!m_eligibilityHasBeenNoted) {
-        m_eligibilityTrigger.didBecome(*this);
+        m_eligibilityTrigger.didBecome(locker, *this);
         m_eligibilityHasBeenNoted = true;
     }
     
@@ -82,12 +82,12 @@
     unsigned newWord = m_allocBits[wordIndex] &= ~(1 << bitIndex);
     if (!newWord) {
         if (!--m_numNonEmptyWords)
-            m_emptyTrigger.didBecome(*this);
+            m_emptyTrigger.didBecome(locker, *this);
     }
 }
 
 template<typename Config>
-FreeList IsoPage<Config>::startAllocating()
+FreeList IsoPage<Config>::startAllocating(const std::lock_guard<Mutex>&)
 {
     static constexpr bool verbose = false;
     
@@ -208,7 +208,7 @@
 }
 
 template<typename Config>
-void IsoPage<Config>::stopAllocating(FreeList freeList)
+void IsoPage<Config>::stopAllocating(const std::lock_guard<Mutex>& locker, FreeList freeList)
 {
     static constexpr bool verbose = false;
     
@@ -217,19 +217,19 @@
     
     freeList.forEach<Config>(
         [&] (void* ptr) {
-            free(ptr);
+            free(locker, ptr);
         });
 
     RELEASE_BASSERT(m_isInUseForAllocation);
     m_isInUseForAllocation = false;
 
-    m_eligibilityTrigger.handleDeferral(*this);
-    m_emptyTrigger.handleDeferral(*this);
+    m_eligibilityTrigger.handleDeferral(locker, *this);
+    m_emptyTrigger.handleDeferral(locker, *this);
 }
 
 template<typename Config>
 template<typename Func>
-void IsoPage<Config>::forEachLiveObject(const Func& func)
+void IsoPage<Config>::forEachLiveObject(const std::lock_guard<Mutex>&, const Func& func)
 {
     for (unsigned wordIndex = 0; wordIndex < bitsArrayLength(numObjects); ++wordIndex) {
         unsigned word = m_allocBits[wordIndex];

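The signature changes in IsoPageInlines.h are the lock-witness idiom described in the change log: because PackedPtr loads and stores are not atomic, functions that touch packed state now take a const std::lock_guard<Mutex>& so they can only be reached while the lock is held. A self-contained sketch of the idiom using std::mutex (illustrative only, not bmalloc's Mutex):

    #include <cstdio>
    #include <mutex>

    class Counter {
    public:
        std::mutex lock;

        // Callable only with proof that `lock` is held: a lock_guard can only be
        // constructed by actually locking the mutex.
        void increment(const std::lock_guard<std::mutex>&) { ++m_value; }
        int get(const std::lock_guard<std::mutex>&) const { return m_value; }

    private:
        int m_value { 0 };
    };

    int main()
    {
        Counter counter;
        {
            std::lock_guard<std::mutex> locker(counter.lock);
            counter.increment(locker);
            printf("%d\n", counter.get(locker)); // 1
        }
        return 0;
    }
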
Modified: trunk/Source/bmalloc/bmalloc/IsoSharedHeap.h (254707 => 254708)


--- trunk/Source/bmalloc/bmalloc/IsoSharedHeap.h	2020-01-16 22:04:48 UTC (rev 254707)
+++ trunk/Source/bmalloc/bmalloc/IsoSharedHeap.h	2020-01-16 22:05:00 UTC (rev 254708)
@@ -53,7 +53,7 @@
 
 class IsoSharedHeap : public StaticPerProcess<IsoSharedHeap> {
 public:
-    IsoSharedHeap(std::lock_guard<Mutex>&)
+    IsoSharedHeap(const std::lock_guard<Mutex>&)
     {
     }
 
@@ -62,7 +62,7 @@
 
 private:
     template<unsigned>
-    void* allocateSlow(bool abortOnFailure);
+    void* allocateSlow(const std::lock_guard<Mutex>&, bool abortOnFailure);
 
     IsoSharedPage* m_currentPage { nullptr };
     VariadicBumpAllocator m_allocator;

Modified: trunk/Source/bmalloc/bmalloc/IsoSharedHeapInlines.h (254707 => 254708)


--- trunk/Source/bmalloc/bmalloc/IsoSharedHeapInlines.h	2020-01-16 22:04:48 UTC (rev 254707)
+++ trunk/Source/bmalloc/bmalloc/IsoSharedHeapInlines.h	2020-01-16 22:05:00 UTC (rev 254708)
@@ -55,12 +55,12 @@
     constexpr unsigned objectSize = computeObjectSizeForSharedCell(passedObjectSize);
     return m_allocator.template allocate<objectSize>(
         [&] () -> void* {
-            return allocateSlow<passedObjectSize>(abortOnFailure);
+            return allocateSlow<passedObjectSize>(locker, abortOnFailure);
         });
 }
 
 template<unsigned passedObjectSize>
-BNO_INLINE void* IsoSharedHeap::allocateSlow(bool abortOnFailure)
+BNO_INLINE void* IsoSharedHeap::allocateSlow(const std::lock_guard<Mutex>& locker, bool abortOnFailure)
 {
     Scavenger& scavenger = *Scavenger::get();
     scavenger.didStartGrowing();
@@ -73,10 +73,10 @@
     }
 
     if (m_currentPage)
-        m_currentPage->stopAllocating();
+        m_currentPage->stopAllocating(locker);
 
     m_currentPage = page;
-    m_allocator = m_currentPage->startAllocating();
+    m_allocator = m_currentPage->startAllocating(locker);
 
     constexpr unsigned objectSize = computeObjectSizeForSharedCell(passedObjectSize);
     return m_allocator.allocate<objectSize>([] () { BCRASH(); return nullptr; });

Modified: trunk/Source/bmalloc/bmalloc/IsoSharedPage.h (254707 => 254708)


--- trunk/Source/bmalloc/bmalloc/IsoSharedPage.h	2020-01-16 22:04:48 UTC (rev 254707)
+++ trunk/Source/bmalloc/bmalloc/IsoSharedPage.h	2020-01-16 22:05:00 UTC (rev 254708)
@@ -39,8 +39,8 @@
 
     template<typename Config, typename Type>
     void free(const std::lock_guard<Mutex>&, api::IsoHeap<Type>&, void*);
-    VariadicBumpAllocator startAllocating();
-    void stopAllocating();
+    VariadicBumpAllocator startAllocating(const std::lock_guard<Mutex>&);
+    void stopAllocating(const std::lock_guard<Mutex>&);
 
 private:
     IsoSharedPage()

Modified: trunk/Source/bmalloc/bmalloc/IsoSharedPageInlines.h (254707 => 254708)


--- trunk/Source/bmalloc/bmalloc/IsoSharedPageInlines.h	2020-01-16 22:04:48 UTC (rev 254707)
+++ trunk/Source/bmalloc/bmalloc/IsoSharedPageInlines.h	2020-01-16 22:05:00 UTC (rev 254708)
@@ -42,11 +42,11 @@
     // IsoDeallocator::deallocate is called from the delete operator. The call is dispatched through the vtable if a virtual destructor exists.
     // If the vptr has been replaced with some other vptr, we may accidentally chain this pointer to the wrong HeapImplBase, which completely defeats IsoHeap's goal.
     // To harden against that, we validate that this pointer was actually allocated for the specific HeapImplBase by checking whether it is listed in HeapImplBase's shared cells.
-    RELEASE_BASSERT(heapImpl.m_sharedCells[index] == ptr);
+    RELEASE_BASSERT(heapImpl.m_sharedCells[index].get() == ptr);
     heapImpl.m_availableShared |= (1U << index);
 }
 
-inline VariadicBumpAllocator IsoSharedPage::startAllocating()
+inline VariadicBumpAllocator IsoSharedPage::startAllocating(const std::lock_guard<Mutex>&)
 {
     static constexpr bool verbose = false;
 
@@ -61,7 +61,7 @@
     return VariadicBumpAllocator(payloadEnd, remaining);
 }
 
-inline void IsoSharedPage::stopAllocating()
+inline void IsoSharedPage::stopAllocating(const std::lock_guard<Mutex>&)
 {
     static constexpr bool verbose = false;
 

Modified: trunk/Source/bmalloc/bmalloc/IsoTLS.h (254707 => 254708)


--- trunk/Source/bmalloc/bmalloc/IsoTLS.h	2020-01-16 22:04:48 UTC (rev 254707)
+++ trunk/Source/bmalloc/bmalloc/IsoTLS.h	2020-01-16 22:05:00 UTC (rev 254708)
@@ -60,8 +60,8 @@
     template<typename Config, typename Type>
     static void* allocateImpl(api::IsoHeap<Type>&, bool abortOnFailure);
     
-    template<typename Config>
-    void* allocateFast(unsigned offset, bool abortOnFailure);
+    template<typename Config, typename Type>
+    void* allocateFast(api::IsoHeap<Type>&, unsigned offset, bool abortOnFailure);
     
     template<typename Config, typename Type>
     static void* allocateSlow(api::IsoHeap<Type>&, bool abortOnFailure);

Modified: trunk/Source/bmalloc/bmalloc/IsoTLSAllocatorEntry.h (254707 => 254708)


--- trunk/Source/bmalloc/bmalloc/IsoTLSAllocatorEntry.h	2020-01-16 22:04:48 UTC (rev 254707)
+++ trunk/Source/bmalloc/bmalloc/IsoTLSAllocatorEntry.h	2020-01-16 22:05:00 UTC (rev 254708)
@@ -42,6 +42,8 @@
     IsoTLSAllocatorEntry(IsoHeapImpl<Config>&);
 
     void construct(void* dst) override;
+
+    void scavenge(void* entry) override;
     
     IsoHeapImpl<Config>& m_heap;
 };

Modified: trunk/Source/bmalloc/bmalloc/IsoTLSAllocatorEntryInlines.h (254707 => 254708)


--- trunk/Source/bmalloc/bmalloc/IsoTLSAllocatorEntryInlines.h	2020-01-16 22:04:48 UTC (rev 254707)
+++ trunk/Source/bmalloc/bmalloc/IsoTLSAllocatorEntryInlines.h	2020-01-16 22:05:00 UTC (rev 254708)
@@ -46,5 +46,11 @@
     new (dst) IsoAllocator<Config>(m_heap);
 }
 
+template<typename Config>
+void IsoTLSAllocatorEntry<Config>::scavenge(void* entry)
+{
+    static_cast<IsoAllocator<Config>*>(entry)->scavenge(m_heap);
+}
+
 } // namespace bmalloc
 

Modified: trunk/Source/bmalloc/bmalloc/IsoTLSDeallocatorEntry.h (254707 => 254708)


--- trunk/Source/bmalloc/bmalloc/IsoTLSDeallocatorEntry.h	2020-01-16 22:04:48 UTC (rev 254707)
+++ trunk/Source/bmalloc/bmalloc/IsoTLSDeallocatorEntry.h	2020-01-16 22:05:00 UTC (rev 254708)
@@ -46,6 +46,7 @@
     IsoTLSDeallocatorEntry(const std::lock_guard<Mutex>&);
 
     void construct(void* entry) override;
+    void scavenge(void* entry) override;
 };
 
 } // namespace bmalloc

Modified: trunk/Source/bmalloc/bmalloc/IsoTLSDeallocatorEntryInlines.h (254707 => 254708)


--- trunk/Source/bmalloc/bmalloc/IsoTLSDeallocatorEntryInlines.h	2020-01-16 22:04:48 UTC (rev 254707)
+++ trunk/Source/bmalloc/bmalloc/IsoTLSDeallocatorEntryInlines.h	2020-01-16 22:05:00 UTC (rev 254708)
@@ -43,5 +43,11 @@
     new (entry) IsoDeallocator<Config>(lock);
 }
 
+template<typename Config>
+void IsoTLSDeallocatorEntry<Config>::scavenge(void* entry)
+{
+    static_cast<IsoDeallocator<Config>*>(entry)->scavenge();
+}
+
 } // namespace bmalloc
 

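The allocator and deallocator entries above now implement scavenge() themselves rather than inheriting a generic forwarding override, because IsoAllocator::scavenge() now takes the heap as an argument while IsoDeallocator::scavenge() does not. A stripped-down sketch of that shape (hypothetical names, not bmalloc code):

    #include <cstdio>

    struct Heap { const char* name; };
    struct Allocator { void scavenge(Heap& heap) { printf("allocator scavenged %s\n", heap.name); } };
    struct Deallocator { void scavenge() { printf("deallocator scavenged\n"); } };

    class Entry {
    public:
        virtual ~Entry() = default;
        virtual void scavenge(void* entry) = 0;
    };

    // The allocator entry must forward its heap; the deallocator entry needs no extra state.
    class AllocatorEntry : public Entry {
    public:
        explicit AllocatorEntry(Heap& heap) : m_heap(heap) { }
        void scavenge(void* entry) override { static_cast<Allocator*>(entry)->scavenge(m_heap); }
    private:
        Heap& m_heap;
    };

    class DeallocatorEntry : public Entry {
    public:
        void scavenge(void* entry) override { static_cast<Deallocator*>(entry)->scavenge(); }
    };

    int main()
    {
        Heap heap { "iso heap" };
        Allocator allocator;
        Deallocator deallocator;
        AllocatorEntry allocatorEntry(heap);
        DeallocatorEntry deallocatorEntry;
        allocatorEntry.scavenge(&allocator);
        deallocatorEntry.scavenge(&deallocator);
        return 0;
    }
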
Modified: trunk/Source/bmalloc/bmalloc/IsoTLSEntry.cpp (254707 => 254708)


--- trunk/Source/bmalloc/bmalloc/IsoTLSEntry.cpp	2020-01-16 22:04:48 UTC (rev 254707)
+++ trunk/Source/bmalloc/bmalloc/IsoTLSEntry.cpp	2020-01-16 22:05:00 UTC (rev 254708)
@@ -31,10 +31,8 @@
 
 namespace bmalloc {
 
-IsoTLSEntry::IsoTLSEntry(size_t alignment, size_t size)
-    : m_offset(UINT_MAX)
-    , m_alignment(alignment)
-    , m_size(size)
+IsoTLSEntry::IsoTLSEntry(size_t size)
+    : m_size(size)
 {
 }
 

Modified: trunk/Source/bmalloc/bmalloc/IsoTLSEntry.h (254707 => 254708)


--- trunk/Source/bmalloc/bmalloc/IsoTLSEntry.h	2020-01-16 22:04:48 UTC (rev 254707)
+++ trunk/Source/bmalloc/bmalloc/IsoTLSEntry.h	2020-01-16 22:05:00 UTC (rev 254708)
@@ -65,7 +65,7 @@
     virtual ~IsoTLSEntry();
     
     size_t offset() const { return m_offset; }
-    size_t alignment() const { return m_alignment; }
+    size_t alignment() const { return sizeof(void*); }
     size_t size() const { return m_size; }
     size_t extent() const { return m_offset + m_size; }
     
@@ -78,7 +78,7 @@
     void walkUpToInclusive(IsoTLSEntry*, const Func&);
 
 protected:
-    IsoTLSEntry(size_t alignment, size_t size);
+    IsoTLSEntry(size_t size);
     
 private:
     friend class IsoTLS;
@@ -86,15 +86,14 @@
 
     IsoTLSEntry* m_next { nullptr };
     
-    size_t m_offset; // Computed in constructor.
-    size_t m_alignment;
-    size_t m_size;
+    unsigned m_offset { UINT_MAX }; // Computed in constructor.
+    unsigned m_size;
 };
 
 template<typename EntryType>
 class DefaultIsoTLSEntry : public IsoTLSEntry {
 public:
-    ~DefaultIsoTLSEntry();
+    ~DefaultIsoTLSEntry() = default;
     
 protected:
     DefaultIsoTLSEntry();
@@ -106,8 +105,6 @@
     // Likewise, this is separate from scavenging. When the TLS is shutting down, we will be asked to
     // scavenge and then we will be asked to destruct.
     void destruct(void* entry) override;
-
-    void scavenge(void* entry) override;
 };
 
 } // namespace bmalloc

Modified: trunk/Source/bmalloc/bmalloc/IsoTLSEntryInlines.h (254707 => 254708)


--- trunk/Source/bmalloc/bmalloc/IsoTLSEntryInlines.h	2020-01-16 22:04:48 UTC (rev 254707)
+++ trunk/Source/bmalloc/bmalloc/IsoTLSEntryInlines.h	2020-01-16 22:05:00 UTC (rev 254708)
@@ -43,16 +43,13 @@
 
 template<typename EntryType>
 DefaultIsoTLSEntry<EntryType>::DefaultIsoTLSEntry()
-    : IsoTLSEntry(alignof(EntryType), sizeof(EntryType))
+    : IsoTLSEntry(sizeof(EntryType))
 {
+    static_assert(sizeof(EntryType) <= UINT32_MAX);
+    static_assert(sizeof(void*) == alignof(EntryType), "Because IsoTLSEntry includes a vtable, its alignment should equal the pointer size");
 }
 
 template<typename EntryType>
-DefaultIsoTLSEntry<EntryType>::~DefaultIsoTLSEntry()
-{
-}
-
-template<typename EntryType>
 void DefaultIsoTLSEntry<EntryType>::move(void* passedSrc, void* dst)
 {
     EntryType* src = static_cast<EntryType*>(passedSrc);
@@ -67,12 +64,5 @@
     entry->~EntryType();
 }
 
-template<typename EntryType>
-void DefaultIsoTLSEntry<EntryType>::scavenge(void* passedEntry)
-{
-    EntryType* entry = static_cast<EntryType*>(passedEntry);
-    entry->scavenge();
-}
-
 } // namespace bmalloc
 

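The DefaultIsoTLSEntry constructor above drops the stored alignment and instead asserts that alignof(EntryType) equals sizeof(void*), which it can do because every entry type starts with a vtable pointer. A small sketch of that property (illustrative; the exact equality in the patch additionally relies on the entry types having no member with stricter alignment than a pointer):

    #include <cstdio>

    struct HasVTable {
        virtual ~HasVTable() = default;
        char payload { 0 };
    };

    // On common C++ ABIs the vptr forces at least pointer alignment.
    static_assert(alignof(HasVTable) >= alignof(void*), "polymorphic classes carry a vptr");

    int main()
    {
        printf("alignof(HasVTable) = %zu, alignof(void*) = %zu\n", alignof(HasVTable), alignof(void*));
        return 0;
    }
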
Modified: trunk/Source/bmalloc/bmalloc/IsoTLSInlines.h (254707 => 254708)


--- trunk/Source/bmalloc/bmalloc/IsoTLSInlines.h	2020-01-16 22:04:48 UTC (rev 254707)
+++ trunk/Source/bmalloc/bmalloc/IsoTLSInlines.h	2020-01-16 22:05:00 UTC (rev 254708)
@@ -60,7 +60,7 @@
         return;
     unsigned offset = handle.allocatorOffset();
     if (offset < tls->m_extent)
-        reinterpret_cast<IsoAllocator<typename api::IsoHeap<Type>::Config>*>(tls->m_data + offset)->scavenge();
+        reinterpret_cast<IsoAllocator<typename api::IsoHeap<Type>::Config>*>(tls->m_data + offset)->scavenge(handle.impl());
     offset = handle.deallocatorOffset();
     if (offset < tls->m_extent)
         reinterpret_cast<IsoDeallocator<typename api::IsoHeap<Type>::Config>*>(tls->m_data + offset)->scavenge();
@@ -74,13 +74,13 @@
     IsoTLS* tls = get();
     if (!tls || offset >= tls->m_extent)
         return allocateSlow<Config>(handle, abortOnFailure);
-    return tls->allocateFast<Config>(offset, abortOnFailure);
+    return tls->allocateFast<Config>(handle, offset, abortOnFailure);
 }
 
-template<typename Config>
-void* IsoTLS::allocateFast(unsigned offset, bool abortOnFailure)
+template<typename Config, typename Type>
+void* IsoTLS::allocateFast(api::IsoHeap<Type>& handle, unsigned offset, bool abortOnFailure)
 {
-    return reinterpret_cast<IsoAllocator<Config>*>(m_data + offset)->allocate(abortOnFailure);
+    return reinterpret_cast<IsoAllocator<Config>*>(m_data + offset)->allocate(handle.impl(), abortOnFailure);
 }
 
 template<typename Config, typename Type>
@@ -108,7 +108,7 @@
     
     IsoTLS* tls = ensureHeapAndEntries(handle);
     
-    return tls->allocateFast<Config>(handle.allocatorOffset(), abortOnFailure);
+    return tls->allocateFast<Config>(handle, handle.allocatorOffset(), abortOnFailure);
 }
 
 template<typename Config, typename Type>

Modified: trunk/Source/bmalloc/bmalloc/IsoTLSLayout.cpp (254707 => 254708)


--- trunk/Source/bmalloc/bmalloc/IsoTLSLayout.cpp	2020-01-16 22:04:48 UTC (rev 254707)
+++ trunk/Source/bmalloc/bmalloc/IsoTLSLayout.cpp	2020-01-16 22:05:00 UTC (rev 254708)
@@ -44,7 +44,9 @@
     std::lock_guard<Mutex> locking(addingMutex);
     if (m_head) {
         RELEASE_BASSERT(m_tail);
-        entry->m_offset = roundUpToMultipleOf(entry->alignment(), m_tail->extent());
+        size_t offset = roundUpToMultipleOf(entry->alignment(), m_tail->extent());
+        RELEASE_BASSERT(offset < UINT_MAX);
+        entry->m_offset = offset;
         std::atomic_thread_fence(std::memory_order_seq_cst);
         m_tail->m_next = entry;
         m_tail = entry;

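The IsoTLSLayout change above computes each entry's offset by rounding the previous extent up to the entry's alignment and then checks that the result fits in the narrower unsigned m_offset field. A minimal sketch of that computation with a generic rounding helper (bmalloc's own roundUpToMultipleOf is constrained to power-of-two divisors):

    #include <cassert>
    #include <climits>
    #include <cstddef>
    #include <cstdio>

    size_t roundUpToMultipleOf(size_t divisor, size_t x)
    {
        return ((x + divisor - 1) / divisor) * divisor; // divisor must be non-zero.
    }

    int main()
    {
        size_t tailExtent = 41;           // Where the previous entry ends.
        size_t alignment = sizeof(void*); // Every entry is now pointer-aligned.
        size_t offset = roundUpToMultipleOf(alignment, tailExtent);
        assert(offset < UINT_MAX);        // Must fit the unsigned m_offset field.
        printf("next entry offset = %zu\n", offset); // 48 on a 64-bit target.
        return 0;
    }
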
Added: trunk/Source/bmalloc/bmalloc/Packed.h (0 => 254708)


--- trunk/Source/bmalloc/bmalloc/Packed.h	                        (rev 0)
+++ trunk/Source/bmalloc/bmalloc/Packed.h	2020-01-16 22:05:00 UTC (rev 254708)
@@ -0,0 +1,236 @@
+/*
+ * Copyright (C) 2019-2020 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "Algorithm.h"
+#include "StdLibExtras.h"
+#include <array>
+
+namespace bmalloc {
+
+template<typename T>
+class Packed {
+public:
+    static_assert(std::is_trivial<T>::value);
+    static constexpr bool isPackedType = true;
+
+    Packed()
+        : Packed(T { })
+    {
+    }
+
+    Packed(const T& value)
+    {
+        memcpy(m_storage.data(), &value, sizeof(T));
+    }
+
+    T get() const
+    {
+        T value { };
+        memcpy(&value, m_storage.data(), sizeof(T));
+        return value;
+    }
+
+    void set(const T& value)
+    {
+        memcpy(m_storage.data(), &value, sizeof(T));
+    }
+
+    Packed<T>& operator=(const T& value)
+    {
+        set(value);
+        return *this;
+    }
+
+    template<class U>
+    T exchange(U&& newValue)
+    {
+        T oldValue = get();
+        set(std::forward<U>(newValue));
+        return oldValue;
+    }
+
+    void swap(Packed& other)
+    {
+        m_storage.swap(other.m_storage);
+    }
+
+    template<typename Other, typename = std::enable_if_t<Other::isPackedType>>
+    void swap(Other& other)
+    {
+        T t1 = get();
+        T t2 = other.get();
+        set(t2);
+        other.set(t1);
+    }
+
+    void swap(T& t2)
+    {
+        T t1 = get();
+        std::swap(t1, t2);
+        set(t1);
+    }
+
+private:
+    std::array<uint8_t, sizeof(T)> m_storage;
+};
+
+// PackedAlignedPtr can take an alignment parameter too. PackedAlignedPtr uses this alignment information only when it is profitable,
+// that is, only when it lets us shrink the storage. For example, since the effective pointer width on Darwin ARM64 is 36 bits and
+// JSCells are aligned to 16 bytes, we can shift away 4 bits and compact a cell pointer into 4 bytes (32 bits).
+template<typename T, size_t alignment = alignof(T)>
+class PackedAlignedPtr {
+public:
+    static_assert(isPowerOfTwo(alignment), "Alignment needs to be power-of-two");
+    static constexpr bool isPackedType = true;
+    static constexpr unsigned alignmentShiftSizeIfProfitable = getLSBSetNonZeroConstexpr(alignment);
+    static constexpr unsigned storageSizeWithoutAlignmentShift = roundUpToMultipleOf<8, uintptr_t>(BOS_EFFECTIVE_ADDRESS_WIDTH) / 8;
+    static constexpr unsigned storageSizeWithAlignmentShift = roundUpToMultipleOf<8, uintptr_t>(BOS_EFFECTIVE_ADDRESS_WIDTH - alignmentShiftSizeIfProfitable) / 8;
+    static constexpr bool isAlignmentShiftProfitable = storageSizeWithoutAlignmentShift > storageSizeWithAlignmentShift;
+    static constexpr unsigned alignmentShiftSize = isAlignmentShiftProfitable ? alignmentShiftSizeIfProfitable : 0;
+    static constexpr unsigned storageSize = storageSizeWithAlignmentShift;
+
+    constexpr PackedAlignedPtr()
+        : m_storage()
+    {
+    }
+
+    constexpr PackedAlignedPtr(std::nullptr_t)
+        : m_storage()
+    {
+    }
+
+    PackedAlignedPtr(T* value)
+    {
+        set(value);
+    }
+
+    T* get() const
+    {
+        // FIXME: PackedPtr<> can load memory with one mov by checking page boundary.
+        // https://bugs.webkit.org/show_bug.cgi?id=197754
+        uintptr_t value = 0;
+#if BCPU(LITTLE_ENDIAN)
+        memcpy(&value, m_storage.data(), storageSize);
+#else
+        memcpy(bitwise_cast<uint8_t*>(&value) + (sizeof(void*) - storageSize), m_storage.data(), storageSize);
+#endif
+        if (isAlignmentShiftProfitable)
+            value <<= alignmentShiftSize;
+        return bitwise_cast<T*>(value);
+    }
+
+    void set(T* passedValue)
+    {
+        uintptr_t value = bitwise_cast<uintptr_t>(passedValue);
+        if (isAlignmentShiftProfitable)
+            value >>= alignmentShiftSize;
+#if BCPU(LITTLE_ENDIAN)
+        memcpy(m_storage.data(), &value, storageSize);
+#else
+        memcpy(m_storage.data(), bitwise_cast<uint8_t*>(&value) + (sizeof(void*) - storageSize), storageSize);
+#endif
+    }
+
+    void clear()
+    {
+        set(nullptr);
+    }
+
+    T* operator->() const { return get(); }
+    T& operator*() const { return *get(); }
+    bool operator!() const { return !get(); }
+
+    // This conversion operator allows implicit conversion to bool but not to other integer types.
+    typedef T* (PackedAlignedPtr::*UnspecifiedBoolType);
+    operator UnspecifiedBoolType() const { return get() ? &PackedAlignedPtr::m_storage : nullptr; }
+    explicit operator bool() const { return get(); }
+
+    PackedAlignedPtr& operator=(T* value)
+    {
+        set(value);
+        return *this;
+    }
+
+    template<class U>
+    T* exchange(U&& newValue)
+    {
+        T* oldValue = get();
+        set(std::forward<U>(newValue));
+        return oldValue;
+    }
+
+    void swap(std::nullptr_t) { clear(); }
+
+    void swap(PackedAlignedPtr& other)
+    {
+        m_storage.swap(other.m_storage);
+    }
+
+    template<typename Other, typename = std::enable_if_t<Other::isPackedType>>
+    void swap(Other& other)
+    {
+        T* t1 = get();
+        T* t2 = other.get();
+        set(t2);
+        other.set(t1);
+    }
+
+    void swap(T* t2)
+    {
+        T* t1 = get();
+        std::swap(t1, t2);
+        set(t1);
+    }
+
+private:
+    std::array<uint8_t, storageSize> m_storage;
+};
+
+template<typename T>
+class Packed<T*> : public PackedAlignedPtr<T, 1> {
+public:
+    using Base = PackedAlignedPtr<T, 1>;
+    using Base::Base;
+};
+
+template<typename T>
+using PackedPtr = Packed<T*>;
+
+template<typename T>
+struct PackedPtrTraits {
+    template<typename U> using RebindTraits = PackedPtrTraits<U>;
+
+    using StorageType = PackedPtr<T>;
+
+    template<class U> static T* exchange(StorageType& ptr, U&& newValue) { return ptr.exchange(newValue); }
+
+    template<typename Other> static void swap(PackedPtr<T>& a, Other& b) { a.swap(b); }
+
+    static T* unwrap(const StorageType& ptr) { return ptr.get(); }
+};
+
+} // namespace bmalloc

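Packed.h above is the heart of the size reduction: pointers are stored as byte arrays sized to the effective address width and reloaded with memcpy, which is why they are no longer atomic and need the lock-witness discipline. A hypothetical little-endian sketch of the same idea with a hard-coded 48-bit address width (the real class derives the width from BOS_EFFECTIVE_ADDRESS_WIDTH and also handles alignment shifting and big-endian layouts):

    #include <array>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    class SmallPtr {
    public:
        void set(void* pointer)
        {
            uintptr_t value = reinterpret_cast<uintptr_t>(pointer);
            std::memcpy(m_storage.data(), &value, m_storage.size()); // Copies the low 6 bytes (little-endian).
        }

        void* get() const
        {
            uintptr_t value = 0;
            std::memcpy(&value, m_storage.data(), m_storage.size());
            return reinterpret_cast<void*>(value);
        }

    private:
        std::array<uint8_t, 6> m_storage { }; // 6 bytes of storage instead of 8.
    };

    int main()
    {
        int object = 42;
        SmallPtr pointer;
        pointer.set(&object);
        printf("sizeof(void*) = %zu, sizeof(SmallPtr) = %zu, *get() = %d\n",
            sizeof(void*), sizeof(SmallPtr), *static_cast<int*>(pointer.get()));
        return 0;
    }
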
Modified: trunk/Source/bmalloc/bmalloc/Scavenger.cpp (254707 => 254708)


--- trunk/Source/bmalloc/bmalloc/Scavenger.cpp	2020-01-16 22:04:48 UTC (rev 254707)
+++ trunk/Source/bmalloc/bmalloc/Scavenger.cpp	2020-01-16 22:05:00 UTC (rev 254708)
@@ -30,6 +30,7 @@
 #include "BulkDecommit.h"
 #include "Environment.h"
 #include "Heap.h"
+#include "IsoHeapImplInlines.h"
 #if BOS(DARWIN)
 #import <dispatch/dispatch.h>
 #import <mach/host_info.h>
@@ -67,7 +68,7 @@
 
 DEFINE_STATIC_PER_PROCESS_STORAGE(Scavenger);
 
-Scavenger::Scavenger(std::lock_guard<Mutex>&)
+Scavenger::Scavenger(const std::lock_guard<Mutex>&)
 {
     BASSERT(!Environment::get()->isDebugHeapEnabled());
 

Modified: trunk/Source/bmalloc/bmalloc/Scavenger.h (254707 => 254708)


--- trunk/Source/bmalloc/bmalloc/Scavenger.h	2020-01-16 22:04:48 UTC (rev 254707)
+++ trunk/Source/bmalloc/bmalloc/Scavenger.h	2020-01-16 22:05:00 UTC (rev 254708)
@@ -42,7 +42,7 @@
 
 class Scavenger : public StaticPerProcess<Scavenger> {
 public:
-    BEXPORT Scavenger(std::lock_guard<Mutex>&);
+    BEXPORT Scavenger(const std::lock_guard<Mutex>&);
     
     ~Scavenger() = delete;
     

Modified: trunk/Source/bmalloc/bmalloc/VMHeap.cpp (254707 => 254708)


--- trunk/Source/bmalloc/bmalloc/VMHeap.cpp	2020-01-16 22:04:48 UTC (rev 254707)
+++ trunk/Source/bmalloc/bmalloc/VMHeap.cpp	2020-01-16 22:05:00 UTC (rev 254708)
@@ -31,7 +31,7 @@
 
 DEFINE_STATIC_PER_PROCESS_STORAGE(VMHeap);
 
-VMHeap::VMHeap(std::lock_guard<Mutex>&)
+VMHeap::VMHeap(const std::lock_guard<Mutex>&)
 {
 }
 

Modified: trunk/Source/bmalloc/bmalloc/VMHeap.h (254707 => 254708)


--- trunk/Source/bmalloc/bmalloc/VMHeap.h	2020-01-16 22:04:48 UTC (rev 254707)
+++ trunk/Source/bmalloc/bmalloc/VMHeap.h	2020-01-16 22:05:00 UTC (rev 254708)
@@ -47,7 +47,7 @@
 
 class VMHeap : public StaticPerProcess<VMHeap> {
 public:
-    VMHeap(std::lock_guard<Mutex>&);
+    VMHeap(const std::lock_guard<Mutex>&);
     
     LargeRange tryAllocateLargeChunk(size_t alignment, size_t);
 };

Modified: trunk/Source/bmalloc/bmalloc/Zone.cpp (254707 => 254708)


--- trunk/Source/bmalloc/bmalloc/Zone.cpp	2020-01-16 22:04:48 UTC (rev 254707)
+++ trunk/Source/bmalloc/bmalloc/Zone.cpp	2020-01-16 22:05:00 UTC (rev 254708)
@@ -115,7 +115,7 @@
     .statistics = bmalloc::statistics
 };
 
-Zone::Zone(std::lock_guard<Mutex>&)
+Zone::Zone(const std::lock_guard<Mutex>&)
 {
     malloc_zone_t::size = &bmalloc::zoneSize;
     malloc_zone_t::zone_name = "WebKit Malloc";

Modified: trunk/Source/bmalloc/bmalloc/Zone.h (254707 => 254708)


--- trunk/Source/bmalloc/bmalloc/Zone.h	2020-01-16 22:04:48 UTC (rev 254707)
+++ trunk/Source/bmalloc/bmalloc/Zone.h	2020-01-16 22:05:00 UTC (rev 254708)
@@ -42,7 +42,7 @@
     // Enough capacity to track a 64GB heap, so probably enough for anything.
     static constexpr size_t capacity = 2048;
 
-    Zone(std::lock_guard<Mutex>&);
+    Zone(const std::lock_guard<Mutex>&);
     Zone(task_t, memory_reader_t, vm_address_t);
 
     void addRange(Range);

Modified: trunk/Source/bmalloc/bmalloc.xcodeproj/project.pbxproj (254707 => 254708)


--- trunk/Source/bmalloc/bmalloc.xcodeproj/project.pbxproj	2020-01-16 22:04:48 UTC (rev 254707)
+++ trunk/Source/bmalloc/bmalloc.xcodeproj/project.pbxproj	2020-01-16 22:05:00 UTC (rev 254708)
@@ -140,6 +140,7 @@
 		AD14AD2A202529C700890E3B /* ProcessCheck.mm in Sources */ = {isa = PBXBuildFile; fileRef = AD14AD28202529B000890E3B /* ProcessCheck.mm */; };
 		DE8B13B321CC5D9F00A63FCD /* BVMTags.h in Headers */ = {isa = PBXBuildFile; fileRef = DE8B13B221CC5D9F00A63FCD /* BVMTags.h */; settings = {ATTRIBUTES = (Private, ); }; };
 		E31E74802238CA5C005D084A /* StaticPerProcess.h in Headers */ = {isa = PBXBuildFile; fileRef = E31E747F2238CA5B005D084A /* StaticPerProcess.h */; settings = {ATTRIBUTES = (Private, ); }; };
+		E328D84D23CEB38900545B18 /* Packed.h in Headers */ = {isa = PBXBuildFile; fileRef = E328D84C23CEB38900545B18 /* Packed.h */; settings = {ATTRIBUTES = (Private, ); }; };
 		E3A413C9226061140037F470 /* IsoSharedPageInlines.h in Headers */ = {isa = PBXBuildFile; fileRef = E3A413C8226061140037F470 /* IsoSharedPageInlines.h */; settings = {ATTRIBUTES = (Private, ); }; };
 		E3F24402225D2C0100A0E0C3 /* IsoSharedPage.h in Headers */ = {isa = PBXBuildFile; fileRef = E3F24401225D2C0100A0E0C3 /* IsoSharedPage.h */; settings = {ATTRIBUTES = (Private, ); }; };
 		E3F24404225D2C7600A0E0C3 /* IsoSharedPage.cpp in Sources */ = {isa = PBXBuildFile; fileRef = E3F24403225D2C7600A0E0C3 /* IsoSharedPage.cpp */; };
@@ -295,6 +296,7 @@
 		AD14AD28202529B000890E3B /* ProcessCheck.mm */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.objcpp; name = ProcessCheck.mm; path = bmalloc/ProcessCheck.mm; sourceTree = "<group>"; };
 		DE8B13B221CC5D9F00A63FCD /* BVMTags.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; name = BVMTags.h; path = bmalloc/BVMTags.h; sourceTree = "<group>"; };
 		E31E747F2238CA5B005D084A /* StaticPerProcess.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = StaticPerProcess.h; path = bmalloc/StaticPerProcess.h; sourceTree = "<group>"; };
+		E328D84C23CEB38900545B18 /* Packed.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = Packed.h; path = bmalloc/Packed.h; sourceTree = "<group>"; };
 		E3A413C8226061140037F470 /* IsoSharedPageInlines.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = IsoSharedPageInlines.h; path = bmalloc/IsoSharedPageInlines.h; sourceTree = "<group>"; };
 		E3F24401225D2C0100A0E0C3 /* IsoSharedPage.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = IsoSharedPage.h; path = bmalloc/IsoSharedPage.h; sourceTree = "<group>"; };
 		E3F24403225D2C7600A0E0C3 /* IsoSharedPage.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = IsoSharedPage.cpp; path = bmalloc/IsoSharedPage.cpp; sourceTree = "<group>"; };
@@ -532,6 +534,7 @@
 				14C8992A1CC485E70027A057 /* Map.h */,
 				143CB81A19022BC900B16A45 /* Mutex.cpp */,
 				143CB81B19022BC900B16A45 /* Mutex.h */,
+				E328D84C23CEB38900545B18 /* Packed.h */,
 				0F5BF1481F22A8D80029D91D /* PerHeapKind.h */,
 				0F26A7A42054830D0090A141 /* PerProcess.cpp */,
 				14446A0717A61FA400F9EA1D /* PerProcess.h */,
@@ -648,6 +651,7 @@
 				143CB81D19022BC900B16A45 /* Mutex.h in Headers */,
 				144BE11F1CA346520099C8C0 /* Object.h in Headers */,
 				14DD789318F48D0F00950702 /* ObjectType.h in Headers */,
+				E328D84D23CEB38900545B18 /* Packed.h in Headers */,
 				0F5BF1491F22A8D80029D91D /* PerHeapKind.h in Headers */,
 				14DD78CB18F48D7500950702 /* PerProcess.h in Headers */,
 				14DD78CC18F48D7500950702 /* PerThread.h in Headers */,

Modified: trunk/Tools/ChangeLog (254707 => 254708)


--- trunk/Tools/ChangeLog	2020-01-16 22:04:48 UTC (rev 254707)
+++ trunk/Tools/ChangeLog	2020-01-16 22:05:00 UTC (rev 254708)
@@ -1,3 +1,16 @@
+2020-01-16  Yusuke Suzuki  <[email protected]>
+
+        [bmalloc] IsoHeap's initial setup should be small
+        https://bugs.webkit.org/show_bug.cgi?id=206214
+
+        Reviewed by Michael Saboff.
+
+        * TestWebKitAPI/Tests/WTF/bmalloc/IsoHeap.cpp:
+        (assertHasObjects):
+        (assertHasOnlyObjects):
+        (assertClean):
+        (TEST):
+
 2020-01-16  Brady Eidson  <[email protected]>
 
         Make the callAsyncJavaScriptFunction function actually be async (so await works).

Modified: trunk/Tools/TestWebKitAPI/Tests/WTF/bmalloc/IsoHeap.cpp (254707 => 254708)


--- trunk/Tools/TestWebKitAPI/Tests/WTF/bmalloc/IsoHeap.cpp	2020-01-16 22:04:48 UTC (rev 254707)
+++ trunk/Tools/TestWebKitAPI/Tests/WTF/bmalloc/IsoHeap.cpp	2020-01-16 22:05:00 UTC (rev 254708)
@@ -80,6 +80,7 @@
     auto& impl = heap.impl();
     std::lock_guard<bmalloc::Mutex> locker(impl.lock);
     impl.forEachLiveObject(
+        locker,
         [&] (void* object) {
             pointers.erase(object);
         });
@@ -96,6 +97,7 @@
     auto& impl = heap.impl();
     std::lock_guard<bmalloc::Mutex> locker(impl.lock);
     impl.forEachLiveObject(
+        locker,
         [&] (void* object) {
             EXPECT_EQ(pointers.erase(object), 1U);
         });
@@ -108,15 +110,11 @@
     scavengeThisThread();
     if (!Environment::get()->isDebugHeapEnabled()) {
         auto& impl = heap.impl();
-        {
-            std::lock_guard<bmalloc::Mutex> locker(impl.lock);
-            EXPECT_FALSE(impl.numLiveObjects());
-        }
+        EXPECT_FALSE(impl.numLiveObjects());
     }
     heap.scavenge();
     if (!Environment::get()->isDebugHeapEnabled()) {
         auto& impl = heap.impl();
-        std::lock_guard<bmalloc::Mutex> locker(impl.lock);
         EXPECT_FALSE(impl.numCommittedPages());
     }
 }
@@ -187,19 +185,12 @@
         ptrs[i] = nullptr;
     }
     heap.scavenge();
-    unsigned numCommittedPagesBefore;
     auto& impl = heap.impl();
-    {
-        std::lock_guard<bmalloc::Mutex> locker(impl.lock);
-        numCommittedPagesBefore = impl.numCommittedPages();
-    }
+    unsigned numCommittedPagesBefore = impl.numCommittedPages();
     assertHasOnlyObjects(heap, toptrset(ptrs));
     for (unsigned i = ptrs.size() / 2; i--;)
         ptrs.push_back(heap.allocate());
-    {
-        std::lock_guard<bmalloc::Mutex> locker(impl.lock);
-        EXPECT_EQ(numCommittedPagesBefore, impl.numCommittedPages());
-    }
+    EXPECT_EQ(numCommittedPagesBefore, impl.numCommittedPages());
     for (void* ptr : ptrs)
         heap.deallocate(ptr);
     assertClean(heap);
@@ -220,19 +211,12 @@
         ptrs[i] = nullptr;
     }
     heap.scavenge();
-    unsigned numCommittedPagesBefore;
     auto& impl = heap.impl();
-    {
-        std::lock_guard<bmalloc::Mutex> locker(impl.lock);
-        numCommittedPagesBefore = impl.numCommittedPages();
-    }
+    unsigned numCommittedPagesBefore = impl.numCommittedPages();
     assertHasOnlyObjects(heap, toptrset(ptrs));
     for (unsigned i = ptrs.size() / 2; i--;)
         ptrs.push_back(heap.allocate());
-    {
-        std::lock_guard<bmalloc::Mutex> locker(impl.lock);
-        EXPECT_EQ(numCommittedPagesBefore, impl.numCommittedPages());
-    }
+    EXPECT_EQ(numCommittedPagesBefore, impl.numCommittedPages());
     for (void* ptr : ptrs)
         heap.deallocate(ptr);
     assertClean(heap);