Title: [278447] trunk/Source/bmalloc
Revision: 278447
Author: msab...@apple.com
Date: 2021-06-03 20:45:04 -0700 (Thu, 03 Jun 2021)

Log Message

2021-06-03  Michael Saboff  <msab...@apple.com>

        Unreviewed, rolling out r278278.
        https://bugs.webkit.org/show_bug.cgi?id=226237

        Made some JSC mini mode and other tests flaky.

        Reverted changeset:

        [bmalloc] Make adaptive scavenging more precise
        https://bugs.webkit.org/show_bug.cgi?id=226237

        * bmalloc/BPlatform.h:
        * bmalloc/Heap.cpp:
        (bmalloc::Heap::decommitLargeRange):
        (bmalloc::Heap::scavenge):
        (bmalloc::Heap::scavengeToHighWatermark):
        (bmalloc::Heap::allocateSmallChunk):
        (bmalloc::Heap::deallocateSmallChunk):
        (bmalloc::Heap::allocateSmallPage):
        (bmalloc::Heap::splitAndAllocate):
        (bmalloc::Heap::allocateLarge):
        (bmalloc::Heap::tryAllocateLargeChunk):
        (bmalloc::Heap::shrinkLarge):
        (bmalloc::Heap::deallocateLarge):
        * bmalloc/Heap.h:
        * bmalloc/IsoDirectory.h:
        * bmalloc/IsoDirectoryInlines.h:
        (bmalloc::passedNumPages>::takeFirstEligible):
        (bmalloc::passedNumPages>::scavenge):
        (bmalloc::passedNumPages>::scavengeToHighWatermark):
        * bmalloc/IsoHeapImpl.h:
        * bmalloc/IsoHeapImplInlines.h:
        (bmalloc::IsoHeapImpl<Config>::scavengeToHighWatermark):
        * bmalloc/IsoSharedHeapInlines.h:
        (bmalloc::IsoSharedHeap::allocateSlow):
        * bmalloc/LargeMap.cpp:
        (bmalloc::LargeMap::add):
        * bmalloc/LargeRange.h:
        (bmalloc::LargeRange::LargeRange):
        (bmalloc::LargeRange::setTotalPhysicalSize):
        (bmalloc::merge):
        (bmalloc::LargeRange::split const):
        (bmalloc::LargeRange::physicalEnd const): Deleted.
        (bmalloc::LargeRange::setPhysicalEnd): Deleted.
        (bmalloc::LargeRange::clearPhysicalEnd): Deleted.
        * bmalloc/Scavenger.cpp:
        (bmalloc::Scavenger::Scavenger):
        (bmalloc::Scavenger::didStartGrowing):
        (bmalloc::Scavenger::scheduleIfUnderMemoryPressure):
        (bmalloc::Scavenger::schedule):
        (bmalloc::Scavenger::timeSinceLastPartialScavenge):
        (bmalloc::Scavenger::scavenge):
        (bmalloc::Scavenger::partialScavenge):
        (bmalloc::Scavenger::threadRunLoop):
        * bmalloc/Scavenger.h:
        * bmalloc/SmallPage.h:

Modified Paths

trunk/Source/bmalloc/ChangeLog
trunk/Source/bmalloc/bmalloc/BPlatform.h
trunk/Source/bmalloc/bmalloc/Heap.cpp
trunk/Source/bmalloc/bmalloc/Heap.h
trunk/Source/bmalloc/bmalloc/IsoDirectory.h
trunk/Source/bmalloc/bmalloc/IsoDirectoryInlines.h
trunk/Source/bmalloc/bmalloc/IsoHeapImpl.h
trunk/Source/bmalloc/bmalloc/IsoHeapImplInlines.h
trunk/Source/bmalloc/bmalloc/IsoSharedHeapInlines.h
trunk/Source/bmalloc/bmalloc/LargeMap.cpp
trunk/Source/bmalloc/bmalloc/LargeRange.h
trunk/Source/bmalloc/bmalloc/Scavenger.cpp
trunk/Source/bmalloc/bmalloc/Scavenger.h
trunk/Source/bmalloc/bmalloc/SmallPage.h

Diff

Modified: trunk/Source/bmalloc/ChangeLog (278446 => 278447)


--- trunk/Source/bmalloc/ChangeLog	2021-06-04 03:42:58 UTC (rev 278446)
+++ trunk/Source/bmalloc/ChangeLog	2021-06-04 03:45:04 UTC (rev 278447)
@@ -1,3 +1,61 @@
+2021-06-03  Michael Saboff  <msab...@apple.com>
+
+        Unreviewed, rolling out r278278.
+        https://bugs.webkit.org/show_bug.cgi?id=226237
+
+        Made  some JSC mini mode and other tests flakey.
+
+        Reverted changeset:
+
+        [bmalloc] Make adaptive scavenging more precise
+        https://bugs.webkit.org/show_bug.cgi?id=226237
+
+        * bmalloc/BPlatform.h:
+        * bmalloc/Heap.cpp:
+        (bmalloc::Heap::decommitLargeRange):
+        (bmalloc::Heap::scavenge):
+        (bmalloc::Heap::scavengeToHighWatermark):
+        (bmalloc::Heap::allocateSmallChunk):
+        (bmalloc::Heap::deallocateSmallChunk):
+        (bmalloc::Heap::allocateSmallPage):
+        (bmalloc::Heap::splitAndAllocate):
+        (bmalloc::Heap::allocateLarge):
+        (bmalloc::Heap::tryAllocateLargeChunk):
+        (bmalloc::Heap::shrinkLarge):
+        (bmalloc::Heap::deallocateLarge):
+        * bmalloc/Heap.h:
+        * bmalloc/IsoDirectory.h:
+        * bmalloc/IsoDirectoryInlines.h:
+        (bmalloc::passedNumPages>::takeFirstEligible):
+        (bmalloc::passedNumPages>::scavenge):
+        (bmalloc::passedNumPages>::scavengeToHighWatermark):
+        * bmalloc/IsoHeapImpl.h:
+        * bmalloc/IsoHeapImplInlines.h:
+        (bmalloc::IsoHeapImpl<Config>::scavengeToHighWatermark):
+        * bmalloc/IsoSharedHeapInlines.h:
+        (bmalloc::IsoSharedHeap::allocateSlow):
+        * bmalloc/LargeMap.cpp:
+        (bmalloc::LargeMap::add):
+        * bmalloc/LargeRange.h:
+        (bmalloc::LargeRange::LargeRange):
+        (bmalloc::LargeRange::setTotalPhysicalSize):
+        (bmalloc::merge):
+        (bmalloc::LargeRange::split const):
+        (bmalloc::LargeRange::physicalEnd const): Deleted.
+        (bmalloc::LargeRange::setPhysicalEnd): Deleted.
+        (bmalloc::LargeRange::clearPhysicalEnd): Deleted.
+        * bmalloc/Scavenger.cpp:
+        (bmalloc::Scavenger::Scavenger):
+        (bmalloc::Scavenger::didStartGrowing):
+        (bmalloc::Scavenger::scheduleIfUnderMemoryPressure):
+        (bmalloc::Scavenger::schedule):
+        (bmalloc::Scavenger::timeSinceLastPartialScavenge):
+        (bmalloc::Scavenger::scavenge):
+        (bmalloc::Scavenger::partialScavenge):
+        (bmalloc::Scavenger::threadRunLoop):
+        * bmalloc/Scavenger.h:
+        * bmalloc/SmallPage.h:
+
 2021-05-31  Michael Saboff  <msab...@apple.com>
 
         [bmalloc] Make adaptive scavenging more precise

Modified: trunk/Source/bmalloc/bmalloc/BPlatform.h (278446 => 278447)


--- trunk/Source/bmalloc/bmalloc/BPlatform.h	2021-06-04 03:42:58 UTC (rev 278446)
+++ trunk/Source/bmalloc/bmalloc/BPlatform.h	2021-06-04 03:45:04 UTC (rev 278447)
@@ -309,6 +309,12 @@
 /* This is used for debugging when hacking on how bmalloc calculates its physical footprint. */
 #define ENABLE_PHYSICAL_PAGE_MAP 0
 
+#if BPLATFORM(MAC)
+#define BUSE_PARTIAL_SCAVENGE 1
+#else
+#define BUSE_PARTIAL_SCAVENGE 0
+#endif
+
 #if !defined(BUSE_PRECOMPUTED_CONSTANTS_VMPAGE4K)
 #define BUSE_PRECOMPUTED_CONSTANTS_VMPAGE4K 1
 #endif
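The BUSE_PARTIAL_SCAVENGE flag restored above is a compile-time switch: it is 1 only on macOS and gates every partial-scavenge code path in the files below through #if BUSE(PARTIAL_SCAVENGE) / #if !BUSE(PARTIAL_SCAVENGE) blocks. The following is a generic, self-contained sketch of that platform-gated flag pattern; the DEMO_USE_PARTIAL_SCAVENGE macro and the __APPLE__ check are illustrative stand-ins, not bmalloc's real BUSE() machinery or its exact BPLATFORM(MAC) test.

    #include <cstdio>

    // Hypothetical stand-in for BUSE_PARTIAL_SCAVENGE; __APPLE__ only approximates BPLATFORM(MAC).
    #if defined(__APPLE__)
    #define DEMO_USE_PARTIAL_SCAVENGE 1
    #else
    #define DEMO_USE_PARTIAL_SCAVENGE 0
    #endif

    int main()
    {
    #if DEMO_USE_PARTIAL_SCAVENGE
        std::puts("partial-scavenge build: periodically decommit down to a high watermark");
    #else
        std::puts("precise build: decommit only memory left unused since the last scavenge");
    #endif
        return 0;
    }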

Modified: trunk/Source/bmalloc/bmalloc/Heap.cpp (278446 => 278447)


--- trunk/Source/bmalloc/bmalloc/Heap.cpp	2021-06-04 03:42:58 UTC (rev 278446)
+++ trunk/Source/bmalloc/bmalloc/Heap.cpp	2021-06-04 03:45:04 UTC (rev 278447)
@@ -65,7 +65,7 @@
         m_gigacageSize = size;
         ptrdiff_t offset = roundDownToMultipleOf(vmPageSize(), random[1] % (gigacageSize - size));
         void* base = reinterpret_cast<unsigned char*>(gigacageBasePtr) + offset;
-        m_largeFree.add(LargeRange(base, size, 0, 0, base));
+        m_largeFree.add(LargeRange(base, size, 0, 0));
     }
 #endif
     
@@ -108,13 +108,10 @@
 {
     m_footprint -= range.totalPhysicalSize();
     m_freeableMemory -= range.totalPhysicalSize();
-    if (range.totalPhysicalSize()) {
-        decommitter.addLazy(range.begin(), range.physicalEnd() - range.begin());
-        m_hasPendingDecommits = true;
-    }
+    decommitter.addLazy(range.begin(), range.size());
+    m_hasPendingDecommits = true;
     range.setStartPhysicalSize(0);
     range.setTotalPhysicalSize(0);
-    range.clearPhysicalEnd();
     BASSERT(range.isEligibile());
     range.setEligible(false);
 #if ENABLE_PHYSICAL_PAGE_MAP 
@@ -122,7 +119,11 @@
 #endif
 }
 
+#if BUSE(PARTIAL_SCAVENGE)
+void Heap::scavenge(UniqueLockHolder& lock, BulkDecommit& decommitter)
+#else
 void Heap::scavenge(UniqueLockHolder& lock, BulkDecommit& decommitter, size_t& deferredDecommits)
+#endif
 {
     for (auto& list : m_freePages) {
         for (auto* chunk : list) {
@@ -129,11 +130,13 @@
             for (auto* page : chunk->freePages()) {
                 if (!page->hasPhysicalPages())
                     continue;
+#if !BUSE(PARTIAL_SCAVENGE)
                 if (page->usedSinceLastScavenge()) {
                     page->clearUsedSinceLastScavenge();
                     deferredDecommits++;
                     continue;
                 }
+#endif
 
                 size_t pageSize = bmalloc::pageSize(&list - &m_freePages[0]);
                 size_t decommitSize = physicalPageSizeSloppy(page->begin()->begin(), pageSize);
@@ -154,15 +157,37 @@
     }
 
     for (LargeRange& range : m_largeFree) {
+#if BUSE(PARTIAL_SCAVENGE)
+        m_highWatermark = std::min(m_highWatermark, static_cast<void*>(range.begin()));
+#else
         if (range.usedSinceLastScavenge()) {
             range.clearUsedSinceLastScavenge();
             deferredDecommits++;
             continue;
         }
+#endif
         decommitLargeRange(lock, range, decommitter);
     }
+
+#if BUSE(PARTIAL_SCAVENGE)
+    m_freeableMemory = 0;
+#endif
 }
 
+#if BUSE(PARTIAL_SCAVENGE)
+void Heap::scavengeToHighWatermark(UniqueLockHolder& lock, BulkDecommit& decommitter)
+{
+    void* newHighWaterMark = nullptr;
+    for (LargeRange& range : m_largeFree) {
+        if (range.begin() <= m_highWatermark)
+            newHighWaterMark = std::min(newHighWaterMark, static_cast<void*>(range.begin()));
+        else
+            decommitLargeRange(lock, range, decommitter);
+    }
+    m_highWatermark = newHighWaterMark;
+}
+#endif
+
 void Heap::deallocateLineCache(UniqueLockHolder&, LineCache& lineCache)
 {
     for (auto& list : lineCache) {
@@ -193,15 +218,26 @@
 
         m_objectTypes.set(lock, chunk, ObjectType::Small);
 
+        size_t accountedInFreeable = 0;
         forEachPage(chunk, pageSize, [&](SmallPage* page) {
             page->setHasPhysicalPages(true);
+#if !BUSE(PARTIAL_SCAVENGE)
             page->setUsedSinceLastScavenge();
+#endif
             page->setHasFreeLines(lock, true);
             chunk->freePages().push(page);
+            accountedInFreeable += pageSize;
         });
 
-        m_freeableMemory += chunkSize;
+        m_freeableMemory += accountedInFreeable;
 
+        auto metadataSize = Chunk::metadataSize(pageSize);
+        vmDeallocatePhysicalPagesSloppy(chunk->address(sizeof(Chunk)), metadataSize - sizeof(Chunk));
+
+        auto decommitSize = chunkSize - metadataSize - accountedInFreeable;
+        if (decommitSize > 0)
+            vmDeallocatePhysicalPagesSloppy(chunk->address(chunkSize - decommitSize), decommitSize);
+
         m_scavenger->schedule(0);
 
         return chunk;
@@ -217,23 +253,24 @@
     
     size_t size = m_largeAllocated.remove(chunk);
     size_t totalPhysicalSize = size;
-    size_t chunkPageSize = pageSize(pageClass);
-    SmallPage* firstPageWithoutPhysicalPages = nullptr;
 
-    void* physicalEnd = chunk->address(chunk->metadataSize(chunkPageSize));
-    forEachPage(chunk, chunkPageSize, [&](SmallPage* page) {
+    size_t accountedInFreeable = 0;
+
+    bool hasPhysicalPages = true;
+    forEachPage(chunk, pageSize(pageClass), [&](SmallPage* page) {
         size_t physicalSize = physicalPageSizeSloppy(page->begin()->begin(), pageSize(pageClass));
         if (!page->hasPhysicalPages()) {
             totalPhysicalSize -= physicalSize;
-            if (!firstPageWithoutPhysicalPages)
-                firstPageWithoutPhysicalPages = page;
+            hasPhysicalPages = false;
         } else
-            physicalEnd = page->begin()->begin() + physicalSize;
+            accountedInFreeable += physicalSize;
     });
 
-    size_t startPhysicalSize = firstPageWithoutPhysicalPages ? firstPageWithoutPhysicalPages->begin()->begin() - chunk->bytes() : size;
+    m_freeableMemory -= accountedInFreeable;
+    m_freeableMemory += totalPhysicalSize;
 
-    m_largeFree.add(LargeRange(chunk, size, startPhysicalSize, totalPhysicalSize, chunk->address(startPhysicalSize)));
+    size_t startPhysicalSize = hasPhysicalPages ? size : 0;
+    m_largeFree.add(LargeRange(chunk, size, startPhysicalSize, totalPhysicalSize));
 }
 
 SmallPage* Heap::allocateSmallPage(UniqueLockHolder& lock, size_t sizeClass, LineCache& lineCache, FailureAction action)
@@ -246,6 +283,8 @@
     if (!m_lineCache[sizeClass].isEmpty())
         return m_lineCache[sizeClass].popFront();
 
+    m_scavenger->didStartGrowing();
+    
     SmallPage* page = [&]() -> SmallPage* {
         size_t pageClass = m_constants.pageClass(sizeClass);
         
@@ -275,7 +314,9 @@
             m_physicalPageMap.commit(page->begin()->begin(), pageSize);
 #endif
         }
+#if !BUSE(PARTIAL_SCAVENGE)
         page->setUsedSinceLastScavenge();
+#endif
 
         return page;
     }();
@@ -484,7 +525,6 @@
         vmAllocatePhysicalPagesSloppy(range.begin() + range.startPhysicalSize(), range.size() - range.startPhysicalSize());
         range.setStartPhysicalSize(range.size());
         range.setTotalPhysicalSize(range.size());
-        range.setPhysicalEnd(range.begin() + range.size());
 #if ENABLE_PHYSICAL_PAGE_MAP 
         m_physicalPageMap.commit(range.begin(), range.size());
 #endif
@@ -520,6 +560,8 @@
 
     BASSERT(isPowerOfTwo(alignment));
     
+    m_scavenger->didStartGrowing();
+    
     size_t roundedSize = size ? roundUpToMultipleOf(largeAlignment, size) : largeAlignment;
     ASSERT_OR_RETURN_ON_FAILURE(roundedSize >= size); // Check for overflow
     size = roundedSize;
@@ -548,6 +590,9 @@
     m_freeableMemory -= range.totalPhysicalSize();
 
     void* result = splitAndAllocate(lock, range, alignment, size).begin();
+#if BUSE(PARTIAL_SCAVENGE)
+    m_highWatermark = std::max(m_highWatermark, result);
+#endif
     ASSERT_OR_RETURN_ON_FAILURE(result);
     return result;
 
@@ -576,7 +621,7 @@
     PerProcess<Zone>::get()->addRange(Range(memory, size));
 #endif
 
-    return LargeRange(memory, size, 0, 0, memory);
+    return LargeRange(memory, size, 0, 0);
 }
 
 size_t Heap::largeSize(UniqueLockHolder&, void* object)
@@ -589,7 +634,7 @@
     BASSERT(object.size() > newSize);
 
     size_t size = m_largeAllocated.remove(object.begin());
-    LargeRange range = LargeRange(object, size, size, object.begin() + size);
+    LargeRange range = LargeRange(object, size, size);
     splitAndAllocate(lock, range, alignment, newSize);
 
     m_scavenger->schedule(size);
@@ -598,7 +643,7 @@
 void Heap::deallocateLarge(UniqueLockHolder&, void* object)
 {
     size_t size = m_largeAllocated.remove(object);
-    m_largeFree.add(LargeRange(object, size, size, size, static_cast<char*>(object) + size));
+    m_largeFree.add(LargeRange(object, size, size, size));
     m_freeableMemory += size;
     m_scavenger->schedule(size);
 }
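The Heap changes above reinstate scavengeToHighWatermark(): on a partial scavenge, only large free ranges that lie above the high watermark recorded by allocateLarge() are decommitted, and the watermark is then lowered. Below is a simplified, self-contained sketch of that policy under stated assumptions: FreeRange and decommit() are illustrative stand-ins for bmalloc's LargeRange and BulkDecommit, and the real code also derives a new watermark from the ranges it keeps rather than simply clearing it.

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Illustrative stand-in for bmalloc::LargeRange: a base address and a size.
    struct FreeRange {
        char* begin;
        size_t size;
    };

    // Stand-in for the real decommit path (bmalloc batches this through BulkDecommit).
    static void decommit(const FreeRange& range)
    {
        std::printf("decommit [%p, %p)\n", static_cast<void*>(range.begin),
            static_cast<void*>(range.begin + range.size));
    }

    // Simplified shape of Heap::scavengeToHighWatermark(): free ranges above the
    // watermark are decommitted; ranges at or below it stay committed.
    void scavengeToHighWatermark(std::vector<FreeRange>& freeRanges, void*& highWatermark)
    {
        for (const FreeRange& range : freeRanges) {
            if (static_cast<void*>(range.begin) > highWatermark)
                decommit(range);
        }
        highWatermark = nullptr; // allocateLarge() raises it again on the next allocation
    }

    int main()
    {
        std::vector<char> arena(4096);
        std::vector<FreeRange> freeRanges = {
            { arena.data(), 1024 },        // at/below the watermark: kept committed
            { arena.data() + 2048, 2048 }, // above the watermark: decommitted
        };
        void* highWatermark = arena.data() + 1024;
        scavengeToHighWatermark(freeRanges, highWatermark);
        return 0;
    }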

Modified: trunk/Source/bmalloc/bmalloc/Heap.h (278446 => 278447)


--- trunk/Source/bmalloc/bmalloc/Heap.h	2021-06-04 03:42:58 UTC (rev 278446)
+++ trunk/Source/bmalloc/bmalloc/Heap.h	2021-06-04 03:45:04 UTC (rev 278447)
@@ -74,7 +74,12 @@
     size_t largeSize(UniqueLockHolder&, void*);
     void shrinkLarge(UniqueLockHolder&, const Range&, size_t);
 
+#if BUSE(PARTIAL_SCAVENGE)
+    void scavengeToHighWatermark(UniqueLockHolder&, BulkDecommit&);
+    void scavenge(UniqueLockHolder&, BulkDecommit&);
+#else
     void scavenge(UniqueLockHolder&, BulkDecommit&, size_t& deferredDecommits);
+#endif
     void scavenge(UniqueLockHolder&, BulkDecommit&, size_t& freed, size_t goal);
 
     size_t freeableMemory(UniqueLockHolder&);
@@ -142,6 +147,10 @@
 #if ENABLE_PHYSICAL_PAGE_MAP 
     PhysicalPageMap m_physicalPageMap;
 #endif
+    
+#if BUSE(PARTIAL_SCAVENGE)
+    void* m_highWatermark { nullptr };
+#endif
 };
 
 inline void Heap::allocateSmallBumpRanges(

Modified: trunk/Source/bmalloc/bmalloc/IsoDirectory.h (278446 => 278447)


--- trunk/Source/bmalloc/bmalloc/IsoDirectory.h	2021-06-04 03:42:58 UTC (rev 278446)
+++ trunk/Source/bmalloc/bmalloc/IsoDirectory.h	2021-06-04 03:45:04 UTC (rev 278447)
@@ -76,6 +76,9 @@
     // Iterate over all empty and committed pages, and put them into the vector. This also records the
     // pages as being decommitted. It's the caller's job to do the actual decommitting.
     void scavenge(const LockHolder&, Vector<DeferredDecommit>&);
+#if BUSE(PARTIAL_SCAVENGE)
+    void scavengeToHighWatermark(const LockHolder&, Vector<DeferredDecommit>&);
+#endif
 
     template<typename Func>
     void forEachCommittedPage(const LockHolder&, const Func&);
@@ -90,6 +93,9 @@
     Bits<numPages> m_empty;
     Bits<numPages> m_committed;
     unsigned m_firstEligibleOrDecommitted { 0 };
+#if BUSE(PARTIAL_SCAVENGE)
+    unsigned m_highWatermark { 0 };
+#endif
 };
 
 } // namespace bmalloc

Modified: trunk/Source/bmalloc/bmalloc/IsoDirectoryInlines.h (278446 => 278447)


--- trunk/Source/bmalloc/bmalloc/IsoDirectoryInlines.h	2021-06-04 03:42:58 UTC (rev 278446)
+++ trunk/Source/bmalloc/bmalloc/IsoDirectoryInlines.h	2021-06-04 03:45:04 UTC (rev 278447)
@@ -50,7 +50,12 @@
     if (pageIndex >= numPages)
         return EligibilityKind::Full;
 
+#if BUSE(PARTIAL_SCAVENGE)
+    m_highWatermark = std::max(pageIndex, m_highWatermark);
+#endif
+
     Scavenger& scavenger = *Scavenger::get();
+    scavenger.didStartGrowing();
     
     IsoPage<Config>* page = m_pages[pageIndex].get();
     
@@ -141,9 +146,25 @@
         [&] (size_t index) {
             scavengePage(locker, index, decommits);
         });
+#if BUSE(PARTIAL_SCAVENGE)
+    m_highWatermark = 0;
+#endif
 }
 
+#if BUSE(PARTIAL_SCAVENGE)
 template<typename Config, unsigned passedNumPages>
+void IsoDirectory<Config, passedNumPages>::scavengeToHighWatermark(const LockHolder& locker, Vector<DeferredDecommit>& decommits)
+{
+    (m_empty & m_committed).forEachSetBit(
+        [&] (size_t index) {
+            if (index > m_highWatermark)
+                scavengePage(locker, index, decommits);
+        });
+    m_highWatermark = 0;
+}
+#endif
+
+template<typename Config, unsigned passedNumPages>
 template<typename Func>
 void IsoDirectory<Config, passedNumPages>::forEachCommittedPage(const LockHolder&, const Func& func)
 {
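The reinstated IsoDirectory<Config, passedNumPages>::scavengeToHighWatermark() takes the pages that are both empty and committed and decommits only those whose index is above the directory's high watermark, then resets the watermark to 0. A rough stand-alone sketch of that bit-mask walk follows, using std::bitset in place of bmalloc's Bits<numPages>; scavengePage() here is a hypothetical stub for the real per-page decommit.

    #include <bitset>
    #include <cstdio>

    constexpr unsigned numPages = 32;

    // Hypothetical stub for IsoDirectory::scavengePage().
    static void scavengePage(unsigned index)
    {
        std::printf("decommit page %u\n", index);
    }

    // Mirrors the shape of scavengeToHighWatermark(): only empty, committed pages
    // above the high watermark are decommitted. Returns the reset watermark.
    unsigned scavengeToHighWatermark(const std::bitset<numPages>& empty,
        const std::bitset<numPages>& committed, unsigned highWatermark)
    {
        std::bitset<numPages> candidates = empty & committed;
        for (unsigned index = 0; index < numPages; ++index) {
            if (candidates[index] && index > highWatermark)
                scavengePage(index);
        }
        return 0;
    }

    int main()
    {
        std::bitset<numPages> empty, committed;
        empty.set(3); empty.set(10); empty.set(20);
        committed.set(10); committed.set(20);
        scavengeToHighWatermark(empty, committed, 12); // decommits page 20 only
        return 0;
    }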

Modified: trunk/Source/bmalloc/bmalloc/IsoHeapImpl.h (278446 => 278447)


--- trunk/Source/bmalloc/bmalloc/IsoHeapImpl.h	2021-06-04 03:42:58 UTC (rev 278446)
+++ trunk/Source/bmalloc/bmalloc/IsoHeapImpl.h	2021-06-04 03:45:04 UTC (rev 278447)
@@ -49,6 +49,9 @@
     virtual ~IsoHeapImplBase();
     
     virtual void scavenge(Vector<DeferredDecommit>&) = 0;
+#if BUSE(PARTIAL_SCAVENGE)
+    virtual void scavengeToHighWatermark(Vector<DeferredDecommit>&) = 0;
+#endif
     
     void scavengeNow();
     static void finishScavenging(Vector<DeferredDecommit>&);
@@ -109,6 +112,9 @@
     void didBecomeEligibleOrDecommited(const LockHolder&, IsoDirectory<Config, IsoDirectoryPage<Config>::numPages>*);
     
     void scavenge(Vector<DeferredDecommit>&) override;
+#if BUSE(PARTIAL_SCAVENGE)
+    void scavengeToHighWatermark(Vector<DeferredDecommit>&) override;
+#endif
 
     unsigned allocatorOffset();
     unsigned deallocatorOffset();

Modified: trunk/Source/bmalloc/bmalloc/IsoHeapImplInlines.h (278446 => 278447)


--- trunk/Source/bmalloc/bmalloc/IsoHeapImplInlines.h	2021-06-04 03:42:58 UTC (rev 278446)
+++ trunk/Source/bmalloc/bmalloc/IsoHeapImplInlines.h	2021-06-04 03:45:04 UTC (rev 278447)
@@ -121,6 +121,21 @@
     m_directoryHighWatermark = 0;
 }
 
+#if BUSE(PARTIAL_SCAVENGE)
+template<typename Config>
+void IsoHeapImpl<Config>::scavengeToHighWatermark(Vector<DeferredDecommit>& decommits)
+{
+    LockHolder locker(this->lock);
+    if (!m_directoryHighWatermark)
+        m_inlineDirectory.scavengeToHighWatermark(locker, decommits);
+    for (IsoDirectoryPage<Config>* page = m_headDirectory.get(); page; page = page->next) {
+        if (page->index() >= m_directoryHighWatermark)
+            page->payload.scavengeToHighWatermark(locker, decommits);
+    }
+    m_directoryHighWatermark = 0;
+}
+#endif
+
 inline size_t IsoHeapImplBase::freeableMemory()
 {
     return m_freeableMemory;

Modified: trunk/Source/bmalloc/bmalloc/IsoSharedHeapInlines.h (278446 => 278447)


--- trunk/Source/bmalloc/bmalloc/IsoSharedHeapInlines.h	2021-06-04 03:42:58 UTC (rev 278446)
+++ trunk/Source/bmalloc/bmalloc/IsoSharedHeapInlines.h	2021-06-04 03:45:04 UTC (rev 278447)
@@ -63,6 +63,7 @@
 BNO_INLINE void* IsoSharedHeap::allocateSlow(const LockHolder& locker, bool abortOnFailure)
 {
     Scavenger& scavenger = *Scavenger::get();
+    scavenger.didStartGrowing();
     scavenger.scheduleIfUnderMemoryPressure(IsoSharedPage::pageSize);
 
     IsoSharedPage* page = IsoSharedPage::tryCreate();

Modified: trunk/Source/bmalloc/bmalloc/LargeMap.cpp (278446 => 278447)


--- trunk/Source/bmalloc/bmalloc/LargeMap.cpp	2021-06-04 03:42:58 UTC (rev 278446)
+++ trunk/Source/bmalloc/bmalloc/LargeMap.cpp	2021-06-04 03:45:04 UTC (rev 278447)
@@ -76,7 +76,9 @@
         merged = merge(merged, m_free.pop(i--));
     }
 
+#if !BUSE(PARTIAL_SCAVENGE)
     merged.setUsedSinceLastScavenge();
+#endif
     m_free.push(merged);
 }
 

Modified: trunk/Source/bmalloc/bmalloc/LargeRange.h (278446 => 278447)


--- trunk/Source/bmalloc/bmalloc/LargeRange.h	2021-06-04 03:42:58 UTC (rev 278446)
+++ trunk/Source/bmalloc/bmalloc/LargeRange.h	2021-06-04 03:45:04 UTC (rev 278447)
@@ -37,29 +37,40 @@
         : Range()
         , m_startPhysicalSize(0)
         , m_totalPhysicalSize(0)
-        , m_physicalEnd(begin())
+#if !BUSE(PARTIAL_SCAVENGE)
         , m_isEligible(true)
         , m_usedSinceLastScavenge(false)
+#endif
     {
     }
 
-    LargeRange(const Range& other, size_t startPhysicalSize, size_t totalPhysicalSize, void* physicalEnd)
+    LargeRange(const Range& other, size_t startPhysicalSize, size_t totalPhysicalSize)
         : Range(other)
         , m_startPhysicalSize(startPhysicalSize)
         , m_totalPhysicalSize(totalPhysicalSize)
-        , m_physicalEnd(static_cast<char*>(physicalEnd))
+#if !BUSE(PARTIAL_SCAVENGE)
         , m_isEligible(true)
         , m_usedSinceLastScavenge(false)
+#endif
     {
         BASSERT(this->size() >= this->totalPhysicalSize());
         BASSERT(this->totalPhysicalSize() >= this->startPhysicalSize());
     }
 
-    LargeRange(void* begin, size_t size, size_t startPhysicalSize, size_t totalPhysicalSize, void* physicalEnd, bool usedSinceLastScavenge = false)
+#if BUSE(PARTIAL_SCAVENGE)
+    LargeRange(void* begin, size_t size, size_t startPhysicalSize, size_t totalPhysicalSize)
         : Range(begin, size)
         , m_startPhysicalSize(startPhysicalSize)
         , m_totalPhysicalSize(totalPhysicalSize)
-        , m_physicalEnd(static_cast<char*>(physicalEnd))
+    {
+        BASSERT(this->size() >= this->totalPhysicalSize());
+        BASSERT(this->totalPhysicalSize() >= this->startPhysicalSize());
+    }
+#else
+    LargeRange(void* begin, size_t size, size_t startPhysicalSize, size_t totalPhysicalSize, bool usedSinceLastScavenge = false)
+        : Range(begin, size)
+        , m_startPhysicalSize(startPhysicalSize)
+        , m_totalPhysicalSize(totalPhysicalSize)
         , m_isEligible(true)
         , m_usedSinceLastScavenge(usedSinceLastScavenge)
     {
@@ -66,6 +77,7 @@
         BASSERT(this->size() >= this->totalPhysicalSize());
         BASSERT(this->totalPhysicalSize() >= this->startPhysicalSize());
     }
+#endif
 
     // Returns a lower bound on physical size at the start of the range. Ranges that
     // span non-physical fragments use this number to remember the physical size of
@@ -86,12 +98,6 @@
     // doesn't really affect accuracy.
     size_t totalPhysicalSize() const { return m_totalPhysicalSize; }
     void setTotalPhysicalSize(size_t totalPhysicalSize) { m_totalPhysicalSize = totalPhysicalSize; }
-    
-    // This is the address past the end of physical memory in this range.
-    // When decomitting this range, we decommitt [begin(), physicalEnd).
-    char* physicalEnd() const { return m_physicalEnd; }
-    void setPhysicalEnd(void* physicalEnd) { m_physicalEnd = static_cast<char*>(physicalEnd); }
-    void clearPhysicalEnd() { m_physicalEnd = begin(); }
 
     std::pair<LargeRange, LargeRange> split(size_t) const;
 
@@ -98,9 +104,11 @@
     void setEligible(bool eligible) { m_isEligible = eligible; }
     bool isEligibile() const { return m_isEligible; }
 
+#if !BUSE(PARTIAL_SCAVENGE)
     bool usedSinceLastScavenge() const { return m_usedSinceLastScavenge; }
     void clearUsedSinceLastScavenge() { m_usedSinceLastScavenge = false; }
     void setUsedSinceLastScavenge() { m_usedSinceLastScavenge = true; }
+#endif
 
     bool operator<(const void* other) const { return begin() < other; }
     bool operator<(const LargeRange& other) const { return begin() < other.begin(); }
@@ -108,9 +116,12 @@
 private:
     size_t m_startPhysicalSize;
     size_t m_totalPhysicalSize;
-    char* m_physicalEnd;
+#if BUSE(PARTIAL_SCAVENGE)
+    bool m_isEligible { true };
+#else
     unsigned m_isEligible: 1;
     unsigned m_usedSinceLastScavenge: 1;
+#endif
 };
 
 inline bool canMerge(const LargeRange& a, const LargeRange& b)
@@ -133,17 +144,18 @@
 inline LargeRange merge(const LargeRange& a, const LargeRange& b)
 {
     const LargeRange& left = std::min(a, b);
-    const LargeRange& right = std::max(a, b);
-    void* physicalEnd = right.totalPhysicalSize() ? right.physicalEnd() : left.physicalEnd();
+#if !BUSE(PARTIAL_SCAVENGE)
     bool mergedUsedSinceLastScavenge = a.usedSinceLastScavenge() || b.usedSinceLastScavenge();
+#endif
     if (left.size() == left.startPhysicalSize()) {
         return LargeRange(
             left.begin(),
             a.size() + b.size(),
             a.startPhysicalSize() + b.startPhysicalSize(),
-            a.totalPhysicalSize() + b.totalPhysicalSize(),
-            physicalEnd
+            a.totalPhysicalSize() + b.totalPhysicalSize()
+#if !BUSE(PARTIAL_SCAVENGE)
             , mergedUsedSinceLastScavenge
+#endif
         );
         
     }
@@ -152,9 +164,10 @@
         left.begin(),
         a.size() + b.size(),
         left.startPhysicalSize(),
-        a.totalPhysicalSize() + b.totalPhysicalSize(),
-        physicalEnd
+        a.totalPhysicalSize() + b.totalPhysicalSize()
+#if !BUSE(PARTIAL_SCAVENGE)
         , mergedUsedSinceLastScavenge
+#endif
     );
 }
 
@@ -162,12 +175,11 @@
 {
     BASSERT(leftSize <= this->size());
     size_t rightSize = this->size() - leftSize;
-    char* physicalEnd = this->physicalEnd();
 
     if (leftSize <= startPhysicalSize()) {
         BASSERT(totalPhysicalSize() >= leftSize);
-        LargeRange left(begin(), leftSize, leftSize, leftSize, std::min(physicalEnd, begin() + leftSize));
-        LargeRange right(left.end(), rightSize, startPhysicalSize() - leftSize, totalPhysicalSize() - leftSize, std::max(physicalEnd, left.end()));
+        LargeRange left(begin(), leftSize, leftSize, leftSize);
+        LargeRange right(left.end(), rightSize, startPhysicalSize() - leftSize, totalPhysicalSize() - leftSize);
         return std::make_pair(left, right);
     }
 
@@ -182,8 +194,8 @@
         rightTotalPhysicalSize = rightSize;
     }
 
-    LargeRange left(begin(), leftSize, startPhysicalSize(), leftTotalPhysicalSize, std::min(physicalEnd, begin() + leftSize));
-    LargeRange right(left.end(), rightSize, 0, rightTotalPhysicalSize, std::max(physicalEnd, left.end()));
+    LargeRange left(begin(), leftSize, startPhysicalSize(), leftTotalPhysicalSize);
+    LargeRange right(left.end(), rightSize, 0, rightTotalPhysicalSize);
     return std::make_pair(left, right);
 }
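The reinstated LargeRange drops the physicalEnd pointer and goes back to tracking two sizes: startPhysicalSize, a lower bound on the committed memory at the front of the range, and totalPhysicalSize, the committed memory anywhere in the range. The merge() arithmetic above is easiest to see with concrete numbers; the sketch below is a simplified stand-alone model of that bookkeeping, not the bmalloc class itself.

    #include <cassert>
    #include <cstddef>

    // Simplified model of LargeRange's physical-size bookkeeping.
    struct PhysicalSizes {
        size_t size;               // virtual size of the range
        size_t startPhysicalSize;  // committed bytes at the front of the range
        size_t totalPhysicalSize;  // committed bytes anywhere in the range
    };

    // Mirrors the arithmetic in bmalloc::merge(); 'left' is the lower-addressed range.
    PhysicalSizes merge(const PhysicalSizes& left, const PhysicalSizes& right)
    {
        size_t mergedStart = (left.size == left.startPhysicalSize)
            ? left.startPhysicalSize + right.startPhysicalSize  // committed front run continues into 'right'
            : left.startPhysicalSize;                           // committed front run ends inside 'left'
        return { left.size + right.size, mergedStart,
                 left.totalPhysicalSize + right.totalPhysicalSize };
    }

    int main()
    {
        // A fully committed 64 KB range followed by a 64 KB range with 16 KB committed at its front.
        PhysicalSizes merged = merge({ 65536, 65536, 65536 }, { 65536, 16384, 16384 });
        assert(merged.size == 131072);
        assert(merged.startPhysicalSize == 65536 + 16384); // the committed front run spans the boundary
        assert(merged.totalPhysicalSize == 65536 + 16384);
        return 0;
    }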
 

Modified: trunk/Source/bmalloc/bmalloc/Scavenger.cpp (278446 => 278447)


--- trunk/Source/bmalloc/bmalloc/Scavenger.cpp	2021-06-04 03:42:58 UTC (rev 278446)
+++ trunk/Source/bmalloc/bmalloc/Scavenger.cpp	2021-06-04 03:45:04 UTC (rev 278447)
@@ -85,7 +85,11 @@
     dispatch_resume(m_pressureHandlerDispatchSource);
     dispatch_release(queue);
 #endif
+#if BUSE(PARTIAL_SCAVENGE)
+    m_waitTime = std::chrono::milliseconds(m_isInMiniMode ? 200 : 2000);
+#else
     m_waitTime = std::chrono::milliseconds(10);
+#endif
 
     m_thread = std::thread(&threadEntryPoint, this);
 }
@@ -116,6 +120,12 @@
     m_condition.notify_all();
 }
 
+void Scavenger::didStartGrowing()
+{
+    // We don't really need to lock here, since this is just a heuristic.
+    m_isProbablyGrowing = true;
+}
+
 void Scavenger::scheduleIfUnderMemoryPressure(size_t bytes)
 {
     LockHolder lock(mutex());
@@ -136,6 +146,7 @@
     if (!isUnderMemoryPressure())
         return;
 
+    m_isProbablyGrowing = false;
     run(lock);
 }
 
@@ -147,6 +158,7 @@
     if (willRunSoon())
         return;
     
+    m_isProbablyGrowing = false;
     runSoon(lock);
 }
 
@@ -175,6 +187,14 @@
     return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - m_lastFullScavengeTime);
 }
 
+#if BUSE(PARTIAL_SCAVENGE)
+std::chrono::milliseconds Scavenger::timeSinceLastPartialScavenge()
+{
+    UniqueLockHolder lock(mutex());
+    return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - m_lastPartialScavengeTime);
+}
+#endif
+
 void Scavenger::enableMiniMode()
 {
     m_isInMiniMode = true; // We just store to this racily. The scavenger thread will eventually pick up the right value.
@@ -200,17 +220,25 @@
 
         {
             PrintTime printTime("\nfull scavenge under lock time");
+#if !BUSE(PARTIAL_SCAVENGE)
             size_t deferredDecommits = 0;
+#endif
             UniqueLockHolder lock(Heap::mutex());
             for (unsigned i = numHeaps; i--;) {
                 if (!isActiveHeapKind(static_cast<HeapKind>(i)))
                     continue;
+#if BUSE(PARTIAL_SCAVENGE)
+                PerProcess<PerHeapKind<Heap>>::get()->at(i).scavenge(lock, decommitter);
+#else
                 PerProcess<PerHeapKind<Heap>>::get()->at(i).scavenge(lock, decommitter, deferredDecommits);
+#endif
             }
             decommitter.processEager();
 
+#if !BUSE(PARTIAL_SCAVENGE)
             if (deferredDecommits)
                 m_state = State::RunSoon;
+#endif
         }
 
         {
@@ -251,6 +279,78 @@
     }
 }
 
+#if BUSE(PARTIAL_SCAVENGE)
+void Scavenger::partialScavenge()
+{
+    if (!m_isEnabled)
+        return;
+
+    UniqueLockHolder lock(m_scavengingMutex);
+
+    if (verbose) {
+        fprintf(stderr, "--------------------------------\n");
+        fprintf(stderr, "--before partial scavenging--\n");
+        dumpStats();
+    }
+
+    {
+        BulkDecommit decommitter;
+        {
+            PrintTime printTime("\npartialScavenge under lock time");
+            UniqueLockHolder lock(Heap::mutex());
+            for (unsigned i = numHeaps; i--;) {
+                if (!isActiveHeapKind(static_cast<HeapKind>(i)))
+                    continue;
+                Heap& heap = PerProcess<PerHeapKind<Heap>>::get()->at(i);
+                size_t freeableMemory = heap.freeableMemory(lock);
+                if (freeableMemory < 4 * MB)
+                    continue;
+                heap.scavengeToHighWatermark(lock, decommitter);
+            }
+
+            decommitter.processEager();
+        }
+
+        {
+            PrintTime printTime("partialScavenge lazy decommit time");
+            decommitter.processLazy();
+        }
+
+        {
+            PrintTime printTime("partialScavenge mark all as eligible time");
+            LockHolder lock(Heap::mutex());
+            for (unsigned i = numHeaps; i--;) {
+                if (!isActiveHeapKind(static_cast<HeapKind>(i)))
+                    continue;
+                Heap& heap = PerProcess<PerHeapKind<Heap>>::get()->at(i);
+                heap.markAllLargeAsEligibile(lock);
+            }
+        }
+    }
+
+    {
+        RELEASE_BASSERT(!m_deferredDecommits.size());
+        AllIsoHeaps::get()->forEach(
+            [&] (IsoHeapImplBase& heap) {
+                heap.scavengeToHighWatermark(m_deferredDecommits);
+            });
+        IsoHeapImplBase::finishScavenging(m_deferredDecommits);
+        m_deferredDecommits.shrink(0);
+    }
+
+    if (verbose) {
+        fprintf(stderr, "--after partial scavenging--\n");
+        dumpStats();
+        fprintf(stderr, "--------------------------------\n");
+    }
+
+    {
+        UniqueLockHolder lock(mutex());
+        m_lastPartialScavengeTime = std::chrono::steady_clock::now();
+    }
+}
+#endif
+
 size_t Scavenger::freeableMemory()
 {
     size_t result = 0;
@@ -332,6 +432,69 @@
             fprintf(stderr, "--------------------------------\n");
         }
 
+#if BUSE(PARTIAL_SCAVENGE)
+        enum class ScavengeMode {
+            None,
+            Partial,
+            Full
+        };
+
+        size_t freeableMemory = this->freeableMemory();
+
+        ScavengeMode scavengeMode = [&] {
+            auto timeSinceLastFullScavenge = this->timeSinceLastFullScavenge();
+            auto timeSinceLastPartialScavenge = this->timeSinceLastPartialScavenge();
+            auto timeSinceLastScavenge = std::min(timeSinceLastPartialScavenge, timeSinceLastFullScavenge);
+
+            if (isUnderMemoryPressure() && freeableMemory > 1 * MB && timeSinceLastScavenge > std::chrono::milliseconds(5))
+                return ScavengeMode::Full;
+
+            if (!m_isProbablyGrowing) {
+                if (timeSinceLastFullScavenge < std::chrono::milliseconds(1000) && !m_isInMiniMode)
+                    return ScavengeMode::Partial;
+                return ScavengeMode::Full;
+            }
+
+            if (m_isInMiniMode) {
+                if (timeSinceLastFullScavenge < std::chrono::milliseconds(200))
+                    return ScavengeMode::Partial;
+                return ScavengeMode::Full;
+            }
+
+#if BCPU(X86_64)
+            auto partialScavengeInterval = std::chrono::milliseconds(12000);
+#else
+            auto partialScavengeInterval = std::chrono::milliseconds(8000);
+#endif
+            if (timeSinceLastScavenge < partialScavengeInterval) {
+                // Rate limit partial scavenges.
+                return ScavengeMode::None;
+            }
+            if (freeableMemory < 25 * MB)
+                return ScavengeMode::None;
+            if (5 * freeableMemory < footprint())
+                return ScavengeMode::None;
+            return ScavengeMode::Partial;
+        }();
+
+        m_isProbablyGrowing = false;
+
+        switch (scavengeMode) {
+        case ScavengeMode::None: {
+            runSoon();
+            break;
+        }
+        case ScavengeMode::Partial: {
+            partialScavenge();
+            runSoon();
+            break;
+        }
+        case ScavengeMode::Full: {
+            scavenge();
+            break;
+        }
+        }
+#else
         std::chrono::steady_clock::time_point start { std::chrono::steady_clock::now() };
         
         scavenge();
@@ -346,13 +509,14 @@
         // FIXME: We need to investigate mini-mode's adjustment.
         // https://bugs.webkit.org/show_bug.cgi?id=203987
         if (!m_isInMiniMode) {
-            timeSpentScavenging *= s_newWaitMultiplier;
+            timeSpentScavenging *= 150;
             std::chrono::milliseconds newWaitTime = std::chrono::duration_cast<std::chrono::milliseconds>(timeSpentScavenging);
-            m_waitTime = std::min(std::max(newWaitTime, std::chrono::milliseconds(s_minWaitTimeMilliseconds)), std::chrono::milliseconds(s_maxWaitTimeMilliseconds));
+            m_waitTime = std::min(std::max(newWaitTime, std::chrono::milliseconds(100)), std::chrono::milliseconds(10000));
         }
 
         if (verbose)
             fprintf(stderr, "new wait time %lldms\n", static_cast<long long int>(m_waitTime.count()));
+#endif
     }
 }
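With the rollout, the end of threadRunLoop() returns to hard-coded tuning: outside mini mode, the time a full scavenge took is multiplied by 150 and clamped to [100 ms, 10000 ms] to produce the next wait time (the reverted patch had parameterized these as s_newWaitMultiplier, s_minWaitTimeMilliseconds, and s_maxWaitTimeMilliseconds, with larger values on macOS). A small stand-alone sketch of that calculation:

    #include <algorithm>
    #include <chrono>
    #include <cstdio>

    // Mirrors the reinstated wait-time adjustment at the end of Scavenger::threadRunLoop()
    // for the non-mini-mode path.
    std::chrono::milliseconds nextWaitTime(std::chrono::steady_clock::duration timeSpentScavenging)
    {
        timeSpentScavenging *= 150;
        auto newWaitTime = std::chrono::duration_cast<std::chrono::milliseconds>(timeSpentScavenging);
        return std::min(std::max(newWaitTime, std::chrono::milliseconds(100)), std::chrono::milliseconds(10000));
    }

    int main()
    {
        // A 2 ms scavenge maps to a 300 ms wait; a 200 us scavenge hits the 100 ms floor;
        // a 500 ms scavenge hits the 10 s ceiling.
        std::printf("%lld\n", static_cast<long long>(nextWaitTime(std::chrono::milliseconds(2)).count()));
        std::printf("%lld\n", static_cast<long long>(nextWaitTime(std::chrono::microseconds(200)).count()));
        std::printf("%lld\n", static_cast<long long>(nextWaitTime(std::chrono::milliseconds(500)).count()));
        return 0;
    }

With these constants, a scavenge that takes a couple of milliseconds schedules the next pass a few hundred milliseconds out, while very short or very long scavenges are pinned to the floor and ceiling respectively.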
 

Modified: trunk/Source/bmalloc/bmalloc/Scavenger.h (278446 => 278447)


--- trunk/Source/bmalloc/bmalloc/Scavenger.h	2021-06-04 03:42:58 UTC (rev 278446)
+++ trunk/Source/bmalloc/bmalloc/Scavenger.h	2021-06-04 03:45:04 UTC (rev 278447)
@@ -59,6 +59,7 @@
     bool willRunSoon() { return m_state > State::Sleep; }
     void runSoon();
     
+    BEXPORT void didStartGrowing();
     BEXPORT void scheduleIfUnderMemoryPressure(size_t bytes);
     BEXPORT void schedule(size_t bytes);
 
@@ -91,10 +92,15 @@
     void setThreadName(const char*);
 
     std::chrono::milliseconds timeSinceLastFullScavenge();
+#if BUSE(PARTIAL_SCAVENGE)
+    std::chrono::milliseconds timeSinceLastPartialScavenge();
+    void partialScavenge();
+#endif
 
     std::atomic<State> m_state { State::Sleep };
     size_t m_scavengerBytes { 0 };
     std::chrono::milliseconds m_waitTime;
+    bool m_isProbablyGrowing { false };
     bool m_isInMiniMode { false };
     
     Mutex m_scavengingMutex;
@@ -102,6 +108,9 @@
 
     std::thread m_thread;
     std::chrono::steady_clock::time_point m_lastFullScavengeTime { std::chrono::steady_clock::now() };
+#if BUSE(PARTIAL_SCAVENGE)
+    std::chrono::steady_clock::time_point m_lastPartialScavengeTime { std::chrono::steady_clock::now() };
+#endif
 
 #if BOS(DARWIN)
     dispatch_source_t m_pressureHandlerDispatchSource;
@@ -108,16 +117,6 @@
     qos_class_t m_requestedScavengerThreadQOSClass { QOS_CLASS_USER_INITIATED };
 #endif
     
-#if BPLATFORM(MAC)
-    const unsigned s_newWaitMultiplier = 300;
-    const unsigned s_minWaitTimeMilliseconds = 750;
-    const unsigned s_maxWaitTimeMilliseconds = 20000;
-#else
-    const unsigned s_newWaitMultiplier = 150;
-    const unsigned s_minWaitTimeMilliseconds = 100;
-    const unsigned s_maxWaitTimeMilliseconds = 10000;
-#endif
-
     Vector<DeferredDecommit> m_deferredDecommits;
     bool m_isEnabled { true };
 };

Modified: trunk/Source/bmalloc/bmalloc/SmallPage.h (278446 => 278447)


--- trunk/Source/bmalloc/bmalloc/SmallPage.h	2021-06-04 03:42:58 UTC (rev 278446)
+++ trunk/Source/bmalloc/bmalloc/SmallPage.h	2021-06-04 03:45:04 UTC (rev 278447)
@@ -51,9 +51,11 @@
     bool hasPhysicalPages() { return m_hasPhysicalPages; }
     void setHasPhysicalPages(bool hasPhysicalPages) { m_hasPhysicalPages = hasPhysicalPages; }
 
+#if !BUSE(PARTIAL_SCAVENGE)
     bool usedSinceLastScavenge() { return m_usedSinceLastScavenge; }
     void clearUsedSinceLastScavenge() { m_usedSinceLastScavenge = false; }
     void setUsedSinceLastScavenge() { m_usedSinceLastScavenge = true; }
+#endif
 
     SmallLine* begin();
 
@@ -63,7 +65,9 @@
 private:
     unsigned char m_hasFreeLines: 1;
     unsigned char m_hasPhysicalPages: 1;
+#if !BUSE(PARTIAL_SCAVENGE)
     unsigned char m_usedSinceLastScavenge: 1;
+#endif
     unsigned char m_refCount: 7;
     unsigned char m_sizeClass;
     unsigned char m_slide;