Title: [198702] trunk/Source/bmalloc
Revision
198702
Author
[email protected]
Date
2016-03-25 16:46:01 -0700 (Fri, 25 Mar 2016)

Log Message

bmalloc: support physical page sizes that don't match the virtual page size
https://bugs.webkit.org/show_bug.cgi?id=155898

Reviewed by Gavin Barraclough.

This is a memory savings on iOS devices where the virtual page size
is 16kB but the physical page size is 4kB.

* bmalloc/Chunk.h:
(bmalloc::Chunk::Chunk): smallPageSize is now unrelated to the OS's
page size -- it just reflects the optimal unit of memory to recycle
between small objects.

We only need to round up to largeAlignment because small objects allocate
as subsets of large objects now.

(bmalloc::Chunk::page):
(bmalloc::Object::pageBegin):
(bmalloc::Object::line): Adopt smallPageSize.

* bmalloc/Heap.cpp:
(bmalloc::Heap::initializeLineMetadata):
(bmalloc::Heap::allocateSmallPage):
(bmalloc::Heap::allocateLarge): Adopt smallPageSize.

(bmalloc::Heap::splitAndAllocate):
(bmalloc::Heap::tryAllocateXLarge):
(bmalloc::Heap::shrinkXLarge): Adopt vmPageSizePhysical(). We want the
physical page size because that's the unit at which the hardware MMU
will recycle memory.

* bmalloc/Sizes.h: Adopt smallPageSize.

* bmalloc/VMAllocate.h:
(bmalloc::vmPageSizePhysical):
(bmalloc::vmPageSize): Distinguish between page size, which is the virtual
memory page size advertised by the OS, and physical page size, which is the
true hardware page size.

(bmalloc::vmSize):
(bmalloc::vmValidate):
(bmalloc::vmValidatePhysical):
(bmalloc::tryVMAllocate):
(bmalloc::vmDeallocatePhysicalPages):
(bmalloc::vmAllocatePhysicalPages):
(bmalloc::vmDeallocatePhysicalPagesSloppy):
(bmalloc::vmAllocatePhysicalPagesSloppy): Adopt vmPageSize() and
vmPageSizePhysical().

* bmalloc/Vector.h:
(bmalloc::Vector::initialCapacity):
(bmalloc::Vector<T>::shrink):
(bmalloc::Vector<T>::shrinkCapacity):
(bmalloc::Vector<T>::growCapacity): Adopt vmPageSize(). We'd prefer to
use vmPageSizePhysical() but mmap() doesn't support it.

* bmalloc/XLargeMap.cpp: #include.

Modified Paths

Diff

Modified: trunk/Source/bmalloc/ChangeLog (198701 => 198702)


--- trunk/Source/bmalloc/ChangeLog	2016-03-25 23:45:13 UTC (rev 198701)
+++ trunk/Source/bmalloc/ChangeLog	2016-03-25 23:46:01 UTC (rev 198702)
@@ -1,5 +1,65 @@
 2016-03-25  Geoffrey Garen  <[email protected]>
 
+        bmalloc: support physical page sizes that don't match the virtual page size
+        https://bugs.webkit.org/show_bug.cgi?id=155898
+
+        Reviewed by Gavin Barraclough.
+
+        This is a memory savings on iOS devices where the virtual page size
+        is 16kB but the physical page size is 4kB.
+
+        * bmalloc/Chunk.h:
+        (bmalloc::Chunk::Chunk): smallPageSize is now unrelated to the OS's
+        page size -- it just reflects the optimal unit of memory to recycle
+        between small objects.
+
+        We only need to round up to largeAlignment because small objects allocate
+        as subsets of large objects now.
+
+        (bmalloc::Chunk::page):
+        (bmalloc::Object::pageBegin):
+        (bmalloc::Object::line): Adopt smallPageSize.
+
+        * bmalloc/Heap.cpp:
+        (bmalloc::Heap::initializeLineMetadata):
+        (bmalloc::Heap::allocateSmallPage):
+        (bmalloc::Heap::allocateLarge): Adopt smallPageSize.
+
+        (bmalloc::Heap::splitAndAllocate):
+        (bmalloc::Heap::tryAllocateXLarge):
+        (bmalloc::Heap::shrinkXLarge): Adopt vmPageSizePhysical(). We want the
+        physical page size because that's the unit at which the hardware MMU
+        will recycle memory.
+
+        * bmalloc/Sizes.h: Adopt smallPageSize.
+
+        * bmalloc/VMAllocate.h:
+        (bmalloc::vmPageSizePhysical):
+        (bmalloc::vmPageSize): Distinguish between page size, which is the virtual
+        memory page size advertised by the OS, and physical page size, which is the
+        true hardware page size.
+
+        (bmalloc::vmSize):
+        (bmalloc::vmValidate):
+        (bmalloc::vmValidatePhysical):
+        (bmalloc::tryVMAllocate):
+        (bmalloc::vmDeallocatePhysicalPages):
+        (bmalloc::vmAllocatePhysicalPages):
+        (bmalloc::vmDeallocatePhysicalPagesSloppy):
+        (bmalloc::vmAllocatePhysicalPagesSloppy): Adopt vmPageSize() and
+        vmPageSizePhysical().
+
+        * bmalloc/Vector.h:
+        (bmalloc::Vector::initialCapacity):
+        (bmalloc::Vector<T>::shrink):
+        (bmalloc::Vector<T>::shrinkCapacity):
+        (bmalloc::Vector<T>::growCapacity): Adopt vmPageSize(). We'd prefer to
+        use vmPageSizePhysical() but mmap() doesn't support it.
+
+        * bmalloc/XLargeMap.cpp: #include.
+
+2016-03-25  Geoffrey Garen  <[email protected]>
+
         Unreviewed, rolling in r198679.
 
         r198679 was just a rename. The regression was caused by r198675 and then

Modified: trunk/Source/bmalloc/bmalloc/Chunk.h (198701 => 198702)


--- trunk/Source/bmalloc/bmalloc/Chunk.h	2016-03-25 23:45:13 UTC (rev 198701)
+++ trunk/Source/bmalloc/bmalloc/Chunk.h	2016-03-25 23:46:01 UTC (rev 198702)
@@ -81,14 +81,14 @@
     // We use the X's for boundary tags and the O's for edge sentinels.
 
     std::array<SmallLine, chunkSize / smallLineSize> m_lines;
-    std::array<SmallPage, chunkSize / vmPageSize> m_pages;
+    std::array<SmallPage, chunkSize / smallPageSize> m_pages;
     std::array<BoundaryTag, boundaryTagCount> m_boundaryTags;
-    char m_memory[] __attribute__((aligned(2 * smallMax + 0)));
+    char m_memory[] __attribute__((aligned(largeAlignment + 0)));
 };
 
 static_assert(sizeof(Chunk) + largeMax <= chunkSize, "largeMax is too big");
 static_assert(
-    sizeof(Chunk) % vmPageSize + 2 * smallMax <= vmPageSize,
+    sizeof(Chunk) % smallPageSize + 2 * smallMax <= smallPageSize,
     "the first page of object memory in a small chunk must be able to allocate smallMax");
 
 inline Chunk::Chunk(std::lock_guard<StaticMutex>& lock)
@@ -165,7 +165,7 @@
 
 inline SmallPage* Chunk::page(size_t offset)
 {
-    size_t pageNumber = offset / vmPageSize;
+    size_t pageNumber = offset / smallPageSize;
     return &m_pages[pageNumber];
 }
 
@@ -221,7 +221,7 @@
 
 inline void* Object::pageBegin()
 {
-    return m_chunk->object(roundDownToMultipleOf(vmPageSize, m_offset));
+    return m_chunk->object(roundDownToMultipleOf(smallPageSize, m_offset));
 }
 
 inline SmallLine* Object::line()

Modified: trunk/Source/bmalloc/bmalloc/Heap.cpp (198701 => 198702)


--- trunk/Source/bmalloc/bmalloc/Heap.cpp	2016-03-25 23:45:13 UTC (rev 198701)
+++ trunk/Source/bmalloc/bmalloc/Heap.cpp	2016-03-25 23:46:01 UTC (rev 198702)
@@ -52,7 +52,7 @@
 
         size_t object = 0;
         size_t line = 0;
-        while (object < vmPageSize) {
+        while (object < smallPageSize) {
             line = object / smallLineSize;
             size_t leftover = object % smallLineSize;
 
@@ -66,7 +66,7 @@
         }
 
         // Don't allow the last object in a page to escape the page.
-        if (object > vmPageSize) {
+        if (object > smallPageSize) {
             BASSERT(metadata[line].objectCount);
             --metadata[line].objectCount;
         }
@@ -200,8 +200,8 @@
         return page;
     }
 
-    size_t unalignedSize = largeMin + vmPageSize - largeAlignment + vmPageSize;
-    LargeObject largeObject = allocateLarge(lock, vmPageSize, vmPageSize, unalignedSize);
+    size_t unalignedSize = largeMin + smallPageSize - largeAlignment + smallPageSize;
+    LargeObject largeObject = allocateLarge(lock, smallPageSize, smallPageSize, unalignedSize);
 
     // Transform our large object into a small object page. We deref here
     // because our small objects will keep their own refcounts on the line.
@@ -307,7 +307,7 @@
     BASSERT(size >= largeMin);
     BASSERT(size == roundUpToMultipleOf<largeAlignment>(size));
     
-    if (size <= vmPageSize)
+    if (size <= smallPageSize)
         scavengeSmallPages(lock);
 
     LargeObject largeObject = m_largeObjects.take(size);
@@ -338,7 +338,7 @@
     BASSERT(alignment >= largeAlignment);
     BASSERT(isPowerOfTwo(alignment));
 
-    if (size <= vmPageSize)
+    if (size <= smallPageSize)
         scavengeSmallPages(lock);
 
     LargeObject largeObject = m_largeObjects.take(alignment, size, unalignedSize);
@@ -412,7 +412,7 @@
     // in the allocated list. This is an important optimization because it
     // keeps the free list short, speeding up allocation and merging.
 
-    std::pair<XLargeRange, XLargeRange> allocated = range.split(roundUpToMultipleOf<vmPageSize>(size));
+    std::pair<XLargeRange, XLargeRange> allocated = range.split(roundUpToMultipleOf(vmPageSizePhysical(), size));
     if (allocated.first.vmState().hasVirtual()) {
         vmAllocatePhysicalPagesSloppy(allocated.first.begin(), allocated.first.size());
         allocated.first.setVMState(VMState::Physical);
@@ -429,7 +429,7 @@
 
     m_isAllocatingPages = true;
 
-    size = std::max(vmPageSize, size);
+    size = std::max(vmPageSizePhysical(), size);
     alignment = roundUpToMultipleOf<xLargeAlignment>(alignment);
 
     XLargeRange range = m_xLargeMap.takeFree(alignment, size);
@@ -456,7 +456,7 @@
 {
     BASSERT(object.size() > newSize);
 
-    if (object.size() - newSize < vmPageSize)
+    if (object.size() - newSize < vmPageSizePhysical())
         return;
     
     XLargeRange range = m_xLargeMap.takeAllocated(object.begin());

Modified: trunk/Source/bmalloc/bmalloc/Sizes.h (198701 => 198702)


--- trunk/Source/bmalloc/bmalloc/Sizes.h	2016-03-25 23:45:13 UTC (rev 198701)
+++ trunk/Source/bmalloc/bmalloc/Sizes.h	2016-03-25 23:46:01 UTC (rev 198702)
@@ -46,14 +46,10 @@
     static const size_t alignment = 8;
     static const size_t alignmentMask = alignment - 1ul;
 
-#if BPLATFORM(IOS)
-    static const size_t vmPageSize = 16 * kB;
-#else
-    static const size_t vmPageSize = 4 * kB;
-#endif
-    
+    static const size_t smallPageSize = 4 * kB;
+
     static const size_t smallLineSize = 256;
-    static const size_t smallLineCount = vmPageSize / smallLineSize;
+    static const size_t smallLineCount = smallPageSize / smallLineSize;
 
     static const size_t smallMax = 1 * kB;
     static const size_t maskSizeClassMax = 512;

Modified: trunk/Source/bmalloc/bmalloc/VMAllocate.h (198701 => 198702)


--- trunk/Source/bmalloc/bmalloc/VMAllocate.h	2016-03-25 23:45:13 UTC (rev 198701)
+++ trunk/Source/bmalloc/bmalloc/VMAllocate.h	2016-03-25 23:46:01 UTC (rev 198702)
@@ -26,15 +26,16 @@
 #ifndef VMAllocate_h
 #define VMAllocate_h
 
+#include "Algorithm.h"
 #include "BAssert.h"
 #include "Range.h"
-#include "Sizes.h"
 #include "Syscall.h"
 #include <algorithm>
 #include <sys/mman.h>
 #include <unistd.h>
 
 #if BOS(DARWIN)
+#include <mach/vm_page_size.h>
 #include <mach/vm_statistics.h>
 #endif
 
@@ -46,31 +47,59 @@
 #define BMALLOC_VM_TAG -1
 #endif
 
+inline size_t vmPageSizePhysical()
+{
+#if BPLATFORM(DARWIN)
+    return vm_kernel_page_size;
+#else
+    return sysconf(_SC_PAGESIZE);
+#endif
+}
+
+inline size_t vmPageSize()
+{
+#if BPLATFORM(DARWIN)
+    return vm_page_size;
+#else
+    return sysconf(_SC_PAGESIZE);
+#endif
+}
+
 inline size_t vmSize(size_t size)
 {
-    return roundUpToMultipleOf<vmPageSize>(size);
+    return roundUpToMultipleOf(vmPageSize(), size);
 }
 
 inline void vmValidate(size_t vmSize)
 {
-    // We use getpagesize() here instead of vmPageSize because vmPageSize is
-    // allowed to be larger than the OS's true page size.
-
     UNUSED(vmSize);
     BASSERT(vmSize);
-    BASSERT(vmSize == roundUpToMultipleOf(static_cast<size_t>(getpagesize()), vmSize));
+    BASSERT(vmSize == roundUpToMultipleOf(vmPageSize(), vmSize));
 }
 
 inline void vmValidate(void* p, size_t vmSize)
 {
-    // We use getpagesize() here instead of vmPageSize because vmPageSize is
-    // allowed to be larger than the OS's true page size.
+    vmValidate(vmSize);
+    
+    UNUSED(p);
+    BASSERT(p);
+    BASSERT(p == mask(p, ~(vmPageSize() - 1)));
+}
 
+inline void vmValidatePhysical(size_t vmSize)
+{
+    UNUSED(vmSize);
+    BASSERT(vmSize);
+    BASSERT(vmSize == roundUpToMultipleOf(vmPageSizePhysical(), vmSize));
+}
+
+inline void vmValidatePhysical(void* p, size_t vmSize)
+{
     vmValidate(vmSize);
     
     UNUSED(p);
     BASSERT(p);
-    BASSERT(p == mask(p, ~(getpagesize() - 1)));
+    BASSERT(p == mask(p, ~(vmPageSizePhysical() - 1)));
 }
 
 inline void* tryVMAllocate(size_t vmSize)
@@ -135,7 +164,7 @@
 
 inline void vmDeallocatePhysicalPages(void* p, size_t vmSize)
 {
-    vmValidate(p, vmSize);
+    vmValidatePhysical(p, vmSize);
 #if BOS(DARWIN)
     SYSCALL(madvise(p, vmSize, MADV_FREE_REUSABLE));
 #else
@@ -145,7 +174,7 @@
 
 inline void vmAllocatePhysicalPages(void* p, size_t vmSize)
 {
-    vmValidate(p, vmSize);
+    vmValidatePhysical(p, vmSize);
 #if BOS(DARWIN)
     SYSCALL(madvise(p, vmSize, MADV_FREE_REUSE));
 #else
@@ -156,8 +185,8 @@
 // Trims requests that are un-page-aligned.
 inline void vmDeallocatePhysicalPagesSloppy(void* p, size_t size)
 {
-    char* begin = roundUpToMultipleOf<vmPageSize>(static_cast<char*>(p));
-    char* end = roundDownToMultipleOf<vmPageSize>(static_cast<char*>(p) + size);
+    char* begin = roundUpToMultipleOf(vmPageSizePhysical(), static_cast<char*>(p));
+    char* end = roundDownToMultipleOf(vmPageSizePhysical(), static_cast<char*>(p) + size);
 
     if (begin >= end)
         return;
@@ -168,8 +197,8 @@
 // Expands requests that are un-page-aligned.
 inline void vmAllocatePhysicalPagesSloppy(void* p, size_t size)
 {
-    char* begin = roundDownToMultipleOf<vmPageSize>(static_cast<char*>(p));
-    char* end = roundUpToMultipleOf<vmPageSize>(static_cast<char*>(p) + size);
+    char* begin = roundDownToMultipleOf(vmPageSizePhysical(), static_cast<char*>(p));
+    char* end = roundUpToMultipleOf(vmPageSizePhysical(), static_cast<char*>(p) + size);
 
     if (begin >= end)
         return;

Modified: trunk/Source/bmalloc/bmalloc/Vector.h (198701 => 198702)


--- trunk/Source/bmalloc/bmalloc/Vector.h	2016-03-25 23:45:13 UTC (rev 198701)
+++ trunk/Source/bmalloc/bmalloc/Vector.h	2016-03-25 23:46:01 UTC (rev 198702)
@@ -73,7 +73,7 @@
 private:
     static const size_t growFactor = 2;
     static const size_t shrinkFactor = 4;
-    static const size_t initialCapacity = vmPageSize / sizeof(T);
+    static size_t initialCapacity() { return vmPageSize() / sizeof(T); }
 
     void growCapacity();
     void shrinkCapacity();
@@ -150,7 +150,7 @@
 {
     BASSERT(size <= m_size);
     m_size = size;
-    if (m_capacity > initialCapacity && m_size < m_capacity / shrinkFactor)
+    if (m_size < m_capacity / shrinkFactor && m_capacity > initialCapacity())
         shrinkCapacity();
 }
 
@@ -171,14 +171,14 @@
 template<typename T>
 NO_INLINE void Vector<T>::shrinkCapacity()
 {
-    size_t newCapacity = max(initialCapacity, m_capacity / shrinkFactor);
+    size_t newCapacity = max(initialCapacity(), m_capacity / shrinkFactor);
     reallocateBuffer(newCapacity);
 }
 
 template<typename T>
 NO_INLINE void Vector<T>::growCapacity()
 {
-    size_t newCapacity = max(initialCapacity, m_size * growFactor);
+    size_t newCapacity = max(initialCapacity(), m_size * growFactor);
     reallocateBuffer(newCapacity);
 }
 

Modified: trunk/Source/bmalloc/bmalloc/XLargeMap.cpp (198701 => 198702)


--- trunk/Source/bmalloc/bmalloc/XLargeMap.cpp	2016-03-25 23:45:13 UTC (rev 198701)
+++ trunk/Source/bmalloc/bmalloc/XLargeMap.cpp	2016-03-25 23:46:01 UTC (rev 198702)
@@ -24,6 +24,7 @@
  */
 
 #include "XLargeMap.h"
+#include "Sizes.h"
 
 namespace bmalloc {
 
_______________________________________________
webkit-changes mailing list
[email protected]
https://lists.webkit.org/mailman/listinfo/webkit-changes

Reply via email to