Diff
Modified: trunk/Source/bmalloc/ChangeLog (198717 => 198718)
--- trunk/Source/bmalloc/ChangeLog 2016-03-26 18:37:03 UTC (rev 198717)
+++ trunk/Source/bmalloc/ChangeLog 2016-03-26 20:07:25 UTC (rev 198718)
@@ -1,3 +1,19 @@
+2016-03-26 Geoffrey Garen <[email protected]>
+
+ Unreviewed, rolling out r198702, r198704.
+
+ Caused a memory regression on PLUM.
+
+ Reverted changesets:
+
+ bmalloc: fix an ASSERT on iOS
+ https://bugs.webkit.org/show_bug.cgi?id=155911
+ http://trac.webkit.org/changeset/198704
+
+ bmalloc: support physical page sizes that don't match the virtual page size
+ https://bugs.webkit.org/show_bug.cgi?id=155898
+ http://trac.webkit.org/changeset/198702
+
2016-03-25 Geoffrey Garen <[email protected]>
bmalloc: fix an ASSERT on iOS
Modified: trunk/Source/bmalloc/bmalloc/Chunk.h (198717 => 198718)
--- trunk/Source/bmalloc/bmalloc/Chunk.h 2016-03-26 18:37:03 UTC (rev 198717)
+++ trunk/Source/bmalloc/bmalloc/Chunk.h 2016-03-26 20:07:25 UTC (rev 198718)
@@ -81,14 +81,14 @@
// We use the X's for boundary tags and the O's for edge sentinels.
std::array<SmallLine, chunkSize / smallLineSize> m_lines;
- std::array<SmallPage, chunkSize / smallPageSize> m_pages;
+ std::array<SmallPage, chunkSize / vmPageSize> m_pages;
std::array<BoundaryTag, boundaryTagCount> m_boundaryTags;
- char m_memory[] __attribute__((aligned(largeAlignment + 0)));
+ char m_memory[] __attribute__((aligned(2 * smallMax + 0)));
};
static_assert(sizeof(Chunk) + largeMax <= chunkSize, "largeMax is too big");
static_assert(
- sizeof(Chunk) % smallPageSize + 2 * smallMax <= smallPageSize,
+ sizeof(Chunk) % vmPageSize + 2 * smallMax <= vmPageSize,
"the first page of object memory in a small chunk must be able to allocate smallMax");
inline Chunk::Chunk(std::lock_guard<StaticMutex>& lock)
@@ -165,7 +165,7 @@
inline SmallPage* Chunk::page(size_t offset)
{
- size_t pageNumber = offset / smallPageSize;
+ size_t pageNumber = offset / vmPageSize;
return &m_pages[pageNumber];
}
@@ -221,7 +221,7 @@
inline void* Object::pageBegin()
{
- return m_chunk->object(roundDownToMultipleOf(smallPageSize, m_offset));
+ return m_chunk->object(roundDownToMultipleOf(vmPageSize, m_offset));
}
inline SmallLine* Object::line()
Modified: trunk/Source/bmalloc/bmalloc/Heap.cpp (198717 => 198718)
--- trunk/Source/bmalloc/bmalloc/Heap.cpp 2016-03-26 18:37:03 UTC (rev 198717)
+++ trunk/Source/bmalloc/bmalloc/Heap.cpp 2016-03-26 20:07:25 UTC (rev 198718)
@@ -52,7 +52,7 @@
size_t object = 0;
size_t line = 0;
- while (object < smallPageSize) {
+ while (object < vmPageSize) {
line = object / smallLineSize;
size_t leftover = object % smallLineSize;
@@ -66,7 +66,7 @@
}
// Don't allow the last object in a page to escape the page.
- if (object > smallPageSize) {
+ if (object > vmPageSize) {
BASSERT(metadata[line].objectCount);
--metadata[line].objectCount;
}
@@ -200,8 +200,8 @@
return page;
}
- size_t unalignedSize = largeMin + smallPageSize - largeAlignment + smallPageSize;
- LargeObject largeObject = allocateLarge(lock, smallPageSize, smallPageSize, unalignedSize);
+ size_t unalignedSize = largeMin + vmPageSize - largeAlignment + vmPageSize;
+ LargeObject largeObject = allocateLarge(lock, vmPageSize, vmPageSize, unalignedSize);
// Transform our large object into a small object page. We deref here
// because our small objects will keep their own refcounts on the line.
@@ -307,7 +307,7 @@
BASSERT(size >= largeMin);
BASSERT(size == roundUpToMultipleOf<largeAlignment>(size));
- if (size <= smallPageSize)
+ if (size <= vmPageSize)
scavengeSmallPages(lock);
LargeObject largeObject = m_largeObjects.take(size);
@@ -338,7 +338,7 @@
BASSERT(alignment >= largeAlignment);
BASSERT(isPowerOfTwo(alignment));
- if (size <= smallPageSize)
+ if (size <= vmPageSize)
scavengeSmallPages(lock);
LargeObject largeObject = m_largeObjects.take(alignment, size, unalignedSize);
@@ -412,7 +412,7 @@
// in the allocated list. This is an important optimization because it
// keeps the free list short, speeding up allocation and merging.
- std::pair<XLargeRange, XLargeRange> allocated = range.split(roundUpToMultipleOf(vmPageSizePhysical(), size));
+ std::pair<XLargeRange, XLargeRange> allocated = range.split(roundUpToMultipleOf<vmPageSize>(size));
if (allocated.first.vmState().hasVirtual()) {
vmAllocatePhysicalPagesSloppy(allocated.first.begin(), allocated.first.size());
allocated.first.setVMState(VMState::Physical);
@@ -429,7 +429,7 @@
m_isAllocatingPages = true;
- size = std::max(vmPageSizePhysical(), size);
+ size = std::max(vmPageSize, size);
alignment = roundUpToMultipleOf<xLargeAlignment>(alignment);
XLargeRange range = m_xLargeMap.takeFree(alignment, size);
@@ -456,7 +456,7 @@
{
BASSERT(object.size() > newSize);
- if (object.size() - newSize < vmPageSizePhysical())
+ if (object.size() - newSize < vmPageSize)
return;
XLargeRange range = m_xLargeMap.takeAllocated(object.begin());
Modified: trunk/Source/bmalloc/bmalloc/Sizes.h (198717 => 198718)
--- trunk/Source/bmalloc/bmalloc/Sizes.h 2016-03-26 18:37:03 UTC (rev 198717)
+++ trunk/Source/bmalloc/bmalloc/Sizes.h 2016-03-26 20:07:25 UTC (rev 198718)
@@ -46,10 +46,14 @@
static const size_t alignment = 8;
static const size_t alignmentMask = alignment - 1ul;
- static const size_t smallPageSize = 4 * kB;
-
+#if BPLATFORM(IOS)
+ static const size_t vmPageSize = 16 * kB;
+#else
+ static const size_t vmPageSize = 4 * kB;
+#endif
+
static const size_t smallLineSize = 256;
- static const size_t smallLineCount = smallPageSize / smallLineSize;
+ static const size_t smallLineCount = vmPageSize / smallLineSize;
static const size_t smallMax = 1 * kB;
static const size_t maskSizeClassMax = 512;
Modified: trunk/Source/bmalloc/bmalloc/VMAllocate.h (198717 => 198718)
--- trunk/Source/bmalloc/bmalloc/VMAllocate.h 2016-03-26 18:37:03 UTC (rev 198717)
+++ trunk/Source/bmalloc/bmalloc/VMAllocate.h 2016-03-26 20:07:25 UTC (rev 198718)
@@ -26,16 +26,15 @@
#ifndef VMAllocate_h
#define VMAllocate_h
-#include "Algorithm.h"
#include "BAssert.h"
#include "Range.h"
+#include "Sizes.h"
#include "Syscall.h"
#include <algorithm>
#include <sys/mman.h>
#include <unistd.h>
#if BOS(DARWIN)
-#include <mach/vm_page_size.h>
#include <mach/vm_statistics.h>
#endif
@@ -47,61 +46,33 @@
#define BMALLOC_VM_TAG -1
#endif
-inline size_t vmPageSizePhysical()
-{
-#if BPLATFORM(DARWIN)
- return vm_kernel_page_size;
-#else
- return sysconf(_SC_PAGESIZE);
-#endif
-}
-
-inline size_t vmPageSize()
-{
-#if BPLATFORM(DARWIN)
- return vm_page_size;
-#else
- return sysconf(_SC_PAGESIZE);
-#endif
-}
-
inline size_t vmSize(size_t size)
{
- return roundUpToMultipleOf(vmPageSize(), size);
+ return roundUpToMultipleOf<vmPageSize>(size);
}
inline void vmValidate(size_t vmSize)
{
+ // We use getpagesize() here instead of vmPageSize because vmPageSize is
+ // allowed to be larger than the OS's true page size.
+
UNUSED(vmSize);
BASSERT(vmSize);
- BASSERT(vmSize == roundUpToMultipleOf(vmPageSize(), vmSize));
+ BASSERT(vmSize == roundUpToMultipleOf(static_cast<size_t>(getpagesize()), vmSize));
}
inline void vmValidate(void* p, size_t vmSize)
{
+ // We use getpagesize() here instead of vmPageSize because vmPageSize is
+ // allowed to be larger than the OS's true page size.
+
vmValidate(vmSize);
UNUSED(p);
BASSERT(p);
- BASSERT(p == mask(p, ~(vmPageSize() - 1)));
+ BASSERT(p == mask(p, ~(getpagesize() - 1)));
}
-inline void vmValidatePhysical(size_t vmSize)
-{
- UNUSED(vmSize);
- BASSERT(vmSize);
- BASSERT(vmSize == roundUpToMultipleOf(vmPageSizePhysical(), vmSize));
-}
-
-inline void vmValidatePhysical(void* p, size_t vmSize)
-{
- vmValidatePhysical(vmSize);
-
- UNUSED(p);
- BASSERT(p);
- BASSERT(p == mask(p, ~(vmPageSizePhysical() - 1)));
-}
-
inline void* tryVMAllocate(size_t vmSize)
{
vmValidate(vmSize);
@@ -164,7 +135,7 @@
inline void vmDeallocatePhysicalPages(void* p, size_t vmSize)
{
- vmValidatePhysical(p, vmSize);
+ vmValidate(p, vmSize);
#if BOS(DARWIN)
SYSCALL(madvise(p, vmSize, MADV_FREE_REUSABLE));
#else
@@ -174,7 +145,7 @@
inline void vmAllocatePhysicalPages(void* p, size_t vmSize)
{
- vmValidatePhysical(p, vmSize);
+ vmValidate(p, vmSize);
#if BOS(DARWIN)
SYSCALL(madvise(p, vmSize, MADV_FREE_REUSE));
#else
@@ -185,8 +156,8 @@
// Trims requests that are un-page-aligned.
inline void vmDeallocatePhysicalPagesSloppy(void* p, size_t size)
{
- char* begin = roundUpToMultipleOf(vmPageSizePhysical(), static_cast<char*>(p));
- char* end = roundDownToMultipleOf(vmPageSizePhysical(), static_cast<char*>(p) + size);
+ char* begin = roundUpToMultipleOf<vmPageSize>(static_cast<char*>(p));
+ char* end = roundDownToMultipleOf<vmPageSize>(static_cast<char*>(p) + size);
if (begin >= end)
return;
@@ -197,8 +168,8 @@
// Expands requests that are un-page-aligned.
inline void vmAllocatePhysicalPagesSloppy(void* p, size_t size)
{
- char* begin = roundDownToMultipleOf(vmPageSizePhysical(), static_cast<char*>(p));
- char* end = roundUpToMultipleOf(vmPageSizePhysical(), static_cast<char*>(p) + size);
+ char* begin = roundDownToMultipleOf<vmPageSize>(static_cast<char*>(p));
+ char* end = roundUpToMultipleOf<vmPageSize>(static_cast<char*>(p) + size);
if (begin >= end)
return;
Modified: trunk/Source/bmalloc/bmalloc/Vector.h (198717 => 198718)
--- trunk/Source/bmalloc/bmalloc/Vector.h 2016-03-26 18:37:03 UTC (rev 198717)
+++ trunk/Source/bmalloc/bmalloc/Vector.h 2016-03-26 20:07:25 UTC (rev 198718)
@@ -73,7 +73,7 @@
private:
static const size_t growFactor = 2;
static const size_t shrinkFactor = 4;
- static size_t initialCapacity() { return vmPageSize() / sizeof(T); }
+ static const size_t initialCapacity = vmPageSize / sizeof(T);
void growCapacity();
void shrinkCapacity();
@@ -150,7 +150,7 @@
{
BASSERT(size <= m_size);
m_size = size;
- if (m_size < m_capacity / shrinkFactor && m_capacity > initialCapacity())
+ if (m_capacity > initialCapacity && m_size < m_capacity / shrinkFactor)
shrinkCapacity();
}
@@ -171,14 +171,14 @@
template<typename T>
NO_INLINE void Vector<T>::shrinkCapacity()
{
- size_t newCapacity = max(initialCapacity(), m_capacity / shrinkFactor);
+ size_t newCapacity = max(initialCapacity, m_capacity / shrinkFactor);
reallocateBuffer(newCapacity);
}
template<typename T>
NO_INLINE void Vector<T>::growCapacity()
{
- size_t newCapacity = max(initialCapacity(), m_size * growFactor);
+ size_t newCapacity = max(initialCapacity, m_size * growFactor);
reallocateBuffer(newCapacity);
}
Modified: trunk/Source/bmalloc/bmalloc/XLargeMap.cpp (198717 => 198718)
--- trunk/Source/bmalloc/bmalloc/XLargeMap.cpp 2016-03-26 18:37:03 UTC (rev 198717)
+++ trunk/Source/bmalloc/bmalloc/XLargeMap.cpp 2016-03-26 20:07:25 UTC (rev 198718)
@@ -24,7 +24,6 @@
*/
#include "XLargeMap.h"
-#include "Sizes.h"
namespace bmalloc {