Title: [231175] trunk
Revision
231175
Author
[email protected]
Date
2018-04-30 15:29:21 -0700 (Mon, 30 Apr 2018)

Log Message

Apply PtrTags to the MetaAllocator and friends.
https://bugs.webkit.org/show_bug.cgi?id=185110
<rdar://problem/39533895>

Reviewed by Saam Barati.

Source/_javascript_Core:

1. LinkBuffer now takes a MacroAssemblerCodePtr instead of a void* pointer.
2. Apply pointer tagging to the boundary pointers of the FixedExecutableMemoryPool,
   and add a sanity check to verify that allocated code buffers are within those
   bounds.

* assembler/LinkBuffer.cpp:
(JSC::LinkBuffer::finalizeCodeWithoutDisassemblyImpl):
(JSC::LinkBuffer::copyCompactAndLinkCode):
(JSC::LinkBuffer::linkCode):
(JSC::LinkBuffer::allocate):
* assembler/LinkBuffer.h:
(JSC::LinkBuffer::LinkBuffer):
(JSC::LinkBuffer::debugAddress):
(JSC::LinkBuffer::code):
* assembler/MacroAssemblerCodeRef.h:
(JSC::MacroAssemblerCodeRef::MacroAssemblerCodeRef):
* bytecode/InlineAccess.cpp:
(JSC::linkCodeInline):
(JSC::InlineAccess::rewireStubAsJump):
* dfg/DFGJITCode.cpp:
(JSC::DFG::JITCode::findPC):
* ftl/FTLJITCode.cpp:
(JSC::FTL::JITCode::findPC):
* jit/ExecutableAllocator.cpp:
(JSC::FixedVMPoolExecutableAllocator::FixedVMPoolExecutableAllocator):
(JSC::FixedVMPoolExecutableAllocator::jitWriteThunkGenerator):
(JSC::ExecutableAllocator::allocate):
* jit/ExecutableAllocator.h:
(JSC::isJITPC):
(JSC::performJITMemcpy):
* jit/JIT.cpp:
(JSC::JIT::link):
* jit/JITMathIC.h:
(JSC::isProfileEmpty):
* runtime/JSCPtrTag.h:
* wasm/WasmCallee.cpp:
(JSC::Wasm::Callee::Callee):
* wasm/WasmFaultSignalHandler.cpp:
(JSC::Wasm::trapHandler):

Source/WTF:

1. Introduce a MetaAllocatorPtr smart pointer to do pointer tagging.
2. Use MetaAllocatorPtr in MetaAllocator and MetaAllocatorHandle.

* WTF.xcodeproj/project.pbxproj:
* wtf/CMakeLists.txt:
* wtf/MetaAllocator.cpp:
(WTF::MetaAllocator::release):
(WTF::MetaAllocatorHandle::MetaAllocatorHandle):
(WTF::MetaAllocatorHandle::shrink):
(WTF::MetaAllocatorHandle::dump const):
(WTF::MetaAllocator::allocate):
(WTF::MetaAllocator::findAndRemoveFreeSpace):
(WTF::MetaAllocator::addFreeSpaceFromReleasedHandle):
(WTF::MetaAllocator::addFreshFreeSpace):
(WTF::MetaAllocator::debugFreeSpaceSize):
(WTF::MetaAllocator::addFreeSpace):
(WTF::MetaAllocator::allocFreeSpaceNode):
* wtf/MetaAllocator.h:
(WTF::MetaAllocatorTracker::find):
(WTF::MetaAllocator::FreeSpaceNode::FreeSpaceNode):
(WTF::MetaAllocator::FreeSpaceNode::sizeInBytes):
(WTF::MetaAllocator::FreeSpaceNode::key):
* wtf/MetaAllocatorHandle.h:
(WTF::MetaAllocatorHandle::start const):
(WTF::MetaAllocatorHandle::end const):
(WTF::MetaAllocatorHandle::startAsInteger const):
(WTF::MetaAllocatorHandle::endAsInteger const):
(WTF::MetaAllocatorHandle::sizeInBytes const):
(WTF::MetaAllocatorHandle::containsIntegerAddress const):
(WTF::MetaAllocatorHandle::key):
* wtf/MetaAllocatorPtr.h: Added.
(WTF::MetaAllocatorPtr::MetaAllocatorPtr):
(WTF::MetaAllocatorPtr:: const):
(WTF::MetaAllocatorPtr::operator bool const):
(WTF::MetaAllocatorPtr::operator! const):
(WTF::MetaAllocatorPtr::operator== const):
(WTF::MetaAllocatorPtr::operator!= const):
(WTF::MetaAllocatorPtr::operator+ const):
(WTF::MetaAllocatorPtr::operator- const):
(WTF::MetaAllocatorPtr::operator+=):
(WTF::MetaAllocatorPtr::operator-=):
(WTF::MetaAllocatorPtr::isEmptyValue const):
(WTF::MetaAllocatorPtr::isDeletedValue const):
(WTF::MetaAllocatorPtr::hash const):
(WTF::MetaAllocatorPtr::emptyValue):
(WTF::MetaAllocatorPtr::deletedValue):
(WTF::MetaAllocatorPtrHash::hash):
(WTF::MetaAllocatorPtrHash::equal):
* wtf/PtrTag.h:

Tools:

Update the test to match MetaAllocator changes in WTF.

* TestWebKitAPI/Tests/WTF/MetaAllocator.cpp:
(TestWebKitAPI::TEST_F):
(WTF::tagForPtr):
(WTF::ptrTagName):

Modified Paths

Added Paths

Diff

Modified: trunk/Source/_javascript_Core/ChangeLog (231174 => 231175)


--- trunk/Source/_javascript_Core/ChangeLog	2018-04-30 22:21:42 UTC (rev 231174)
+++ trunk/Source/_javascript_Core/ChangeLog	2018-04-30 22:29:21 UTC (rev 231175)
@@ -1,3 +1,51 @@
+2018-04-30  Mark Lam  <[email protected]>
+
+        Apply PtrTags to the MetaAllocator and friends.
+        https://bugs.webkit.org/show_bug.cgi?id=185110
+        <rdar://problem/39533895>
+
+        Reviewed by Saam Barati.
+
+        1. LinkBuffer now takes a MacroAssemblerCodePtr instead of a void* pointer.
+        2. Apply pointer tagging to the boundary pointers of the FixedExecutableMemoryPool,
+           and add a sanity check to verify that allocated code buffers are within those
+           bounds.
+
+        * assembler/LinkBuffer.cpp:
+        (JSC::LinkBuffer::finalizeCodeWithoutDisassemblyImpl):
+        (JSC::LinkBuffer::copyCompactAndLinkCode):
+        (JSC::LinkBuffer::linkCode):
+        (JSC::LinkBuffer::allocate):
+        * assembler/LinkBuffer.h:
+        (JSC::LinkBuffer::LinkBuffer):
+        (JSC::LinkBuffer::debugAddress):
+        (JSC::LinkBuffer::code):
+        * assembler/MacroAssemblerCodeRef.h:
+        (JSC::MacroAssemblerCodeRef::MacroAssemblerCodeRef):
+        * bytecode/InlineAccess.cpp:
+        (JSC::linkCodeInline):
+        (JSC::InlineAccess::rewireStubAsJump):
+        * dfg/DFGJITCode.cpp:
+        (JSC::DFG::JITCode::findPC):
+        * ftl/FTLJITCode.cpp:
+        (JSC::FTL::JITCode::findPC):
+        * jit/ExecutableAllocator.cpp:
+        (JSC::FixedVMPoolExecutableAllocator::FixedVMPoolExecutableAllocator):
+        (JSC::FixedVMPoolExecutableAllocator::jitWriteThunkGenerator):
+        (JSC::ExecutableAllocator::allocate):
+        * jit/ExecutableAllocator.h:
+        (JSC::isJITPC):
+        (JSC::performJITMemcpy):
+        * jit/JIT.cpp:
+        (JSC::JIT::link):
+        * jit/JITMathIC.h:
+        (JSC::isProfileEmpty):
+        * runtime/JSCPtrTag.h:
+        * wasm/WasmCallee.cpp:
+        (JSC::Wasm::Callee::Callee):
+        * wasm/WasmFaultSignalHandler.cpp:
+        (JSC::Wasm::trapHandler):
+
 2018-04-30  Keith Miller  <[email protected]>
 
         Move the MayBePrototype JSCell header bit to InlineTypeFlags

Modified: trunk/Source/_javascript_Core/assembler/LinkBuffer.cpp (231174 => 231175)


--- trunk/Source/_javascript_Core/assembler/LinkBuffer.cpp	2018-04-30 22:21:42 UTC (rev 231174)
+++ trunk/Source/_javascript_Core/assembler/LinkBuffer.cpp	2018-04-30 22:29:21 UTC (rev 231175)
@@ -52,7 +52,7 @@
     if (m_executableMemory)
         return CodeRef<LinkBufferPtrTag>(*m_executableMemory);
     
-    return CodeRef<LinkBufferPtrTag>::createSelfManagedCodeRef(MacroAssemblerCodePtr<LinkBufferPtrTag>(tagCodePtr<LinkBufferPtrTag>(m_code)));
+    return CodeRef<LinkBufferPtrTag>::createSelfManagedCodeRef(m_code);
 }
 
 LinkBuffer::CodeRef<LinkBufferPtrTag> LinkBuffer::finalizeCodeWithDisassemblyImpl(const char* format, ...)
@@ -112,7 +112,7 @@
     AssemblerData outBuffer(m_size);
 
     uint8_t* outData = reinterpret_cast<uint8_t*>(outBuffer.buffer());
-    uint8_t* codeOutData = reinterpret_cast<uint8_t*>(m_code);
+    uint8_t* codeOutData = m_code.dataLocation<uint8_t*>();
 
     int readPtr = 0;
     int writePtr = 0;
@@ -184,13 +184,13 @@
         MacroAssembler::AssemblerType_T::fillNops(outData + compactSize, nopSizeInBytes, isCopyingToExecutableMemory);
     }
 
-    performJITMemcpy(m_code, outData, m_size);
+    performJITMemcpy(codeOutData, outData, m_size);
 
 #if DUMP_LINK_STATISTICS
-    dumpLinkStatistics(m_code, initialSize, m_size);
+    dumpLinkStatistics(codeOutData, initialSize, m_size);
 #endif
 #if DUMP_CODE
-    dumpCode(m_code, m_size);
+    dumpCode(codeOutData, m_size);
 #endif
 }
 #endif
@@ -210,12 +210,13 @@
         return;
     ASSERT(m_code);
     AssemblerBuffer& buffer = macroAssembler.m_assembler.buffer();
+    void* code = m_code.dataLocation();
 #if CPU(ARM_TRADITIONAL)
-    macroAssembler.m_assembler.prepareExecutableCopy(m_code);
+    macroAssembler.m_assembler.prepareExecutableCopy(code);
 #endif
-    performJITMemcpy(m_code, buffer.data(), buffer.codeSize());
+    performJITMemcpy(code, buffer.data(), buffer.codeSize());
 #if CPU(MIPS)
-    macroAssembler.m_assembler.relocateJumps(buffer.data(), m_code);
+    macroAssembler.m_assembler.relocateJumps(buffer.data(), code);
 #endif
 #elif CPU(ARM_THUMB2)
     copyCompactAndLinkCode<uint16_t>(macroAssembler, ownerUID, effort);
@@ -243,11 +244,11 @@
         macroAssembler.breakpoint();
         initialSize = macroAssembler.m_assembler.codeSize();
     }
-    
+
     m_executableMemory = ExecutableAllocator::singleton().allocate(initialSize, ownerUID, effort);
     if (!m_executableMemory)
         return;
-    m_code = m_executableMemory->start();
+    m_code = MacroAssemblerCodePtr<LinkBufferPtrTag>(m_executableMemory->start().retaggedPtr<LinkBufferPtrTag>());
     m_size = initialSize;
     m_didAllocate = true;
 }

Modified: trunk/Source/_javascript_Core/assembler/LinkBuffer.h (231174 => 231175)


--- trunk/Source/_javascript_Core/assembler/LinkBuffer.h	2018-04-30 22:21:42 UTC (rev 231174)
+++ trunk/Source/_javascript_Core/assembler/LinkBuffer.h	2018-04-30 22:29:21 UTC (rev 231175)
@@ -36,6 +36,7 @@
 
 #include "JITCompilationEffort.h"
 #include "MacroAssembler.h"
+#include "MacroAssemblerCodeRef.h"
 #include <wtf/DataLog.h>
 #include <wtf/FastMalloc.h>
 #include <wtf/Noncopyable.h>
@@ -81,7 +82,6 @@
     LinkBuffer(MacroAssembler& macroAssembler, void* ownerUID, JITCompilationEffort effort = JITCompilationMustSucceed)
         : m_size(0)
         , m_didAllocate(false)
-        , m_code(0)
 #ifndef NDEBUG
         , m_completed(false)
 #endif
@@ -89,10 +89,11 @@
         linkCode(macroAssembler, ownerUID, effort);
     }
 
-    LinkBuffer(MacroAssembler& macroAssembler, void* code, size_t size, JITCompilationEffort effort = JITCompilationMustSucceed, bool shouldPerformBranchCompaction = true)
+    template<PtrTag tag>
+    LinkBuffer(MacroAssembler& macroAssembler, MacroAssemblerCodePtr<tag> code, size_t size, JITCompilationEffort effort = JITCompilationMustSucceed, bool shouldPerformBranchCompaction = true)
         : m_size(size)
         , m_didAllocate(false)
-        , m_code(code)
+        , m_code(code.template retagged<LinkBufferPtrTag>())
 #ifndef NDEBUG
         , m_completed(false)
 #endif
@@ -281,7 +282,7 @@
 
     void* debugAddress()
     {
-        return m_code;
+        return m_code.dataLocation();
     }
 
     size_t size() const { return m_size; }
@@ -313,7 +314,7 @@
     // Keep this private! - the underlying code should only be obtained externally via finalizeCode().
     void* code()
     {
-        return m_code;
+        return m_code.dataLocation();
     }
     
     void allocate(MacroAssembler&, void* ownerUID, JITCompilationEffort);
@@ -341,7 +342,7 @@
     bool m_shouldPerformBranchCompaction { true };
 #endif
     bool m_didAllocate;
-    void* m_code;
+    MacroAssemblerCodePtr<LinkBufferPtrTag> m_code;
 #ifndef NDEBUG
     bool m_completed;
 #endif

Modified: trunk/Source/_javascript_Core/assembler/MacroAssemblerCodeRef.h (231174 => 231175)


--- trunk/Source/_javascript_Core/assembler/MacroAssemblerCodeRef.h	2018-04-30 22:21:42 UTC (rev 231174)
+++ trunk/Source/_javascript_Core/assembler/MacroAssemblerCodeRef.h	2018-04-30 22:29:21 UTC (rev 231175)
@@ -214,6 +214,8 @@
     static void dumpWithName(void* executableAddress, void* dataLocation, const char* name, PrintStream& out);
 };
 
+// FIXME: Make JSC MacroAssemblerCodePtr inherit from MetaAllocatorPtr.
+// https://bugs.webkit.org/show_bug.cgi?id=185145
 template<PtrTag tag>
 class MacroAssemblerCodePtr : private MacroAssemblerCodePtrBase {
 public:
@@ -402,7 +404,7 @@
     MacroAssemblerCodeRef() = default;
 
     MacroAssemblerCodeRef(Ref<ExecutableMemoryHandle>&& executableMemory)
-        : m_codePtr(tagCodePtr<tag>(executableMemory->start()))
+        : m_codePtr(executableMemory->start().retaggedPtr<tag>())
         , m_executableMemory(WTFMove(executableMemory))
     {
         ASSERT(m_executableMemory->isManaged());

Modified: trunk/Source/_javascript_Core/bytecode/InlineAccess.cpp (231174 => 231175)


--- trunk/Source/_javascript_Core/bytecode/InlineAccess.cpp	2018-04-30 22:21:42 UTC (rev 231174)
+++ trunk/Source/_javascript_Core/bytecode/InlineAccess.cpp	2018-04-30 22:29:21 UTC (rev 231175)
@@ -132,7 +132,7 @@
 {
     if (jit.m_assembler.buffer().codeSize() <= stubInfo.patch.inlineSize) {
         bool needsBranchCompaction = false;
-        LinkBuffer linkBuffer(jit, stubInfo.patch.start.dataLocation(), stubInfo.patch.inlineSize, JITCompilationMustSucceed, needsBranchCompaction);
+        LinkBuffer linkBuffer(jit, stubInfo.patch.start, stubInfo.patch.inlineSize, JITCompilationMustSucceed, needsBranchCompaction);
         ASSERT(linkBuffer.isValid());
         function(linkBuffer);
         FINALIZE_CODE(linkBuffer, NoPtrTag, "InlineAccessType: '%s'", name);
@@ -286,7 +286,7 @@
 
     // We don't need a nop sled here because nobody should be jumping into the middle of an IC.
     bool needsBranchCompaction = false;
-    LinkBuffer linkBuffer(jit, stubInfo.patch.start.dataLocation(), jit.m_assembler.buffer().codeSize(), JITCompilationMustSucceed, needsBranchCompaction);
+    LinkBuffer linkBuffer(jit, stubInfo.patch.start, jit.m_assembler.buffer().codeSize(), JITCompilationMustSucceed, needsBranchCompaction);
     RELEASE_ASSERT(linkBuffer.isValid());
     linkBuffer.link(jump, target);
 

Modified: trunk/Source/_javascript_Core/dfg/DFGJITCode.cpp (231174 => 231175)


--- trunk/Source/_javascript_Core/dfg/DFGJITCode.cpp	2018-04-30 22:21:42 UTC (rev 231174)
+++ trunk/Source/_javascript_Core/dfg/DFGJITCode.cpp	2018-04-30 22:29:21 UTC (rev 231175)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2013-2017 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2018 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -229,7 +229,7 @@
 {
     for (OSRExit& exit : osrExit) {
         if (ExecutableMemoryHandle* handle = exit.m_code.executableMemory()) {
-            if (handle->start() <= pc && pc < handle->end())
+            if (handle->start().untaggedPtr() <= pc && pc < handle->end().untaggedPtr())
                 return std::optional<CodeOrigin>(exit.m_codeOriginForExitProfile);
         }
     }

Modified: trunk/Source/_javascript_Core/ftl/FTLJITCode.cpp (231174 => 231175)


--- trunk/Source/_javascript_Core/ftl/FTLJITCode.cpp	2018-04-30 22:21:42 UTC (rev 231174)
+++ trunk/Source/_javascript_Core/ftl/FTLJITCode.cpp	2018-04-30 22:29:21 UTC (rev 231175)
@@ -155,7 +155,7 @@
 {
     for (OSRExit& exit : osrExit) {
         if (ExecutableMemoryHandle* handle = exit.m_code.executableMemory()) {
-            if (handle->start() <= pc && pc < handle->end())
+            if (handle->start().untaggedPtr() <= pc && pc < handle->end().untaggedPtr())
                 return std::optional<CodeOrigin>(exit.m_codeOriginForExitProfile);
         }
     }
@@ -162,7 +162,7 @@
 
     for (std::unique_ptr<LazySlowPath>& lazySlowPath : lazySlowPaths) {
         if (ExecutableMemoryHandle* handle = lazySlowPath->stub().executableMemory()) {
-            if (handle->start() <= pc && pc < handle->end())
+            if (handle->start().untaggedPtr() <= pc && pc < handle->end().untaggedPtr())
                 return std::optional<CodeOrigin>(codeBlock->codeOrigin(lazySlowPath->callSiteIndex()));
         }
     }

Modified: trunk/Source/_javascript_Core/jit/ExecutableAllocator.cpp (231174 => 231175)


--- trunk/Source/_javascript_Core/jit/ExecutableAllocator.cpp	2018-04-30 22:21:42 UTC (rev 231174)
+++ trunk/Source/_javascript_Core/jit/ExecutableAllocator.cpp	2018-04-30 22:29:21 UTC (rev 231175)
@@ -100,8 +100,8 @@
 static const double executablePoolReservationFraction = 0.25;
 #endif
 
-JS_EXPORT_PRIVATE uintptr_t startOfFixedExecutableMemoryPool;
-JS_EXPORT_PRIVATE uintptr_t endOfFixedExecutableMemoryPool;
+JS_EXPORT_PRIVATE void* taggedStartOfFixedExecutableMemoryPool;
+JS_EXPORT_PRIVATE void* taggedEndOfFixedExecutableMemoryPool;
 JS_EXPORT_PRIVATE bool useFastPermisionsJITCopy { false };
 
 JS_EXPORT_PRIVATE JITWriteSeparateHeapsFunction jitWriteSeparateHeapsFunction;
@@ -143,8 +143,9 @@
 
             addFreshFreeSpace(reservationBase, reservationSize);
 
-            startOfFixedExecutableMemoryPool = reinterpret_cast<uintptr_t>(reservationBase);
-            endOfFixedExecutableMemoryPool = startOfFixedExecutableMemoryPool + reservationSize;
+            void* reservationEnd = reinterpret_cast<uint8_t*>(reservationBase) + reservationSize;
+            taggedStartOfFixedExecutableMemoryPool = tagCodePtr<ExecutableMemoryPtrTag>(reservationBase);
+            taggedEndOfFixedExecutableMemoryPool = tagCodePtr<ExecutableMemoryPtrTag>(reservationEnd);
         }
     }
 
@@ -151,10 +152,10 @@
     virtual ~FixedVMPoolExecutableAllocator();
 
 protected:
-    void* allocateNewSpace(size_t&) override
+    FreeSpacePtr allocateNewSpace(size_t&) override
     {
         // We're operating in a fixed pool, so new allocation is always prohibited.
-        return 0;
+        return nullptr;
     }
 
     void notifyNeedPage(void* page) override
@@ -293,7 +294,8 @@
         local2.link(&jit);
         jit.ret();
 
-        LinkBuffer linkBuffer(jit, stubBase, stubSize);
+        auto stubBaseCodePtr = MacroAssemblerCodePtr<LinkBufferPtrTag>(tagCodePtr<LinkBufferPtrTag>(stubBase));
+        LinkBuffer linkBuffer(jit, stubBaseCodePtr, stubSize);
         // We don't use FINALIZE_CODE() for two reasons.
         // The first is that we don't want the writeable address, as disassembled instructions,
         // to appear in the console or anywhere in memory, via the PrintStream buffer.
@@ -431,6 +433,17 @@
         }
         return nullptr;
     }
+
+#if USE(POINTER_PROFILING)
+    void* start = startOfFixedExecutableMemoryPool();
+    void* end = endOfFixedExecutableMemoryPool();
+    void* resultStart = result->start().untaggedPtr();
+    void* resultEnd = result->end().untaggedPtr();
+    RELEASE_ASSERT(start == removeCodePtrTag(taggedStartOfFixedExecutableMemoryPool));
+    RELEASE_ASSERT(end == removeCodePtrTag(taggedEndOfFixedExecutableMemoryPool));
+    RELEASE_ASSERT(start <= resultStart && resultStart < end);
+    RELEASE_ASSERT(start < resultEnd && resultEnd <= end);
+#endif
     return result;
 }
 

Modified: trunk/Source/_javascript_Core/jit/ExecutableAllocator.h (231174 => 231175)


--- trunk/Source/_javascript_Core/jit/ExecutableAllocator.h	2018-04-30 22:21:42 UTC (rev 231174)
+++ trunk/Source/_javascript_Core/jit/ExecutableAllocator.h	2018-04-30 22:29:21 UTC (rev 231175)
@@ -61,13 +61,24 @@
 
 #if ENABLE(ASSEMBLER)
 
-extern JS_EXPORT_PRIVATE uintptr_t startOfFixedExecutableMemoryPool;
-extern JS_EXPORT_PRIVATE uintptr_t endOfFixedExecutableMemoryPool;
+extern JS_EXPORT_PRIVATE void* taggedStartOfFixedExecutableMemoryPool;
+extern JS_EXPORT_PRIVATE void* taggedEndOfFixedExecutableMemoryPool;
 
+template<typename T = void*>
+T startOfFixedExecutableMemoryPool()
+{
+    return untagCodePtr<T, ExecutableMemoryPtrTag>(taggedStartOfFixedExecutableMemoryPool);
+}
+
+template<typename T = void*>
+T endOfFixedExecutableMemoryPool()
+{
+    return untagCodePtr<T, ExecutableMemoryPtrTag>(taggedEndOfFixedExecutableMemoryPool);
+}
+
 inline bool isJITPC(void* pc)
 {
-    return reinterpret_cast<void*>(startOfFixedExecutableMemoryPool) <= pc
-        && pc < reinterpret_cast<void*>(endOfFixedExecutableMemoryPool);
+    return startOfFixedExecutableMemoryPool() <= pc && pc < endOfFixedExecutableMemoryPool();
 }
 
 typedef void (*JITWriteSeparateHeapsFunction)(off_t, const void*, size_t);
@@ -77,7 +88,7 @@
 
 static inline void* performJITMemcpy(void *dst, const void *src, size_t n)
 {
-    if (reinterpret_cast<uintptr_t>(dst) >= startOfFixedExecutableMemoryPool && reinterpret_cast<uintptr_t>(dst) < endOfFixedExecutableMemoryPool) {
+    if (dst >= startOfFixedExecutableMemoryPool() && dst < endOfFixedExecutableMemoryPool()) {
 #if ENABLE(FAST_JIT_PERMISSIONS)
         if (useFastPermisionsJITCopy) {
             os_thread_self_restrict_rwx_to_rw();
@@ -90,7 +101,7 @@
         if (jitWriteSeparateHeapsFunction) {
             // Use execute-only write thunk for writes inside the JIT region. This is a variant of
             // memcpy that takes an offset into the JIT region as its destination (first) parameter.
-            off_t offset = (off_t)((uintptr_t)dst - startOfFixedExecutableMemoryPool);
+            off_t offset = (off_t)((uintptr_t)dst - startOfFixedExecutableMemoryPool<uintptr_t>());
             retagCodePtr<JITThunkPtrTag, CFunctionPtrTag>(jitWriteSeparateHeapsFunction)(offset, src, n);
             return dst;
         }

Modified: trunk/Source/_javascript_Core/jit/JIT.cpp (231174 => 231175)


--- trunk/Source/_javascript_Core/jit/JIT.cpp	2018-04-30 22:21:42 UTC (rev 231174)
+++ trunk/Source/_javascript_Core/jit/JIT.cpp	2018-04-30 22:29:21 UTC (rev 231175)
@@ -903,7 +903,7 @@
         adoptRef(*new DirectJITCode(result, withArityCheck, JITCode::BaselineJIT)));
 
     if (JITInternal::verbose)
-        dataLogF("JIT generated code for %p at [%p, %p).\n", m_codeBlock, result.executableMemory()->start(), result.executableMemory()->end());
+        dataLogF("JIT generated code for %p at [%p, %p).\n", m_codeBlock, result.executableMemory()->start().untaggedPtr(), result.executableMemory()->end().untaggedPtr());
 
     return CompilationSuccessful;
 }

Modified: trunk/Source/_javascript_Core/jit/JITMathIC.h (231174 => 231175)


--- trunk/Source/_javascript_Core/jit/JITMathIC.h	2018-04-30 22:21:42 UTC (rev 231174)
+++ trunk/Source/_javascript_Core/jit/JITMathIC.h	2018-04-30 22:29:21 UTC (rev 231175)
@@ -137,7 +137,7 @@
             // We don't need a nop sled here because nobody should be jumping into the middle of an IC.
             bool needsBranchCompaction = false;
             RELEASE_ASSERT(jit.m_assembler.buffer().codeSize() <= static_cast<size_t>(m_inlineSize));
-            LinkBuffer linkBuffer(jit, m_inlineStart.dataLocation(), jit.m_assembler.buffer().codeSize(), JITCompilationMustSucceed, needsBranchCompaction);
+            LinkBuffer linkBuffer(jit, m_inlineStart, jit.m_assembler.buffer().codeSize(), JITCompilationMustSucceed, needsBranchCompaction);
             RELEASE_ASSERT(linkBuffer.isValid());
             linkBuffer.link(jump, CodeLocationLabel<JITStubRoutinePtrTag>(m_code.code()));
             FINALIZE_CODE(linkBuffer, NoPtrTag, "JITMathIC: linking constant jump to out of line stub");

Modified: trunk/Source/_javascript_Core/runtime/JSCPtrTag.h (231174 => 231175)


--- trunk/Source/_javascript_Core/runtime/JSCPtrTag.h	2018-04-30 22:21:42 UTC (rev 231174)
+++ trunk/Source/_javascript_Core/runtime/JSCPtrTag.h	2018-04-30 22:29:21 UTC (rev 231175)
@@ -37,6 +37,7 @@
     v(BytecodePtrTag) \
     v(DisassemblyPtrTag) \
     v(ExceptionHandlerPtrTag) \
+    v(ExecutableMemoryPtrTag) \
     v(JITThunkPtrTag) \
     v(JITStubRoutinePtrTag) \
     v(JSEntryPtrTag) \

Modified: trunk/Source/_javascript_Core/wasm/WasmCallee.cpp (231174 => 231175)


--- trunk/Source/_javascript_Core/wasm/WasmCallee.cpp	2018-04-30 22:21:42 UTC (rev 231174)
+++ trunk/Source/_javascript_Core/wasm/WasmCallee.cpp	2018-04-30 22:29:21 UTC (rev 231175)
@@ -35,7 +35,7 @@
 Callee::Callee(Entrypoint&& entrypoint)
     : m_entrypoint(WTFMove(entrypoint))
 {
-    registerCode(m_entrypoint.compilation->codeRef().executableMemory()->start(), m_entrypoint.compilation->codeRef().executableMemory()->end());
+    registerCode(m_entrypoint.compilation->codeRef().executableMemory()->start().untaggedPtr(), m_entrypoint.compilation->codeRef().executableMemory()->end().untaggedPtr());
 }
 
 Callee::Callee(Entrypoint&& entrypoint, size_t index, std::pair<const Name*, RefPtr<NameSection>>&& name)
@@ -42,7 +42,7 @@
     : m_entrypoint(WTFMove(entrypoint))
     , m_indexOrName(index, WTFMove(name))
 {
-    registerCode(m_entrypoint.compilation->codeRef().executableMemory()->start(), m_entrypoint.compilation->codeRef().executableMemory()->end());
+    registerCode(m_entrypoint.compilation->codeRef().executableMemory()->start().untaggedPtr(), m_entrypoint.compilation->codeRef().executableMemory()->end().untaggedPtr());
 }
 
 } } // namespace JSC::Wasm

Modified: trunk/Source/_javascript_Core/wasm/WasmFaultSignalHandler.cpp (231174 => 231175)


--- trunk/Source/_javascript_Core/wasm/WasmFaultSignalHandler.cpp	2018-04-30 22:21:42 UTC (rev 231174)
+++ trunk/Source/_javascript_Core/wasm/WasmFaultSignalHandler.cpp	2018-04-30 22:29:21 UTC (rev 231175)
@@ -59,7 +59,7 @@
     void* faultingInstruction = MachineContext::instructionPointer(context).untaggedExecutableAddress();
     dataLogLnIf(WasmFaultSignalHandlerInternal::verbose, "starting handler for fault at: ", RawPointer(faultingInstruction));
 
-    dataLogLnIf(WasmFaultSignalHandlerInternal::verbose, "JIT memory start: ", RawPointer(reinterpret_cast<void*>(startOfFixedExecutableMemoryPool)), " end: ", RawPointer(reinterpret_cast<void*>(endOfFixedExecutableMemoryPool)));
+    dataLogLnIf(WasmFaultSignalHandlerInternal::verbose, "JIT memory start: ", RawPointer(startOfFixedExecutableMemoryPool()), " end: ", RawPointer(endOfFixedExecutableMemoryPool()));
     // First we need to make sure we are in JIT code before we can acquire any locks. Otherwise,
     // we might have crashed in code that is already holding one of the locks we want to acquire.
     assertIsNotTagged(faultingInstruction);

Modified: trunk/Source/WTF/ChangeLog (231174 => 231175)


--- trunk/Source/WTF/ChangeLog	2018-04-30 22:21:42 UTC (rev 231174)
+++ trunk/Source/WTF/ChangeLog	2018-04-30 22:29:21 UTC (rev 231175)
@@ -1,3 +1,61 @@
+2018-04-30  Mark Lam  <[email protected]>
+
+        Apply PtrTags to the MetaAllocator and friends.
+        https://bugs.webkit.org/show_bug.cgi?id=185110
+        <rdar://problem/39533895>
+
+        Reviewed by Saam Barati.
+
+        1. Introduce a MetaAllocatorPtr smart pointer to do pointer tagging.
+        2. Use MetaAllocatorPtr in MetaAllocator and MetaAllocatorHandle.
+
+        * WTF.xcodeproj/project.pbxproj:
+        * wtf/CMakeLists.txt:
+        * wtf/MetaAllocator.cpp:
+        (WTF::MetaAllocator::release):
+        (WTF::MetaAllocatorHandle::MetaAllocatorHandle):
+        (WTF::MetaAllocatorHandle::shrink):
+        (WTF::MetaAllocatorHandle::dump const):
+        (WTF::MetaAllocator::allocate):
+        (WTF::MetaAllocator::findAndRemoveFreeSpace):
+        (WTF::MetaAllocator::addFreeSpaceFromReleasedHandle):
+        (WTF::MetaAllocator::addFreshFreeSpace):
+        (WTF::MetaAllocator::debugFreeSpaceSize):
+        (WTF::MetaAllocator::addFreeSpace):
+        (WTF::MetaAllocator::allocFreeSpaceNode):
+        * wtf/MetaAllocator.h:
+        (WTF::MetaAllocatorTracker::find):
+        (WTF::MetaAllocator::FreeSpaceNode::FreeSpaceNode):
+        (WTF::MetaAllocator::FreeSpaceNode::sizeInBytes):
+        (WTF::MetaAllocator::FreeSpaceNode::key):
+        * wtf/MetaAllocatorHandle.h:
+        (WTF::MetaAllocatorHandle::start const):
+        (WTF::MetaAllocatorHandle::end const):
+        (WTF::MetaAllocatorHandle::startAsInteger const):
+        (WTF::MetaAllocatorHandle::endAsInteger const):
+        (WTF::MetaAllocatorHandle::sizeInBytes const):
+        (WTF::MetaAllocatorHandle::containsIntegerAddress const):
+        (WTF::MetaAllocatorHandle::key):
+        * wtf/MetaAllocatorPtr.h: Added.
+        (WTF::MetaAllocatorPtr::MetaAllocatorPtr):
+        (WTF::MetaAllocatorPtr:: const):
+        (WTF::MetaAllocatorPtr::operator bool const):
+        (WTF::MetaAllocatorPtr::operator! const):
+        (WTF::MetaAllocatorPtr::operator== const):
+        (WTF::MetaAllocatorPtr::operator!= const):
+        (WTF::MetaAllocatorPtr::operator+ const):
+        (WTF::MetaAllocatorPtr::operator- const):
+        (WTF::MetaAllocatorPtr::operator+=):
+        (WTF::MetaAllocatorPtr::operator-=):
+        (WTF::MetaAllocatorPtr::isEmptyValue const):
+        (WTF::MetaAllocatorPtr::isDeletedValue const):
+        (WTF::MetaAllocatorPtr::hash const):
+        (WTF::MetaAllocatorPtr::emptyValue):
+        (WTF::MetaAllocatorPtr::deletedValue):
+        (WTF::MetaAllocatorPtrHash::hash):
+        (WTF::MetaAllocatorPtrHash::equal):
+        * wtf/PtrTag.h:
+
 2018-04-30  JF Bastien  <[email protected]>
 
         Use some C++17 features

Modified: trunk/Source/WTF/WTF.xcodeproj/project.pbxproj (231174 => 231175)


--- trunk/Source/WTF/WTF.xcodeproj/project.pbxproj	2018-04-30 22:21:42 UTC (rev 231174)
+++ trunk/Source/WTF/WTF.xcodeproj/project.pbxproj	2018-04-30 22:29:21 UTC (rev 231175)
@@ -634,6 +634,7 @@
 		FE05FAFE1FE5007500093230 /* WTFAssertions.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = WTFAssertions.cpp; sourceTree = "<group>"; };
 		FE05FB041FE8453200093230 /* PoisonedUniquePtr.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = PoisonedUniquePtr.h; sourceTree = "<group>"; };
 		FE7497E4208FFCAA0003565B /* PtrTag.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = PtrTag.h; sourceTree = "<group>"; };
+		FE7497ED209163060003565B /* MetaAllocatorPtr.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MetaAllocatorPtr.h; sourceTree = "<group>"; };
 		FE8225301B2A1E5B00BA68FD /* NakedPtr.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = NakedPtr.h; sourceTree = "<group>"; };
 		FE85416C1FBE285B008DA5DA /* Poisoned.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Poisoned.cpp; sourceTree = "<group>"; };
 		FE85416D1FBE285C008DA5DA /* Poisoned.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Poisoned.h; sourceTree = "<group>"; };
@@ -965,6 +966,7 @@
 				A8A472CD151A825B004123FF /* MetaAllocator.cpp */,
 				A8A472CE151A825B004123FF /* MetaAllocator.h */,
 				A8A472CF151A825B004123FF /* MetaAllocatorHandle.h */,
+				FE7497ED209163060003565B /* MetaAllocatorPtr.h */,
 				0F66B2821DC97BAB004A1D3F /* MonotonicTime.cpp */,
 				0F66B2831DC97BAB004A1D3F /* MonotonicTime.h */,
 				FE8225301B2A1E5B00BA68FD /* NakedPtr.h */,

Modified: trunk/Source/WTF/wtf/CMakeLists.txt (231174 => 231175)


--- trunk/Source/WTF/wtf/CMakeLists.txt	2018-04-30 22:21:42 UTC (rev 231174)
+++ trunk/Source/WTF/wtf/CMakeLists.txt	2018-04-30 22:29:21 UTC (rev 231175)
@@ -129,6 +129,7 @@
     MessageQueue.h
     MetaAllocator.h
     MetaAllocatorHandle.h
+    MetaAllocatorPtr.h
     MonotonicTime.h
     NakedPtr.h
     NaturalLoops.h

Modified: trunk/Source/WTF/wtf/MetaAllocator.cpp (231174 => 231175)


--- trunk/Source/WTF/wtf/MetaAllocator.cpp	2018-04-30 22:21:42 UTC (rev 231174)
+++ trunk/Source/WTF/wtf/MetaAllocator.cpp	2018-04-30 22:29:21 UTC (rev 231175)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2018 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -62,8 +62,10 @@
 {
     LockHolder locker(&m_lock);
     if (handle->sizeInBytes()) {
-        decrementPageOccupancy(handle->start(), handle->sizeInBytes());
-        addFreeSpaceFromReleasedHandle(handle->start(), handle->sizeInBytes());
+        void* start = handle->start().untaggedPtr();
+        size_t sizeInBytes = handle->sizeInBytes();
+        decrementPageOccupancy(start, sizeInBytes);
+        addFreeSpaceFromReleasedHandle(FreeSpacePtr(start), sizeInBytes);
     }
 
     if (UNLIKELY(!!m_tracker))
@@ -73,7 +75,7 @@
 MetaAllocatorHandle::MetaAllocatorHandle(MetaAllocator* allocator, void* start, size_t sizeInBytes, void* ownerUID)
     : m_allocator(allocator)
     , m_start(start)
-    , m_sizeInBytes(sizeInBytes)
+    , m_end(reinterpret_cast<char*>(start) + sizeInBytes)
     , m_ownerUID(ownerUID)
 {
     ASSERT(allocator);
@@ -89,33 +91,34 @@
 
 void MetaAllocatorHandle::shrink(size_t newSizeInBytes)
 {
-    ASSERT(newSizeInBytes <= m_sizeInBytes);
-    
+    size_t sizeInBytes = this->sizeInBytes();
+    ASSERT(newSizeInBytes <= sizeInBytes);
+
     LockHolder locker(&m_allocator->m_lock);
 
     newSizeInBytes = m_allocator->roundUp(newSizeInBytes);
     
-    ASSERT(newSizeInBytes <= m_sizeInBytes);
-    
-    if (newSizeInBytes == m_sizeInBytes)
+    ASSERT(newSizeInBytes <= sizeInBytes);
+
+    if (newSizeInBytes == sizeInBytes)
         return;
-    
-    uintptr_t freeStart = reinterpret_cast<uintptr_t>(m_start) + newSizeInBytes;
-    size_t freeSize = m_sizeInBytes - newSizeInBytes;
+
+    uintptr_t freeStart = m_start.untaggedPtr<uintptr_t>() + newSizeInBytes;
+    size_t freeSize = sizeInBytes - newSizeInBytes;
     uintptr_t freeEnd = freeStart + freeSize;
     
     uintptr_t firstCompletelyFreePage = (freeStart + m_allocator->m_pageSize - 1) & ~(m_allocator->m_pageSize - 1);
     if (firstCompletelyFreePage < freeEnd)
         m_allocator->decrementPageOccupancy(reinterpret_cast<void*>(firstCompletelyFreePage), freeSize - (firstCompletelyFreePage - freeStart));
-    
-    m_allocator->addFreeSpaceFromReleasedHandle(reinterpret_cast<void*>(freeStart), freeSize);
-    
-    m_sizeInBytes = newSizeInBytes;
+
+    m_allocator->addFreeSpaceFromReleasedHandle(MetaAllocator::FreeSpacePtr(freeStart), freeSize);
+
+    m_end = m_start + newSizeInBytes;
 }
 
 void MetaAllocatorHandle::dump(PrintStream& out) const
 {
-    out.print(RawPointer(start()), "...", RawPointer(end()));
+    out.print(RawPointer(start().untaggedPtr()), "...", RawPointer(end().untaggedPtr()));
 }
 
 MetaAllocator::MetaAllocator(size_t allocationGranule, size_t pageSize)
@@ -156,8 +159,8 @@
         return nullptr;
     
     sizeInBytes = roundUp(sizeInBytes);
-    
-    void* start = findAndRemoveFreeSpace(sizeInBytes);
+
+    FreeSpacePtr start = findAndRemoveFreeSpace(sizeInBytes);
     if (!start) {
         size_t requestedNumberOfPages = (sizeInBytes + m_pageSize - 1) >> m_logPageSize;
         size_t numberOfPages = requestedNumberOfPages;
@@ -175,18 +178,18 @@
         m_bytesReserved += roundedUpSize;
         
         if (roundedUpSize > sizeInBytes) {
-            void* freeSpaceStart = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(start) + sizeInBytes);
+            FreeSpacePtr freeSpaceStart = start + sizeInBytes;
             size_t freeSpaceSize = roundedUpSize - sizeInBytes;
             addFreeSpace(freeSpaceStart, freeSpaceSize);
         }
     }
-    incrementPageOccupancy(start, sizeInBytes);
+    incrementPageOccupancy(start.untaggedPtr(), sizeInBytes);
     m_bytesAllocated += sizeInBytes;
 #if ENABLE(META_ALLOCATOR_PROFILE)
     m_numAllocations++;
 #endif
 
-    auto handle = adoptRef(*new MetaAllocatorHandle(this, start, sizeInBytes, ownerUID));
+    auto handle = adoptRef(*new MetaAllocatorHandle(this, start.untaggedPtr(), sizeInBytes, ownerUID));
 
     if (UNLIKELY(!!m_tracker))
         m_tracker->notify(handle.ptr());
@@ -204,7 +207,7 @@
     return result;
 }
 
-void* MetaAllocator::findAndRemoveFreeSpace(size_t sizeInBytes)
+MetaAllocator::FreeSpacePtr MetaAllocator::findAndRemoveFreeSpace(size_t sizeInBytes)
 {
     FreeSpaceNode* node = m_freeSpaceSizeMap.findLeastGreaterThanOrEqual(sizeInBytes);
     
@@ -211,18 +214,19 @@
     if (!node)
         return 0;
     
-    ASSERT(node->m_sizeInBytes >= sizeInBytes);
-    
+    size_t nodeSizeInBytes = node->sizeInBytes();
+    ASSERT(nodeSizeInBytes >= sizeInBytes);
+
     m_freeSpaceSizeMap.remove(node);
-    
-    void* result;
-    
-    if (node->m_sizeInBytes == sizeInBytes) {
+
+    FreeSpacePtr result;
+
+    if (nodeSizeInBytes == sizeInBytes) {
         // Easy case: perfect fit, so just remove the node entirely.
         result = node->m_start;
         
         m_freeSpaceStartAddressMap.remove(node->m_start);
-        m_freeSpaceEndAddressMap.remove(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(node->m_start) + node->m_sizeInBytes));
+        m_freeSpaceEndAddressMap.remove(node->m_end);
         freeFreeSpaceNode(node);
     } else {
         // Try to be a good citizen and ensure that the returned chunk of memory
@@ -232,11 +236,12 @@
         // of committed pages, since in the long run, smaller fragmentation means
         // fewer committed pages and fewer failures in general.
         
-        uintptr_t firstPage = reinterpret_cast<uintptr_t>(node->m_start) >> m_logPageSize;
-        uintptr_t lastPage = (reinterpret_cast<uintptr_t>(node->m_start) + node->m_sizeInBytes - 1) >> m_logPageSize;
-    
-        uintptr_t lastPageForLeftAllocation = (reinterpret_cast<uintptr_t>(node->m_start) + sizeInBytes - 1) >> m_logPageSize;
-        uintptr_t firstPageForRightAllocation = (reinterpret_cast<uintptr_t>(node->m_start) + node->m_sizeInBytes - sizeInBytes) >> m_logPageSize;
+        uintptr_t nodeStartAsInt = node->m_start.untaggedPtr<uintptr_t>();
+        uintptr_t firstPage = nodeStartAsInt >> m_logPageSize;
+        uintptr_t lastPage = (nodeStartAsInt + nodeSizeInBytes - 1) >> m_logPageSize;
+
+        uintptr_t lastPageForLeftAllocation = (nodeStartAsInt + sizeInBytes - 1) >> m_logPageSize;
+        uintptr_t firstPageForRightAllocation = (nodeStartAsInt + nodeSizeInBytes - sizeInBytes) >> m_logPageSize;
         
         if (lastPageForLeftAllocation - firstPage + 1 <= lastPage - firstPageForRightAllocation + 1) {
             // Allocate in the left side of the returned chunk, and slide the node to the right.
@@ -243,21 +248,20 @@
             result = node->m_start;
             
             m_freeSpaceStartAddressMap.remove(node->m_start);
-            
-            node->m_sizeInBytes -= sizeInBytes;
-            node->m_start = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(node->m_start) + sizeInBytes);
-            
+
+            node->m_start += sizeInBytes;
+
             m_freeSpaceSizeMap.insert(node);
             m_freeSpaceStartAddressMap.add(node->m_start, node);
         } else {
             // Allocate in the right size of the returned chunk, and slide the node to the left;
-            
-            result = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(node->m_start) + node->m_sizeInBytes - sizeInBytes);
-            
-            m_freeSpaceEndAddressMap.remove(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(node->m_start) + node->m_sizeInBytes));
-            
-            node->m_sizeInBytes -= sizeInBytes;
-            
+
+            result = node->m_end - sizeInBytes;
+
+            m_freeSpaceEndAddressMap.remove(node->m_end);
+
+            node->m_end = result;
+
             m_freeSpaceSizeMap.insert(node);
             m_freeSpaceEndAddressMap.add(result, node);
         }
@@ -270,7 +274,7 @@
     return result;
 }
 
-void MetaAllocator::addFreeSpaceFromReleasedHandle(void* start, size_t sizeInBytes)
+void MetaAllocator::addFreeSpaceFromReleasedHandle(FreeSpacePtr start, size_t sizeInBytes)
 {
 #if ENABLE(META_ALLOCATOR_PROFILE)
     m_numFrees++;
@@ -283,7 +287,7 @@
 {
     LockHolder locker(&m_lock);
     m_bytesReserved += sizeInBytes;
-    addFreeSpace(start, sizeInBytes);
+    addFreeSpace(FreeSpacePtr(start), sizeInBytes);
 }
 
 size_t MetaAllocator::debugFreeSpaceSize()
@@ -292,7 +296,7 @@
     LockHolder locker(&m_lock);
     size_t result = 0;
     for (FreeSpaceNode* node = m_freeSpaceSizeMap.first(); node; node = node->successor())
-        result += node->m_sizeInBytes;
+        result += node->sizeInBytes();
     return result;
 #else
     CRASH();
@@ -300,25 +304,23 @@
 #endif
 }
 
-void MetaAllocator::addFreeSpace(void* start, size_t sizeInBytes)
+void MetaAllocator::addFreeSpace(FreeSpacePtr start, size_t sizeInBytes)
 {
-    void* end = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(start) + sizeInBytes);
-    
-    HashMap<void*, FreeSpaceNode*>::iterator leftNeighbor = m_freeSpaceEndAddressMap.find(start);
-    HashMap<void*, FreeSpaceNode*>::iterator rightNeighbor = m_freeSpaceStartAddressMap.find(end);
-    
+    FreeSpacePtr end = start + sizeInBytes;
+
+    HashMap<FreeSpacePtr, FreeSpaceNode*>::iterator leftNeighbor = m_freeSpaceEndAddressMap.find(start);
+    HashMap<FreeSpacePtr, FreeSpaceNode*>::iterator rightNeighbor = m_freeSpaceStartAddressMap.find(end);
+
     if (leftNeighbor != m_freeSpaceEndAddressMap.end()) {
         // We have something we can coalesce with on the left. Remove it from the tree, and
         // remove its end from the end address map.
         
-        ASSERT(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(leftNeighbor->value->m_start) + leftNeighbor->value->m_sizeInBytes) == leftNeighbor->key);
-        
+        ASSERT(leftNeighbor->value->m_end == leftNeighbor->key);
+
         FreeSpaceNode* leftNode = leftNeighbor->value;
-        
-        void* leftStart = leftNode->m_start;
-        size_t leftSize = leftNode->m_sizeInBytes;
-        void* leftEnd = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(leftStart) + leftSize);
-        
+
+        FreeSpacePtr leftEnd = leftNode->m_end;
+
         ASSERT(leftEnd == start);
         
         m_freeSpaceSizeMap.remove(leftNode);
@@ -332,26 +334,26 @@
             ASSERT(rightNeighbor->value->m_start == rightNeighbor->key);
             
             FreeSpaceNode* rightNode = rightNeighbor->value;
-            void* rightStart = rightNeighbor->key;
-            size_t rightSize = rightNode->m_sizeInBytes;
-            void* rightEnd = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(rightStart) + rightSize);
-            
+            FreeSpacePtr rightStart = rightNeighbor->key;
+            size_t rightSize = rightNode->sizeInBytes();
+            FreeSpacePtr rightEnd = rightNode->m_end;
+
             ASSERT(rightStart == end);
-            ASSERT(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(leftStart) + leftSize + sizeInBytes + rightSize) == rightEnd);
-            
+            ASSERT(leftNode->m_start + (leftNode->sizeInBytes() + sizeInBytes + rightSize) == rightEnd);
+
             m_freeSpaceSizeMap.remove(rightNode);
             m_freeSpaceStartAddressMap.remove(rightStart);
             m_freeSpaceEndAddressMap.remove(rightEnd);
             
             freeFreeSpaceNode(rightNode);
-            
-            leftNode->m_sizeInBytes += sizeInBytes + rightSize;
-            
+
+            leftNode->m_end += (sizeInBytes + rightSize);
+
             m_freeSpaceSizeMap.insert(leftNode);
             m_freeSpaceEndAddressMap.add(rightEnd, leftNode);
         } else {
-            leftNode->m_sizeInBytes += sizeInBytes;
-            
+            leftNode->m_end += sizeInBytes;
+
             m_freeSpaceSizeMap.insert(leftNode);
             m_freeSpaceEndAddressMap.add(end, leftNode);
         }
@@ -360,19 +362,16 @@
         
         if (rightNeighbor != m_freeSpaceStartAddressMap.end()) {
             FreeSpaceNode* rightNode = rightNeighbor->value;
-            void* rightStart = rightNeighbor->key;
-            size_t rightSize = rightNode->m_sizeInBytes;
-            void* rightEnd = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(rightStart) + rightSize);
-            
+            FreeSpacePtr rightStart = rightNeighbor->key;
+
             ASSERT(rightStart == end);
-            ASSERT_UNUSED(rightEnd, reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(start) + sizeInBytes + rightSize) == rightEnd);
-            
+            ASSERT(start + (sizeInBytes + rightNode->sizeInBytes()) == rightNode->m_end);
+
             m_freeSpaceSizeMap.remove(rightNode);
             m_freeSpaceStartAddressMap.remove(rightStart);
-            
-            rightNode->m_sizeInBytes += sizeInBytes;
+
             rightNode->m_start = start;
-            
+
             m_freeSpaceSizeMap.insert(rightNode);
             m_freeSpaceStartAddressMap.add(start, rightNode);
         } else {
@@ -379,10 +378,10 @@
             // Nothing to coalesce with, so create a new free space node and add it.
             
             FreeSpaceNode* node = allocFreeSpaceNode();
-            
-            node->m_sizeInBytes = sizeInBytes;
+
             node->m_start = start;
-            
+            node->m_end = start + sizeInBytes;
+
             m_freeSpaceSizeMap.insert(node);
             m_freeSpaceStartAddressMap.add(start, node);
             m_freeSpaceEndAddressMap.add(end, node);
@@ -445,7 +444,7 @@
 #ifndef NDEBUG
     m_mallocBalance++;
 #endif
-    return new (NotNull, fastMalloc(sizeof(FreeSpaceNode))) FreeSpaceNode(0, 0);
+    return new (NotNull, fastMalloc(sizeof(FreeSpaceNode))) FreeSpaceNode();
 }
 
 void MetaAllocator::freeFreeSpaceNode(FreeSpaceNode* node)

Modified: trunk/Source/WTF/wtf/MetaAllocator.h (231174 => 231175)


--- trunk/Source/WTF/wtf/MetaAllocator.h	2018-04-30 22:21:42 UTC (rev 231174)
+++ trunk/Source/WTF/wtf/MetaAllocator.h	2018-04-30 22:29:21 UTC (rev 231175)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2018 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -51,7 +51,7 @@
     MetaAllocatorHandle* find(void* address)
     {
         MetaAllocatorHandle* handle = m_allocations.findGreatestLessThanOrEqual(address);
-        if (handle && address < handle->end())
+        if (handle && address < handle->end().untaggedPtr())
             return handle;
         return 0;
     }
@@ -63,6 +63,8 @@
     WTF_MAKE_NONCOPYABLE(MetaAllocator);
 
 public:
+    using FreeSpacePtr = MetaAllocatorPtr<FreeSpacePtrTag>;
+
     WTF_EXPORT_PRIVATE MetaAllocator(size_t allocationGranule, size_t pageSize = WTF::pageSize());
     
     WTF_EXPORT_PRIVATE virtual ~MetaAllocator();
@@ -109,8 +111,8 @@
     
     // Allocate new virtual space, but don't commit. This may return more
     // pages than we asked, in which case numPages is changed.
-    virtual void* allocateNewSpace(size_t& numPages) = 0;
-    
+    virtual FreeSpacePtr allocateNewSpace(size_t& numPages) = 0;
+
     // Commit a page.
     virtual void notifyNeedPage(void* page) = 0;
     
@@ -127,19 +129,25 @@
     
     class FreeSpaceNode : public RedBlackTree<FreeSpaceNode, size_t>::Node {
     public:
+        FreeSpaceNode() = default;
+
         FreeSpaceNode(void* start, size_t sizeInBytes)
             : m_start(start)
-            , m_sizeInBytes(sizeInBytes)
+            , m_end(reinterpret_cast<uint8_t*>(start) + sizeInBytes)
+        { }
+
+        size_t sizeInBytes()
         {
+            return m_end.untaggedPtr<size_t>() - m_start.untaggedPtr<size_t>();
         }
 
         size_t key()
         {
-            return m_sizeInBytes;
+            return sizeInBytes();
         }
 
-        void* m_start;
-        size_t m_sizeInBytes;
+        FreeSpacePtr m_start;
+        FreeSpacePtr m_end;
     };
     typedef RedBlackTree<FreeSpaceNode, size_t> Tree;
 
@@ -149,16 +157,16 @@
     // Remove free space from the allocator. This is effectively
     // the allocate() function, except that it does not mark the
     // returned space as being in-use.
-    void* findAndRemoveFreeSpace(size_t sizeInBytes);
+    FreeSpacePtr findAndRemoveFreeSpace(size_t sizeInBytes);
 
     // This is called when memory from an allocation is freed.
-    void addFreeSpaceFromReleasedHandle(void* start, size_t sizeInBytes);
-    
+    void addFreeSpaceFromReleasedHandle(FreeSpacePtr start, size_t sizeInBytes);
+
     // This is the low-level implementation of adding free space; it
     // is called from both addFreeSpaceFromReleasedHandle and from
     // addFreshFreeSpace.
-    void addFreeSpace(void* start, size_t sizeInBytes);
-    
+    void addFreeSpace(FreeSpacePtr start, size_t sizeInBytes);
+
     // Management of used space.
     
     void incrementPageOccupancy(void* address, size_t sizeInBytes);
@@ -177,8 +185,8 @@
     unsigned m_logPageSize;
     
     Tree m_freeSpaceSizeMap;
-    HashMap<void*, FreeSpaceNode*> m_freeSpaceStartAddressMap;
-    HashMap<void*, FreeSpaceNode*> m_freeSpaceEndAddressMap;
+    HashMap<FreeSpacePtr, FreeSpaceNode*> m_freeSpaceStartAddressMap;
+    HashMap<FreeSpacePtr, FreeSpaceNode*> m_freeSpaceEndAddressMap;
     HashMap<uintptr_t, size_t> m_pageOccupancyMap;
     
     size_t m_bytesAllocated;

Modified: trunk/Source/WTF/wtf/MetaAllocatorHandle.h (231174 => 231175)


--- trunk/Source/WTF/wtf/MetaAllocatorHandle.h	2018-04-30 22:21:42 UTC (rev 231174)
+++ trunk/Source/WTF/wtf/MetaAllocatorHandle.h	2018-04-30 22:29:21 UTC (rev 231175)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2011, 2013, 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2018 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -30,6 +30,7 @@
 #define WTF_MetaAllocatorHandle_h
 
 #include <wtf/Assertions.h>
+#include <wtf/MetaAllocatorPtr.h>
 #include <wtf/RedBlackTree.h>
 #include <wtf/ThreadSafeRefCounted.h>
 
@@ -41,38 +42,40 @@
 class MetaAllocatorHandle : public ThreadSafeRefCounted<MetaAllocatorHandle>, public RedBlackTree<MetaAllocatorHandle, void*>::Node {
 private:
     MetaAllocatorHandle(MetaAllocator*, void* start, size_t sizeInBytes, void* ownerUID);
-    
+
 public:
+    using MemoryPtr = MetaAllocatorPtr<HandleMemoryPtrTag>;
+
     WTF_EXPORT_PRIVATE ~MetaAllocatorHandle();
-    
-    void* start() const
+
+    MemoryPtr start() const
     {
         return m_start;
     }
-    
-    void* end() const
+
+    MemoryPtr end() const
     {
-        return reinterpret_cast<void*>(endAsInteger());
+        return m_end;
     }
-    
+
     uintptr_t startAsInteger() const
     {
-        return reinterpret_cast<uintptr_t>(m_start);
+        return m_start.untaggedPtr<uintptr_t>();
     }
-    
+
     uintptr_t endAsInteger() const
     {
-        return startAsInteger() + m_sizeInBytes;
+        return m_end.untaggedPtr<uintptr_t>();
     }
-        
+
     size_t sizeInBytes() const
     {
-        return m_sizeInBytes;
+        return m_end.untaggedPtr<size_t>() - m_start.untaggedPtr<size_t>();
     }
     
     bool containsIntegerAddress(uintptr_t address) const
     {
-        return address - startAsInteger() < sizeInBytes();
+        return address >= startAsInteger() && address < endAsInteger();
     }
     
     bool contains(void* address) const
@@ -100,7 +103,7 @@
 
     void* key()
     {
-        return m_start;
+        return m_start.untaggedPtr();
     }
 
     WTF_EXPORT_PRIVATE void dump(PrintStream& out) const;
@@ -109,8 +112,8 @@
     friend class MetaAllocator;
     
     MetaAllocator* m_allocator;
-    void* m_start;
-    size_t m_sizeInBytes;
+    MemoryPtr m_start;
+    MemoryPtr m_end;
     void* m_ownerUID;
 };
 

Added: trunk/Source/WTF/wtf/MetaAllocatorPtr.h (0 => 231175)


--- trunk/Source/WTF/wtf/MetaAllocatorPtr.h	                        (rev 0)
+++ trunk/Source/WTF/wtf/MetaAllocatorPtr.h	2018-04-30 22:29:21 UTC (rev 231175)
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) 2018 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <wtf/HashTraits.h>
+#include <wtf/PtrTag.h>
+#include <wtf/StdLibExtras.h>
+
+namespace WTF {
+
+template<PtrTag tag>
+class MetaAllocatorPtr {
+public:
+    MetaAllocatorPtr() = default;
+    MetaAllocatorPtr(std::nullptr_t) { }
+
+    explicit MetaAllocatorPtr(void* ptr)
+        : m_ptr(tagCodePtr<tag>(ptr))
+    {
+        assertIsNotTagged(ptr);
+    }
+
+    explicit MetaAllocatorPtr(uintptr_t ptrAsInt)
+        : MetaAllocatorPtr(reinterpret_cast<void*>(ptrAsInt))
+    { }
+
+    template<typename T = void*>
+    T untaggedPtr() const { return bitwise_cast<T>(untagCodePtr<tag>(m_ptr)); }
+
+    template<PtrTag newTag, typename T = void*>
+    T retaggedPtr() const { return bitwise_cast<T>(retagCodePtr<tag, newTag>(m_ptr)); }
+
+    // Disallow any casting operations (except for booleans).
+    template<typename T, typename = std::enable_if_t<!std::is_same<T, bool>::value>>
+    operator T() = delete;
+
+    explicit operator bool() const { return !!m_ptr; }
+    bool operator!() const { return !m_ptr; }
+
+    bool operator==(MetaAllocatorPtr other) const { return m_ptr == other.m_ptr; }
+    bool operator!=(MetaAllocatorPtr other) const { return m_ptr != other.m_ptr; }
+
+    MetaAllocatorPtr operator+(size_t sizeInBytes) const { return MetaAllocatorPtr(untaggedPtr<uint8_t*>() + sizeInBytes); }
+    MetaAllocatorPtr operator-(size_t sizeInBytes) const { return MetaAllocatorPtr(untaggedPtr<uint8_t*>() - sizeInBytes); }
+
+    MetaAllocatorPtr& operator+=(size_t sizeInBytes)
+    {
+        return *this = *this + sizeInBytes;
+    }
+
+    MetaAllocatorPtr& operator-=(size_t sizeInBytes)
+    {
+        return *this = *this - sizeInBytes;
+    }
+
+    enum EmptyValueTag { EmptyValue };
+    enum DeletedValueTag { DeletedValue };
+
+    MetaAllocatorPtr(EmptyValueTag)
+        : m_ptr(emptyValue())
+    { }
+
+    MetaAllocatorPtr(DeletedValueTag)
+        : m_ptr(deletedValue())
+    { }
+
+    bool isEmptyValue() const { return m_ptr == emptyValue(); }
+    bool isDeletedValue() const { return m_ptr == deletedValue(); }
+
+    unsigned hash() const { return PtrHash<void*>::hash(m_ptr); }
+
+private:
+    static void* emptyValue() { return reinterpret_cast<void*>(1); }
+    static void* deletedValue() { return reinterpret_cast<void*>(2); }
+
+    void* m_ptr { nullptr };
+};
+
+template<PtrTag tag>
+struct MetaAllocatorPtrHash {
+    static unsigned hash(const MetaAllocatorPtr<tag>& ptr) { return ptr.hash(); }
+    static bool equal(const MetaAllocatorPtr<tag>& a, const MetaAllocatorPtr<tag>& b)
+    {
+        return a == b;
+    }
+    static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
+template<typename T> struct DefaultHash;
+template<PtrTag tag> struct DefaultHash<MetaAllocatorPtr<tag>> {
+    typedef MetaAllocatorPtrHash<tag> Hash;
+};
+
+template<typename T> struct HashTraits;
+template<PtrTag tag> struct HashTraits<MetaAllocatorPtr<tag>> : public CustomHashTraits<MetaAllocatorPtr<tag>> { };
+
+} // namespace WTF
+
+using WTF::MetaAllocatorPtr;

Modified: trunk/Source/WTF/wtf/PtrTag.h (231174 => 231175)


--- trunk/Source/WTF/wtf/PtrTag.h	2018-04-30 22:21:42 UTC (rev 231174)
+++ trunk/Source/WTF/wtf/PtrTag.h	2018-04-30 22:29:21 UTC (rev 231175)
@@ -34,6 +34,8 @@
     v(CFunctionPtrTag) \
 
 #define FOR_EACH_ADDITIONAL_WTF_PTRTAG(v) \
+    v(FreeSpacePtrTag) \
+    v(HandleMemoryPtrTag) \
 
 #define FOR_EACH_WTF_PTRTAG(v) \
     FOR_EACH_BASE_WTF_PTRTAG(v) \

Modified: trunk/Tools/ChangeLog (231174 => 231175)


--- trunk/Tools/ChangeLog	2018-04-30 22:21:42 UTC (rev 231174)
+++ trunk/Tools/ChangeLog	2018-04-30 22:29:21 UTC (rev 231175)
@@ -1,3 +1,18 @@
+2018-04-30  Mark Lam  <[email protected]>
+
+        Apply PtrTags to the MetaAllocator and friends.
+        https://bugs.webkit.org/show_bug.cgi?id=185110
+        <rdar://problem/39533895>
+
+        Reviewed by Saam Barati.
+
+        Update the test to match MetaAllocator changes in WTF.
+
+        * TestWebKitAPI/Tests/WTF/MetaAllocator.cpp:
+        (TestWebKitAPI::TEST_F):
+        (WTF::tagForPtr):
+        (WTF::ptrTagName):
+
 2018-04-30  Alex Christensen  <[email protected]>
 
         Add WKUIDelegatePrivate equivalent of WKPageContextMenuClient getContextMenuFromProposedMenuAsync

Modified: trunk/Tools/TestWebKitAPI/Tests/WTF/MetaAllocator.cpp (231174 => 231175)


--- trunk/Tools/TestWebKitAPI/Tests/WTF/MetaAllocator.cpp	2018-04-30 22:21:42 UTC (rev 231174)
+++ trunk/Tools/TestWebKitAPI/Tests/WTF/MetaAllocator.cpp	2018-04-30 22:29:21 UTC (rev 231175)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2018 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -64,11 +64,11 @@
             m_parent->allocatorDestroyed = true;
         }
         
-        virtual void* allocateNewSpace(size_t& numPages)
+        virtual FreeSpacePtr allocateNewSpace(size_t& numPages)
         {
             switch (m_parent->currentHeapGrowthMode) {
             case DontGrowHeap:
-                return 0;
+                return nullptr;
                 
             case ForTestDemandAllocCoalesce:
             case ForTestDemandAllocDontCoalesce: {
@@ -96,12 +96,12 @@
                 
                 m_parent->additionalPagesInHeap += numPages;
         
-                return result;
+                return FreeSpacePtr(result);
             }
                 
             default:
                 CRASH();
-                return 0;
+                return nullptr;
             }
         }
         
@@ -190,8 +190,8 @@
         EXPECT_TRUE(handle);
         EXPECT_EQ(handle->sizeInBytes(), sizeInBytes);
         
-        uintptr_t startByte = reinterpret_cast<uintptr_t>(handle->start());
-        uintptr_t endByte = startByte + sizeInBytes;
+        uintptr_t startByte = handle->start().untaggedPtr<uintptr_t>();
+        uintptr_t endByte = handle->end().untaggedPtr<uintptr_t>();
         for (uintptr_t currentByte = startByte; currentByte < endByte; ++currentByte) {
             EXPECT_TRUE(!byteState(currentByte));
             byteState(currentByte) = true;
@@ -208,7 +208,7 @@
     {
         EXPECT_TRUE(handle);
         
-        notifyFree(handle->start(), handle->sizeInBytes());
+        notifyFree(handle->start().untaggedPtr(), handle->sizeInBytes());
         handle->deref();
         
         if (sanityCheckMode == RunSanityCheck)
@@ -237,13 +237,13 @@
     
     void confirm(MetaAllocatorHandle* handle)
     {
-        uintptr_t startByte = reinterpret_cast<uintptr_t>(handle->start());
+        uintptr_t startByte = handle->start().untaggedPtr<uintptr_t>();
         confirm(startByte, startByte + handle->sizeInBytes(), true);
     }
     
     void confirmHighWatermark(MetaAllocatorHandle* handle)
     {
-        confirm(reinterpret_cast<uintptr_t>(handle->end()), (basePage + defaultPagesInHeap) * pageSize(), false);
+        confirm(handle->end().untaggedPtr<uintptr_t>(), (basePage + defaultPagesInHeap) * pageSize(), false);
     }
                 
     void confirm(uintptr_t startByte, uintptr_t endByte, bool value)
@@ -306,7 +306,7 @@
         // verifies that the state of pages is correct.
         
         MetaAllocatorHandle* handle = allocate(size);
-        EXPECT_EQ(handle->start(), reinterpret_cast<void*>(basePage * pageSize()));
+        EXPECT_EQ(handle->start().untaggedPtr(), reinterpret_cast<void*>(basePage * pageSize()));
         EXPECT_EQ(handle->sizeInBytes(), size);
         EXPECT_TRUE(pageState(basePage));
         
@@ -325,7 +325,7 @@
         // allocations should behave the same as the first one.
         
         MetaAllocatorHandle* handle = allocate(firstSize);
-        EXPECT_EQ(handle->start(), reinterpret_cast<void*>(basePage * pageSize()));
+        EXPECT_EQ(handle->start().untaggedPtr(), reinterpret_cast<void*>(basePage * pageSize()));
         EXPECT_EQ(handle->sizeInBytes(), firstSize);
         
         confirm(handle);
@@ -337,7 +337,7 @@
         va_start(argList, firstSize);
         while (size_t sizeInBytes = va_arg(argList, int)) {
             handle = allocate(sizeInBytes);
-            EXPECT_EQ(handle->start(), reinterpret_cast<void*>(basePage * pageSize()));
+            EXPECT_EQ(handle->start().untaggedPtr(), reinterpret_cast<void*>(basePage * pageSize()));
             EXPECT_EQ(handle->sizeInBytes(), sizeInBytes);
             
             confirm(handle);
@@ -359,7 +359,7 @@
         // picked in such a way that it never straddles a page.
         
         MetaAllocatorHandle* firstHandle = allocate(firstSize);
-        EXPECT_EQ(firstHandle->start(), reinterpret_cast<void*>(basePage * pageSize()));
+        EXPECT_EQ(firstHandle->start().untaggedPtr(), reinterpret_cast<void*>(basePage * pageSize()));
         EXPECT_EQ(firstHandle->sizeInBytes(), firstSize);
         
         confirm(firstHandle);
@@ -366,7 +366,7 @@
         confirmHighWatermark(firstHandle);
 
         MetaAllocatorHandle* secondHandle = allocate(secondSize);
-        EXPECT_EQ(secondHandle->start(), reinterpret_cast<void*>(basePage * pageSize() + firstSize));
+        EXPECT_EQ(secondHandle->start().untaggedPtr(), reinterpret_cast<void*>(basePage * pageSize() + firstSize));
         EXPECT_EQ(secondHandle->sizeInBytes(), secondSize);
         
         confirm(firstHandle);
@@ -383,7 +383,7 @@
         confirm(basePage * pageSize(), (basePage + defaultPagesInHeap) * pageSize(), false);
         
         MetaAllocatorHandle* thirdHandle = allocate(thirdSize);
-        EXPECT_EQ(thirdHandle->start(), reinterpret_cast<void*>(basePage * pageSize()));
+        EXPECT_EQ(thirdHandle->start().untaggedPtr(), reinterpret_cast<void*>(basePage * pageSize()));
         EXPECT_EQ(thirdHandle->sizeInBytes(), thirdSize);
         
         confirm(thirdHandle);
@@ -408,7 +408,7 @@
         va_start(argList, mode);
         while (size_t sizeInBytes = va_arg(argList, int)) {
             MetaAllocatorHandle* handle = allocate(sizeInBytes);
-            EXPECT_EQ(handle->start(), reinterpret_cast<void*>(basePage * pageSize() + totalSize));
+            EXPECT_EQ(handle->start().untaggedPtr(), reinterpret_cast<void*>(basePage * pageSize() + totalSize));
             EXPECT_EQ(handle->sizeInBytes(), sizeInBytes);
             
             confirm(handle);
@@ -428,7 +428,7 @@
             free(handles.at(index));
             if (mode == TestFIFOAllocMode::EagerFill) {
                 MetaAllocatorHandle* handle = allocate(sizeSoFar);
-                EXPECT_EQ(handle->start(), reinterpret_cast<void*>(basePage * pageSize()));
+                EXPECT_EQ(handle->start().untaggedPtr(), reinterpret_cast<void*>(basePage * pageSize()));
                 EXPECT_EQ(handle->sizeInBytes(), sizeSoFar);
                 
                 confirm(basePage * pageSize(), basePage * pageSize() + totalSize, true);
@@ -449,7 +449,7 @@
         
         if (mode == TestFIFOAllocMode::FillAtEnd) {
             MetaAllocatorHandle* finalHandle = allocate(totalSize);
-            EXPECT_EQ(finalHandle->start(), reinterpret_cast<void*>(basePage * pageSize()));
+            EXPECT_EQ(finalHandle->start().untaggedPtr(), reinterpret_cast<void*>(basePage * pageSize()));
             EXPECT_EQ(finalHandle->sizeInBytes(), totalSize);
             
             confirm(finalHandle);
@@ -479,16 +479,16 @@
     void testRightAllocation(size_t firstLeftSize, size_t firstRightSize, size_t secondLeftSize, size_t secondRightSize)
     {
         MetaAllocatorHandle* firstLeft = allocate(firstLeftSize);
-        EXPECT_EQ(firstLeft->start(), reinterpret_cast<void*>(basePage * pageSize()));
+        EXPECT_EQ(firstLeft->start().untaggedPtr(), reinterpret_cast<void*>(basePage * pageSize()));
         
         MetaAllocatorHandle* firstRight = allocate(firstRightSize);
-        EXPECT_EQ(firstRight->end(), reinterpret_cast<void*>((basePage + defaultPagesInHeap) * pageSize()));
+        EXPECT_EQ(firstRight->end().untaggedPtr(), reinterpret_cast<void*>((basePage + defaultPagesInHeap) * pageSize()));
         
         MetaAllocatorHandle* secondLeft = allocate(secondLeftSize);
-        EXPECT_EQ(secondLeft->start(), reinterpret_cast<void*>(basePage * pageSize() + firstLeft->sizeInBytes()));
+        EXPECT_EQ(secondLeft->start().untaggedPtr(), reinterpret_cast<void*>(basePage * pageSize() + firstLeft->sizeInBytes()));
         
         MetaAllocatorHandle* secondRight = allocate(secondRightSize);
-        EXPECT_EQ(secondRight->end(), reinterpret_cast<void*>((basePage + defaultPagesInHeap) * pageSize() - firstRight->sizeInBytes()));
+        EXPECT_EQ(secondRight->end().untaggedPtr(), reinterpret_cast<void*>((basePage + defaultPagesInHeap) * pageSize() - firstRight->sizeInBytes()));
         
         free(firstLeft);
         free(firstRight);
@@ -496,7 +496,7 @@
         free(secondRight);
         
         MetaAllocatorHandle* final = allocate(defaultPagesInHeap * pageSize());
-        EXPECT_EQ(final->start(), reinterpret_cast<void*>(basePage * pageSize()));
+        EXPECT_EQ(final->start().untaggedPtr(), reinterpret_cast<void*>(basePage * pageSize()));
         
         free(final);
     }
@@ -511,7 +511,7 @@
         for (unsigned index = 0; index < numSlots; ++index) {
             MetaAllocatorHandle* toFree = allocate(size, sanityCheckMode);
             if (!handles.isEmpty()) {
-                while (toFree->start() != handles.last()->end()) {
+                while (toFree->start().untaggedPtr() != handles.last()->end().untaggedPtr()) {
                     handlesToFree.append(toFree);
                     toFree = allocate(size, sanityCheckMode);
                 }
@@ -518,9 +518,9 @@
             }
 
             MetaAllocatorHandle* fragger = allocate(32, sanityCheckMode);
-            EXPECT_EQ(fragger->start(), toFree->end());
+            EXPECT_EQ(fragger->start().untaggedPtr(), toFree->end().untaggedPtr());
             
-            locations.append(toFree->start());
+            locations.append(toFree->start().untaggedPtr());
 
             handlesToFree.append(toFree);
             handles.append(fragger);
@@ -537,14 +537,14 @@
         for (unsigned index = 0; index < numSlots; ++index) {
             MetaAllocatorHandle* bestFit = allocate(size - 32, sanityCheckMode);
             
-            EXPECT_TRUE(bestFit->start() == locations.at(index)
-                        || bestFit->end() == reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(locations.at(index)) + size));
+            EXPECT_TRUE(bestFit->start().untaggedPtr() == locations.at(index)
+                || bestFit->end().untaggedPtr() == reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(locations.at(index)) + size));
             
             MetaAllocatorHandle* small = allocate(32, sanityCheckMode);
-            if (bestFit->start() == locations.at(index))
-                EXPECT_EQ(small->start(), bestFit->end());
+            if (bestFit->start().untaggedPtr() == locations.at(index))
+                EXPECT_EQ(small->start().untaggedPtr(), bestFit->end().untaggedPtr());
             else
-                EXPECT_EQ(small->end(), bestFit->start());
+                EXPECT_EQ(small->end().untaggedPtr(), bestFit->start().untaggedPtr());
             
             free(bestFit, sanityCheckMode);
             free(small, sanityCheckMode);
@@ -556,7 +556,7 @@
             free(handles.at(index), sanityCheckMode);
         
         MetaAllocatorHandle* final = allocate(defaultPagesInHeap * pageSize(), sanityCheckMode);
-        EXPECT_EQ(final->start(), reinterpret_cast<void*>(basePage * pageSize()));
+        EXPECT_EQ(final->start().untaggedPtr(), reinterpret_cast<void*>(basePage * pageSize()));
         
         free(final, sanityCheckMode);
     }
@@ -567,7 +567,7 @@
         MetaAllocatorHandle* handle = allocate(firstSize);
         
         // Shrink it, and make sure that our state reflects the shrinkage.
-        notifyFree(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(handle->start()) + secondSize), firstSize - secondSize);
+        notifyFree(reinterpret_cast<void*>(handle->start().untaggedPtr<uintptr_t>() + secondSize), firstSize - secondSize);
         
         handle->shrink(secondSize);
         EXPECT_EQ(handle->sizeInBytes(), secondSize);
@@ -579,7 +579,7 @@
         
         // Allocate the remainder of the heap.
         MetaAllocatorHandle* remainder = allocate(defaultPagesInHeap * pageSize() - secondSize);
-        EXPECT_EQ(remainder->start(), handle->end());
+        EXPECT_EQ(remainder->start().untaggedPtr(), handle->end().untaggedPtr());
         
         free(remainder);
         free(handle);
@@ -586,7 +586,7 @@
         
         // Assert that the heap is empty and finish up.
         MetaAllocatorHandle* final = allocate(defaultPagesInHeap * pageSize());
-        EXPECT_EQ(final->start(), reinterpret_cast<void*>(basePage * pageSize()));
+        EXPECT_EQ(final->start().untaggedPtr(), reinterpret_cast<void*>(basePage * pageSize()));
         
         free(final);
     }
@@ -608,7 +608,7 @@
         EXPECT_TRUE(currentHeapGrowthMode == DontGrowHeap);
         EXPECT_EQ(allowAllocatePages, static_cast<size_t>(0));
         EXPECT_EQ(requestedNumPages, (secondSize + pageSize() - 1) / pageSize());
-        EXPECT_EQ(secondHandle->start(), reinterpret_cast<void*>((basePage + defaultPagesInHeap) * pageSize()));
+        EXPECT_EQ(secondHandle->start().untaggedPtr(), reinterpret_cast<void*>((basePage + defaultPagesInHeap) * pageSize()));
         
         requestedNumPages = 0;
         
@@ -637,7 +637,7 @@
         EXPECT_TRUE(currentHeapGrowthMode == DontGrowHeap);
         EXPECT_EQ(allowAllocatePages, static_cast<size_t>(0));
         EXPECT_EQ(requestedNumPages, (secondSize + pageSize() - 1) / pageSize());
-        EXPECT_EQ(secondHandle->start(), reinterpret_cast<void*>((basePage + defaultPagesInHeap + 1) * pageSize()));
+        EXPECT_EQ(secondHandle->start().untaggedPtr(), reinterpret_cast<void*>((basePage + defaultPagesInHeap + 1) * pageSize()));
         
         requestedNumPages = 0;
         
@@ -650,8 +650,8 @@
         
         firstHandle = allocate(firstSize);
         secondHandle = allocate(secondSize);
-        EXPECT_EQ(firstHandle->start(), reinterpret_cast<void*>(basePage * pageSize()));
-        EXPECT_EQ(secondHandle->start(), reinterpret_cast<void*>((basePage + defaultPagesInHeap + 1) * pageSize()));
+        EXPECT_EQ(firstHandle->start().untaggedPtr(), reinterpret_cast<void*>(basePage * pageSize()));
+        EXPECT_EQ(secondHandle->start().untaggedPtr(), reinterpret_cast<void*>((basePage + defaultPagesInHeap + 1) * pageSize()));
         free(firstHandle);
         free(secondHandle);
     }
@@ -670,7 +670,7 @@
     ASSERT(!allocator->allocate(0, 0));
     
     MetaAllocatorHandle* final = allocate(defaultPagesInHeap * pageSize());
-    EXPECT_EQ(final->start(), reinterpret_cast<void*>(basePage * pageSize()));
+    EXPECT_EQ(final->start().untaggedPtr(), reinterpret_cast<void*>(basePage * pageSize()));
     free(final);
 }
 
@@ -955,3 +955,14 @@
 }
 
 } // namespace TestWebKitAPI
+
+#if USE(POINTER_PROFILING)
+
+namespace WTF {
+
+const char* tagForPtr(const void*) { return "<unknown>"; }
+const char* ptrTagName(PtrTag) { return "<unknown>"; }
+
+} // namespace WTF
+
+#endif // USE(POINTER_PROFILING)
_______________________________________________
webkit-changes mailing list
[email protected]
https://lists.webkit.org/mailman/listinfo/webkit-changes

Reply via email to