Title: [265416] trunk/Source
Revision
265416
Author
[email protected]
Date
2020-08-09 04:49:11 -0700 (Sun, 09 Aug 2020)

Log Message

Unreviewed, reverting r263195, r263252, and r265394.
https://bugs.webkit.org/show_bug.cgi?id=215312

Revert all related GC Bitmap changes because some of perf is
not fully recovered

Reverted changesets:

"Replace JSC::FreeList linked list with a Bitmap."
https://bugs.webkit.org/show_bug.cgi?id=213071
https://trac.webkit.org/changeset/263195

"Unify Bitmap math loops in
MarkedBlock::Handle::specializedSweep()."
https://bugs.webkit.org/show_bug.cgi?id=213345
https://trac.webkit.org/changeset/263252

"[JSC] Disable ENABLE_BITMAP_FREELIST"
https://bugs.webkit.org/show_bug.cgi?id=215285
https://trac.webkit.org/changeset/265394

Modified Paths

Diff

Modified: trunk/Source/JavaScriptCore/ChangeLog (265415 => 265416)


--- trunk/Source/JavaScriptCore/ChangeLog	2020-08-09 07:36:19 UTC (rev 265415)
+++ trunk/Source/JavaScriptCore/ChangeLog	2020-08-09 11:49:11 UTC (rev 265416)
@@ -1,3 +1,26 @@
+2020-08-09  Commit Queue  <[email protected]>
+
+        Unreviewed, reverting r263195, r263252, and r265394.
+        https://bugs.webkit.org/show_bug.cgi?id=215312
+
+        Revert all related GC Bitmap changes because some of perf is
+        not fully recovered
+
+        Reverted changesets:
+
+        "Replace JSC::FreeList linked list with a Bitmap."
+        https://bugs.webkit.org/show_bug.cgi?id=213071
+        https://trac.webkit.org/changeset/263195
+
+        "Unify Bitmap math loops in
+        MarkedBlock::Handle::specializedSweep()."
+        https://bugs.webkit.org/show_bug.cgi?id=213345
+        https://trac.webkit.org/changeset/263252
+
+        "[JSC] Disable ENABLE_BITMAP_FREELIST"
+        https://bugs.webkit.org/show_bug.cgi?id=215285
+        https://trac.webkit.org/changeset/265394
+
 2020-08-08  Yusuke Suzuki  <[email protected]>
 
         [JSC] Speculate children first in DFG NewArray

Modified: trunk/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp (265415 => 265416)


--- trunk/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp	2020-08-09 07:36:19 UTC (rev 265415)
+++ trunk/Source/JavaScriptCore/ftl/FTLLowerDFGToB3.cpp	2020-08-09 11:49:11 UTC (rev 265416)
@@ -15204,14 +15204,7 @@
             patchpoint->numGPScratchRegisters++;
         else
             patchpoint->appendSomeRegisterWithClobber(allocator);
-#if ENABLE(BITMAP_FREELIST)
-        constexpr unsigned scratchRegistersNeeded = 3;
-        constexpr unsigned allocatorScratch = 3;
-#else
-        constexpr unsigned scratchRegistersNeeded = 1;
-        constexpr unsigned allocatorScratch = 1;
-#endif
-        patchpoint->numGPScratchRegisters += scratchRegistersNeeded;
+        patchpoint->numGPScratchRegisters++;
         patchpoint->resultConstraints = { ValueRep::SomeEarlyRegister };
         
         m_out.appendSuccessor(usually(continuation));
@@ -15224,7 +15217,7 @@
                 
                 GPRReg allocatorGPR;
                 if (actualAllocator.isConstant())
-                    allocatorGPR = params.gpScratch(allocatorScratch);
+                    allocatorGPR = params.gpScratch(1);
                 else
                     allocatorGPR = params[1].gpr();
                 
@@ -15236,11 +15229,7 @@
                 // all of the compiler tiers.
                 jit.emitAllocateWithNonNullAllocator(
                     params[0].gpr(), actualAllocator, allocatorGPR, params.gpScratch(0),
-                    jumpToSlowPath
-#if ENABLE(BITMAP_FREELIST)
-                    , params.gpScratch(1), params.gpScratch(2)
-#endif
-                    );
+                    jumpToSlowPath);
                 
                 CCallHelpers::Jump jumpToSuccess;
                 if (!params.fallsThroughToSuccessor(0))

Modified: trunk/Source/JavaScriptCore/heap/FreeList.cpp (265415 => 265416)


--- trunk/Source/JavaScriptCore/heap/FreeList.cpp	2020-08-09 07:36:19 UTC (rev 265415)
+++ trunk/Source/JavaScriptCore/heap/FreeList.cpp	2020-08-09 11:49:11 UTC (rev 265416)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2016-2020 Apple Inc. All rights reserved.
+ * Copyright (C) 2016-2017 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -39,48 +39,13 @@
 
 void FreeList::clear()
 {
-#if ENABLE(BITMAP_FREELIST)
-    m_currentRowBitmap = 0;
-    m_currentRowIndex = 0;
-#else
     m_scrambledHead = 0;
     m_secret = 0;
-#endif
     m_payloadEnd = nullptr;
     m_remaining = 0;
     m_originalSize = 0;
 }
 
-#if ENABLE(BITMAP_FREELIST)
-
-void FreeList::initializeAtomsBitmap(MarkedBlock::Handle* block, AtomsBitmap& freeAtoms, unsigned bytes)
-{
-#if ASSERT_ENABLED
-    m_markedBlock = block;
-#endif
-    ASSERT_UNUSED(freeAtoms, &freeAtoms == &m_bitmap);
-    // m_bitmap has already been filled in by MarkedBlock::Handle::specializedSweep().
-
-    m_currentRowBitmap = 0;
-    size_t rowIndex = AtomsBitmap::numberOfWords;
-    while (rowIndex--) {
-        auto rowBitmap = m_bitmap.wordAt(rowIndex);
-        if (rowBitmap) {
-            m_currentRowBitmap = rowBitmap;
-            break;
-        }
-    }
-    ASSERT(m_currentRowBitmap || m_bitmap.isEmpty());
-    m_currentRowIndex = m_currentRowBitmap ? rowIndex : 0;
-
-    size_t firstAtomInRow = m_currentRowIndex * atomsPerRow;
-    m_currentMarkedBlockRowAddress = bitwise_cast<Atom*>(block->atomAt(firstAtomInRow));
-    m_originalSize = bytes;
-}
-
-#else
-// Linked List implementation.
-
 void FreeList::initializeList(FreeCell* head, uintptr_t secret, unsigned bytes)
 {
     // It's *slightly* more optimal to use a scrambled head. It saves a register on the fast path.
@@ -91,23 +56,16 @@
     m_originalSize = bytes;
 }
 
-#endif // ENABLE(BITMAP_FREELIST)
-
 void FreeList::initializeBump(char* payloadEnd, unsigned remaining)
 {
-#if ENABLE(BITMAP_FREELIST)
-    m_currentRowBitmap = 0;
-    m_currentRowIndex = 0;
-#else
     m_scrambledHead = 0;
     m_secret = 0;
-#endif
     m_payloadEnd = payloadEnd;
     m_remaining = remaining;
     m_originalSize = remaining;
 }
 
-bool FreeList::contains(HeapCell* target, MarkedBlock::Handle* currentBlock) const
+bool FreeList::contains(HeapCell* target) const
 {
     if (m_remaining) {
         const void* start = (m_payloadEnd - m_remaining);
@@ -115,31 +73,6 @@
         return (start <= target) && (target < end);
     }
 
-#if ENABLE(BITMAP_FREELIST)
-    if (bitmapIsEmpty())
-        return false;
-
-    // currentBlock may be null if the allocator has been reset (and therefore,
-    // the FreeList cleared. Hence, we should only check this assertion after
-    // we check if the FreeList bitmap is empty above.
-    ASSERT(m_markedBlock == currentBlock);
-    if (!currentBlock->contains(target))
-        return false;
-
-    unsigned atomNumber = currentBlock->block().atomNumber(target);
-    unsigned rowIndex = atomNumber / atomsPerRow;
-    if (rowIndex > m_currentRowIndex)
-        return false;
-    if (rowIndex == m_currentRowIndex) {
-        constexpr AtomsBitmap::Word one = 1;
-        unsigned firstAtomInRow = rowIndex * atomsPerRow;
-        unsigned atomIndexInRow = atomNumber - firstAtomInRow;
-        return m_currentRowBitmap & (one << atomIndexInRow);
-    }
-    return m_bitmap.get(atomNumber);
-
-#else
-    UNUSED_PARAM(currentBlock);
     FreeCell* candidate = head();
     while (candidate) {
         if (bitwise_cast<HeapCell*>(candidate) == target)
@@ -146,20 +79,13 @@
             return true;
         candidate = candidate->next(m_secret);
     }
+
     return false;
-#endif
 }
 
 void FreeList::dump(PrintStream& out) const
 {
-#if ENABLE(BITMAP_FREELIST)
-    if (m_remaining)
-        out.print("{payloadEnd = ", RawPointer(m_payloadEnd), ", remaining = ", m_remaining, ", originalSize = ", m_originalSize, "}");
-    else
-        out.print("{currentRowBitmap = ", m_currentRowBitmap, ", currentRowIndex = ", m_currentRowIndex, ", originalSize = ", m_originalSize, "}");
-#else
     out.print("{head = ", RawPointer(head()), ", secret = ", m_secret, ", payloadEnd = ", RawPointer(m_payloadEnd), ", remaining = ", m_remaining, ", originalSize = ", m_originalSize, "}");
-#endif
 }
 
 } // namespace JSC

Modified: trunk/Source/JavaScriptCore/heap/FreeList.h (265415 => 265416)


--- trunk/Source/JavaScriptCore/heap/FreeList.h	2020-08-09 07:36:19 UTC (rev 265415)
+++ trunk/Source/JavaScriptCore/heap/FreeList.h	2020-08-09 11:49:11 UTC (rev 265416)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2016-2020 Apple Inc. All rights reserved.
+ * Copyright (C) 2016-2019 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -25,25 +25,13 @@
 
 #pragma once
 
-#include "MarkedBlock.h"
 #include <wtf/Noncopyable.h>
 #include <wtf/PrintStream.h>
-#include <wtf/StdIntExtras.h>
 
 namespace JSC {
 
 class HeapCell;
 
-// Currently it is disabled because of Speedometer2 regression.
-// FIXME: We should attempt to enable it again or remove the code.
-// https://bugs.webkit.org/show_bug.cgi?id=215284
-#if CPU(X86_64)
-#define ENABLE_BITMAP_FREELIST 0
-#else
-#define ENABLE_BITMAP_FREELIST 0
-#endif
-
-#if !ENABLE(BITMAP_FREELIST)
 struct FreeCell {
     static uintptr_t scramble(FreeCell* cell, uintptr_t secret)
     {
@@ -70,7 +58,6 @@
     uint64_t preservedBitsForCrashAnalysis;
     uintptr_t scrambledNext;
 };
-#endif
 
 class FreeList {
 public:
@@ -79,14 +66,16 @@
     
     void clear();
     
+    JS_EXPORT_PRIVATE void initializeList(FreeCell* head, uintptr_t secret, unsigned bytes);
     JS_EXPORT_PRIVATE void initializeBump(char* payloadEnd, unsigned remaining);
     
+    bool allocationWillFail() const { return !head() && !m_remaining; }
     bool allocationWillSucceed() const { return !allocationWillFail(); }
     
     template<typename Func>
     HeapCell* allocate(const Func& slowPath);
     
-    bool contains(HeapCell*, MarkedBlock::Handle* currentBlock) const;
+    bool contains(HeapCell*) const;
     
     template<typename Func>
     void forEach(const Func&) const;
@@ -93,47 +82,11 @@
     
     unsigned originalSize() const { return m_originalSize; }
 
-#if ENABLE(BITMAP_FREELIST)
-    using Atom = MarkedBlock::Atom;
-    using AtomsBitmap = MarkedBlock::AtomsBitmap;
-
-    static constexpr size_t atomsPerRow = AtomsBitmap::bitsInWord;
-    static constexpr size_t atomsRowBytes = atomsPerRow * sizeof(Atom);
-    static constexpr unsigned atomSizeShift = WTF::log2Constexpr(sizeof(Atom));
-    static_assert((static_cast<size_t>(1) << atomSizeShift) == sizeof(Atom));
-
-    JS_EXPORT_PRIVATE void initializeAtomsBitmap(MarkedBlock::Handle*, AtomsBitmap& freeAtoms, unsigned bytes);
-
-    bool bitmapIsEmpty() const
-    {
-        // Remember, we don't actually clear the bits in m_bitmap as we allocate
-        // the atoms. Instead, m_currentRowBitmap and m_currentRowIndex tells us
-        // if there are atoms still available for allocation. See comment blob below
-        // at the declaration of m_currentRowIndex for more details.
-        return !m_currentRowBitmap && !m_currentRowIndex;
-    }
-    bool allocationWillFail() const { return bitmapIsEmpty() && !m_remaining; }
-
-    static ptrdiff_t offsetOfCurrentRowBitmap() { return OBJECT_OFFSETOF(FreeList, m_currentRowBitmap); }
-
-    // We're deliberately returning the address of 1 word before m_bitmap so that
-    // we can schedule instructions better i.e. to do a load before decrementing the
-    // row index.
-    static ptrdiff_t offsetOfBitmapRowsMinusOne() { return OBJECT_OFFSETOF(FreeList, m_bitmap) - sizeof(AtomsBitmap::Word); }
-
-    static ptrdiff_t offsetOfCurrentRowIndex() { return OBJECT_OFFSETOF(FreeList, m_currentRowIndex); }
-    static ptrdiff_t offsetOfCurrentMarkedBlockRowAddress() { return OBJECT_OFFSETOF(FreeList, m_currentMarkedBlockRowAddress); }
-#else
-    JS_EXPORT_PRIVATE void initializeList(FreeCell* head, uintptr_t secret, unsigned bytes);
-
-    bool allocationWillFail() const { return !head() && !m_remaining; }
-
     static ptrdiff_t offsetOfScrambledHead() { return OBJECT_OFFSETOF(FreeList, m_scrambledHead); }
     static ptrdiff_t offsetOfSecret() { return OBJECT_OFFSETOF(FreeList, m_secret); }
-#endif
-
     static ptrdiff_t offsetOfPayloadEnd() { return OBJECT_OFFSETOF(FreeList, m_payloadEnd); }
     static ptrdiff_t offsetOfRemaining() { return OBJECT_OFFSETOF(FreeList, m_remaining); }
+    static ptrdiff_t offsetOfOriginalSize() { return OBJECT_OFFSETOF(FreeList, m_originalSize); }
     static ptrdiff_t offsetOfCellSize() { return OBJECT_OFFSETOF(FreeList, m_cellSize); }
     
     JS_EXPORT_PRIVATE void dump(PrintStream&) const;
@@ -141,74 +94,14 @@
     unsigned cellSize() const { return m_cellSize; }
     
 private:
-
-#if ENABLE(BITMAP_FREELIST)
-    AtomsBitmap& atomsBitmap() { return m_bitmap; }
-    AtomsBitmap::Word* bitmapRowsMinusOne() const
-    {
-        // See comment about offsetOfBitmapRowsMinusOne().
-        return bitwise_cast<AtomsBitmap::Word*>(&m_bitmap) - 1;
-    }
-
-    // This allocation algorithm thinks of the MarkedBlock as consisting of rows
-    // of atoms, where the number of atoms in a row equals the number of bits in
-    // a AtomsBitmap::Word. On 64-bit CPUs, this would be 64.
-    //
-    // We will start allocating from the last (highest numbered) row down to the
-    // first (row 0). As we allocate, we will only update m_currentRowIndex and
-    // m_currentRowBitmap. m_bitmap will not be updated. This is so in order to
-    // reduce the number of instructions executed during an allocation.
-    //
-    // When m_currentRowIndex points to N, the AtomsBitmap::Word for row N in
-    // m_bitmap will have been copied into m_currentRowBitmap. This is the row
-    // that we will be allocating from until the row is exhausted.
-    //
-    // This is how we know whether an atom is available for allocation or not:
-    // 1. Atoms in any rows above m_currentRowIndex are guaranteed to be
-    //    allocated already (because we allocate downwards), and hence, are not
-    //    available.
-    // 2. For row m_currentRowIndex, m_currentRowBitmap is the source of truth
-    //    on which atoms in the row are available for allocation.
-    // 3. For rows below m_currentRowIndex, m_bitmap is the source of truth on
-    //    which atoms are available for allocation.
-    //
-    // When m_currentRowIndex reaches 0, the info in m_bitmap is completely
-    // obsoleted, and m_currentRowBitmap holds the availability info for row 0.
-    // When both m_currentRowIndex and m_currentRowBitmap are 0, then we have
-    // completely exhausted the block and no more atoms are available for
-    // allocation.
-
-    AtomsBitmap::Word m_currentRowBitmap { 0 };
-    unsigned m_currentRowIndex { 0 };
-    unsigned m_originalSize { 0 };
-
-#else
     FreeCell* head() const { return FreeCell::descramble(m_scrambledHead, m_secret); }
-
+    
     uintptr_t m_scrambledHead { 0 };
     uintptr_t m_secret { 0 };
-#endif
-
-    union {
-        char* m_payloadEnd { nullptr };
-#if ENABLE(BITMAP_FREELIST)
-        Atom* m_currentMarkedBlockRowAddress;
-#endif
-    };
+    char* m_payloadEnd { nullptr };
     unsigned m_remaining { 0 };
+    unsigned m_originalSize { 0 };
     unsigned m_cellSize { 0 };
-
-#if ENABLE(BITMAP_FREELIST)
-    AtomsBitmap m_bitmap;
-#else
-    unsigned m_originalSize { 0 };
-#endif
-
-#if ASSERT_ENABLED
-    MarkedBlock::Handle* m_markedBlock { nullptr };
-#endif
-
-    friend class MarkedBlock;
 };
 
 } // namespace JSC

Modified: trunk/Source/JavaScriptCore/heap/FreeListInlines.h (265415 => 265416)


--- trunk/Source/JavaScriptCore/heap/FreeListInlines.h	2020-08-09 07:36:19 UTC (rev 265415)
+++ trunk/Source/JavaScriptCore/heap/FreeListInlines.h	2020-08-09 11:49:11 UTC (rev 265416)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2017-2020 Apple Inc. All rights reserved.
+ * Copyright (C) 2017 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -26,7 +26,7 @@
 #pragma once
 
 #include "FreeList.h"
-#include <wtf/MathExtras.h>
+#include "MarkedBlock.h"
 
 namespace JSC {
 
@@ -40,39 +40,7 @@
         m_remaining = remaining;
         return bitwise_cast<HeapCell*>(m_payloadEnd - remaining - cellSize);
     }
-
-#if ENABLE(BITMAP_FREELIST)
-    AtomsBitmap::Word rowBitmap = m_currentRowBitmap;
-    do {
-        if (rowBitmap) {
-            constexpr AtomsBitmap::Word one = 1;
-            unsigned atomIndexInRow = ctz(rowBitmap);
-            auto* cell = bitwise_cast<HeapCell*>(&m_currentMarkedBlockRowAddress[atomIndexInRow]);
-            rowBitmap &= ~(one << atomIndexInRow);
-            m_currentRowBitmap = rowBitmap;
-            return cell;
-        }
-
-        unsigned rowIndex = m_currentRowIndex;
-        auto* rowAddress = m_currentMarkedBlockRowAddress;
-        while (rowIndex) {
-            // We load before decrementing rowIndex because bitmapRowsMinusOne() points
-            // to 1 word before m_bitmap. See comments about offsetOfBitmapRowsMinusOne()
-            // for why we do this.
-            rowBitmap = bitmapRowsMinusOne()[rowIndex--];
-            rowAddress -= atomsPerRow;
-            if (rowBitmap)
-                break;
-        }
-        m_currentMarkedBlockRowAddress = rowAddress;
-        m_currentRowIndex = rowIndex;
-    } while (rowBitmap);
-
-    m_currentRowBitmap = rowBitmap;
-    ASSERT(bitmapIsEmpty());
-    return slowPath();
-
-#else // !ENABLE(BITMAP_FREELIST)
+    
     FreeCell* result = head();
     if (UNLIKELY(!result))
         return slowPath();
@@ -79,7 +47,6 @@
     
     m_scrambledHead = result->scrambledNext;
     return bitwise_cast<HeapCell*>(result);
-#endif // !ENABLE(BITMAP_FREELIST)
 }
 
 template<typename Func>
@@ -89,33 +56,6 @@
         for (unsigned remaining = m_remaining; remaining; remaining -= m_cellSize)
             func(bitwise_cast<HeapCell*>(m_payloadEnd - remaining));
     } else {
-#if ENABLE(BITMAP_FREELIST)
-        if (bitmapIsEmpty())
-            return;
-
-        AtomsBitmap::Word rowBitmap = m_currentRowBitmap;
-        unsigned rowIndex = m_currentRowIndex;
-        Atom* currentMarkedBlockRowAddress = m_currentMarkedBlockRowAddress;
-        do {
-            while (rowBitmap) {
-                constexpr AtomsBitmap::Word one = 1;
-                unsigned atomIndexInRow = ctz(rowBitmap);
-                auto* cell = bitwise_cast<HeapCell*>(&currentMarkedBlockRowAddress[atomIndexInRow]);
-                rowBitmap &= ~(one << atomIndexInRow);
-                func(cell);
-            }
-
-            while (rowIndex) {
-                // We load before decrementing rowIndex because bitmapRowsMinusOne() points
-                // to 1 word before m_bitmap. See comments about offsetOfBitmapRowsMinusOne()
-                // for why we do this.
-                rowBitmap = bitmapRowsMinusOne()[rowIndex--];
-                currentMarkedBlockRowAddress -= atomsPerRow;
-                if (rowBitmap)
-                    break;
-            }
-        } while (rowBitmap);
-#else
         for (FreeCell* cell = head(); cell;) {
             // We can use this to overwrite free objects before destroying the free list. So, we need
             // to get next before proceeding further.
@@ -123,8 +63,8 @@
             func(bitwise_cast<HeapCell*>(cell));
             cell = next;
         }
-#endif
     }
 }
 
 } // namespace JSC
+

Modified: trunk/Source/JavaScriptCore/heap/LocalAllocator.cpp (265415 => 265416)


--- trunk/Source/JavaScriptCore/heap/LocalAllocator.cpp	2020-08-09 07:36:19 UTC (rev 265415)
+++ trunk/Source/JavaScriptCore/heap/LocalAllocator.cpp	2020-08-09 11:49:11 UTC (rev 265416)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2018-2020 Apple Inc. All rights reserved.
+ * Copyright (C) 2018-2019 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -272,7 +272,7 @@
     // if we know that the block owning the object is free-listed, then it's impossible for any
     // objects to be in the dead-but-not-destructed state.
     // FIXME: Get rid of this abomination. https://bugs.webkit.org/show_bug.cgi?id=181655
-    return m_freeList.contains(bitwise_cast<HeapCell*>(target), m_currentBlock);
+    return m_freeList.contains(bitwise_cast<HeapCell*>(target));
 }
 
 } // namespace JSC

Modified: trunk/Source/JavaScriptCore/heap/MarkedBlock.h (265415 => 265416)


--- trunk/Source/JavaScriptCore/heap/MarkedBlock.h	2020-08-09 07:36:19 UTC (rev 265415)
+++ trunk/Source/JavaScriptCore/heap/MarkedBlock.h	2020-08-09 11:49:11 UTC (rev 265416)
@@ -1,7 +1,7 @@
 /*
  *  Copyright (C) 1999-2000 Harri Porten ([email protected])
  *  Copyright (C) 2001 Peter Kelly ([email protected])
- *  Copyright (C) 2003-2020 Apple Inc. All rights reserved.
+ *  Copyright (C) 2003-2019 Apple Inc. All rights reserved.
  *
  *  This library is free software; you can redistribute it and/or
  *  modify it under the terms of the GNU Lesser General Public
@@ -84,8 +84,6 @@
     static_assert(!(MarkedBlock::atomSize & (MarkedBlock::atomSize - 1)), "MarkedBlock::atomSize must be a power of two.");
     static_assert(!(MarkedBlock::blockSize & (MarkedBlock::blockSize - 1)), "MarkedBlock::blockSize must be a power of two.");
     
-    using AtomsBitmap = Bitmap<atomsPerBlock>;
-
     struct VoidFunctor {
         typedef void ReturnType;
         void returnValue() { }
@@ -205,7 +203,6 @@
         
         void* start() const { return &m_block->atoms()[0]; }
         void* end() const { return &m_block->atoms()[m_endAtom]; }
-        void* atomAt(size_t i) const { return &m_block->atoms()[i]; }
         bool contains(void* p) const { return start() <= p && p < end(); }
 
         void dumpState(PrintStream&);
@@ -297,8 +294,8 @@
         HeapVersion m_markingVersion;
         HeapVersion m_newlyAllocatedVersion;
 
-        AtomsBitmap m_marks;
-        AtomsBitmap m_newlyAllocated;
+        Bitmap<atomsPerBlock> m_marks;
+        Bitmap<atomsPerBlock> m_newlyAllocated;
     };
     
 private:    
@@ -339,7 +336,7 @@
     bool isNewlyAllocated(const void*);
     void setNewlyAllocated(const void*);
     void clearNewlyAllocated(const void*);
-    const AtomsBitmap& newlyAllocated() const;
+    const Bitmap<atomsPerBlock>& newlyAllocated() const;
     
     HeapVersion newlyAllocatedVersion() const { return footer().m_newlyAllocatedVersion; }
     
@@ -377,7 +374,7 @@
     bool isMarkedRaw(const void* p);
     HeapVersion markingVersion() const { return footer().m_markingVersion; }
     
-    const AtomsBitmap& marks() const;
+    const Bitmap<atomsPerBlock>& marks() const;
     
     CountingLock& lock() { return footer().m_lock; }
     
@@ -402,8 +399,6 @@
     
     inline bool marksConveyLivenessDuringMarking(HeapVersion markingVersion);
     inline bool marksConveyLivenessDuringMarking(HeapVersion myMarkingVersion, HeapVersion markingVersion);
-
-    friend class FreeList;
 };
 
 inline MarkedBlock::Footer& MarkedBlock::footer()

Modified: trunk/Source/JavaScriptCore/heap/MarkedBlockInlines.h (265415 => 265416)


--- trunk/Source/JavaScriptCore/heap/MarkedBlockInlines.h	2020-08-09 07:36:19 UTC (rev 265415)
+++ trunk/Source/JavaScriptCore/heap/MarkedBlockInlines.h	2020-08-09 11:49:11 UTC (rev 265416)
@@ -264,11 +264,6 @@
     
     m_directory->setIsDestructible(NoLockingNecessary, this, false);
     
-    char* startOfLastCell = static_cast<char*>(cellAlign(block.atoms() + m_endAtom - 1));
-    char* payloadEnd = startOfLastCell + cellSize;
-    RELEASE_ASSERT(payloadEnd - MarkedBlock::blockSize <= bitwise_cast<char*>(&block));
-    char* payloadBegin = bitwise_cast<char*>(block.atoms());
-
     if (Options::useBumpAllocator()
         && emptyMode == IsEmpty
         && newlyAllocatedMode == DoesNotHaveNewlyAllocated) {
@@ -285,6 +280,11 @@
                 });
         }
         
+        char* startOfLastCell = static_cast<char*>(cellAlign(block.atoms() + m_endAtom - 1));
+        char* payloadEnd = startOfLastCell + cellSize;
+        RELEASE_ASSERT(payloadEnd - MarkedBlock::blockSize <= bitwise_cast<char*>(&block));
+        char* payloadBegin = bitwise_cast<char*>(block.atoms());
+        
         if (sweepMode == SweepToFreeList)
             setIsFreeListed();
         if (space()->isMarking())
@@ -303,96 +303,6 @@
         return;
     }
 
-#if ENABLE(BITMAP_FREELIST)
-    // The code below is an optimized version of the following by merging the
-    // various loops over the bitmaps.
-    //
-    //    AtomsBitmap cellLocations;
-    //    cellLocations.setEachNthBit(m_atomsPerCell, 0, m_endAtom);
-    //
-    //    if (emptyMode == NotEmpty) {
-    //        if (marksMode == MarksNotStale) {
-    //            freeAtoms = footer.m_marks;
-    //            if (newlyAllocatedMode == HasNewlyAllocated)
-    //                freeAtoms |= footer.m_newlyAllocated;
-    //        } else if (newlyAllocatedMode == HasNewlyAllocated)
-    //            freeAtoms = footer.m_newlyAllocated;
-    //        // At this point, a set bit in freeAtoms represents live cells.
-    //        isEmpty = freeAtoms.isEmpty();
-    //
-    //        // Invert the bits at each cell location so that the ones for live cells
-    //        // are cleared, and the ones for dead cells are set.
-    //        freeAtoms ^= cellLocations;
-    //    } else
-    //        freeAtoms = cellLocations; // all cells are free.
-
-    AtomsBitmap localFreeAtoms;
-    AtomsBitmap& freeAtoms = freeList ? freeList->atomsBitmap() : localFreeAtoms;
-
-    AtomsBitmap::Word* free = freeAtoms.words();
-    AtomsBitmap::Word* marks = footer.m_marks.words();
-    AtomsBitmap::Word* newlyAllocated = footer.m_newlyAllocated.words();
-
-    unsigned roundedUpEndAtoms = roundUpToMultipleOf<AtomsBitmap::bitsInWord>(m_endAtom);
-    unsigned endWordIndex = roundedUpEndAtoms / AtomsBitmap::bitsInWord;
-    ASSERT(m_endAtom <= endWordIndex * AtomsBitmap::bitsInWord);
-
-    if (freeList)
-        freeAtoms.clearAll();
-    freeAtoms.setEachNthBit(m_atomsPerCell, 0, m_endAtom);
-
-    if (emptyMode == NotEmpty) {
-        if (marksMode == MarksNotStale && newlyAllocatedMode == HasNewlyAllocated) {
-            for (unsigned i = 0; i < endWordIndex; ++i)
-                free[i] ^= marks[i] | newlyAllocated[i];
-
-        } else if (marksMode == MarksNotStale) {
-            for (unsigned i = 0; i < endWordIndex; ++i)
-                free[i] ^= marks[i];
-
-        } else if (newlyAllocatedMode == HasNewlyAllocated) {
-            for (unsigned i = 0; i < endWordIndex; ++i)
-                free[i] ^= newlyAllocated[i];
-        }
-    }
-
-    // At this point, a set bit in freeAtoms represents a dead cell.
-
-    // We only want to discard the newlyAllocated bits if we're creating a FreeList,
-    // otherwise we would lose information on what's currently alive.
-    if (sweepMode == SweepToFreeList && newlyAllocatedMode == HasNewlyAllocated)
-        footer.m_newlyAllocatedVersion = MarkedSpace::nullVersion;
-
-    if (space()->isMarking())
-        footer.m_lock.unlock();
-
-    // Handle dead cells.
-    unsigned deadCellCount = 0;
-    freeAtoms.forEachSetBit([&] (size_t i) {
-        HeapCell* cell = reinterpret_cast_ptr<HeapCell*>(atomAt(i));
-
-        if (destructionMode != BlockHasNoDestructors)
-            destroy(cell);
-
-        if (sweepMode == SweepToFreeList) {
-            if (scribbleMode == Scribble)
-                scribble(cell, cellSize);
-        }
-        ++deadCellCount;
-    });
-
-    unsigned numberOfCellsInBlock = (payloadEnd - payloadBegin) / cellSize;
-    bool isEmpty = (deadCellCount == numberOfCellsInBlock);
-    if (sweepMode == SweepToFreeList) {
-        freeList->initializeAtomsBitmap(this, freeAtoms, deadCellCount * cellSize);
-        setIsFreeListed();
-    } else if (isEmpty)
-        m_directory->setIsEmpty(NoLockingNecessary, this, true);
-    if (false)
-        dataLog("Slowly swept block ", RawPointer(&block), " with cell size ", cellSize, " and attributes ", m_attributes, ": ", pointerDump(freeList), "\n");
-
-#else // not ENABLE(BITMAP_FREELIST)
-
     // This produces a free list that is ordered in reverse through the block.
     // This is fine, since the allocation code makes no assumptions about the
     // order of the free list.
@@ -451,7 +361,6 @@
         m_directory->setIsEmpty(NoLockingNecessary, this, true);
     if (false)
         dataLog("Slowly swept block ", RawPointer(&block), " with cell size ", cellSize, " and attributes ", m_attributes, ": ", pointerDump(freeList), "\n");
-#endif // ENABLE(BITMAP_FREELIST)
 }
 
 template<typename DestroyFunc>

Modified: trunk/Source/JavaScriptCore/jit/AssemblyHelpers.cpp (265415 => 265416)


--- trunk/Source/JavaScriptCore/jit/AssemblyHelpers.cpp	2020-08-09 07:36:19 UTC (rev 265415)
+++ trunk/Source/JavaScriptCore/jit/AssemblyHelpers.cpp	2020-08-09 11:49:11 UTC (rev 265416)
@@ -521,7 +521,7 @@
 }
 #endif
 
-void AssemblyHelpers::emitAllocateWithNonNullAllocator(GPRReg resultGPR, const JITAllocator& allocator, GPRReg allocatorGPR, GPRReg scratchGPR, JumpList& slowPath, Optional<GPRReg> optionalScratchGPR2, Optional<GPRReg> optionalScratchGPR3)
+void AssemblyHelpers::emitAllocateWithNonNullAllocator(GPRReg resultGPR, const JITAllocator& allocator, GPRReg allocatorGPR, GPRReg scratchGPR, JumpList& slowPath)
 {
     if (Options::forceGCSlowPaths()) {
         slowPath.append(jump());
@@ -534,7 +534,7 @@
     // - We *can* use RegisterSet::macroScratchRegisters on ARM.
 
     Jump popPath;
-    JumpList done;
+    Jump done;
     
     if (allocator.isConstant())
         move(TrustedImmPtr(allocator.allocator().localAllocator()), allocatorGPR);
@@ -552,124 +552,10 @@
     Address payloadEndAddr = Address(allocatorGPR, LocalAllocator::offsetOfFreeList() + FreeList::offsetOfPayloadEnd());
     addPtr(payloadEndAddr, resultGPR);
 
-    done.append(jump());
+    done = jump();
         
-#if ENABLE(BITMAP_FREELIST)
-    ASSERT(resultGPR != scratchGPR);
-
-    auto rowIndexGPR = resultGPR;
-    auto rowBitmapGPR = scratchGPR;
-
-    GPRReg scratchGPR2 = optionalScratchGPR2 ? optionalScratchGPR2.value() : scratchRegister();
-    ASSERT(scratchGPR2 != resultGPR);
-    ASSERT(scratchGPR2 != scratchGPR);
-
-    auto rowAddressGPR = scratchGPR2;
-    auto clearBit64ScratchGPR = scratchGPR2;
-
-    bool canPreloadRowAddressGPR = false;
-    if (optionalScratchGPR3) {
-        clearBit64ScratchGPR = optionalScratchGPR3.value();
-        canPreloadRowAddressGPR = true;
-    } else if (isX86_64()) {
-        // x86_64's clearBit64() does actually need to use clearBit64ScratchGPR.
-        // So, we can preload the row address into it.
-        clearBit64ScratchGPR = InvalidGPRReg;
-        canPreloadRowAddressGPR = true;
-#if CPU(ARM64)
-    } else if (isARM64()) {
-        // ARM64's fast path does actually need to use the memoryTempRegister.
-        // So, we can use that for the clearBit64ScratchGPR and allow the
-        // row address to be preloaded in scratchGPR2.
-        clearBit64ScratchGPR = getCachedMemoryTempRegisterIDAndInvalidate();
-        canPreloadRowAddressGPR = true;
-#endif
-    }
-    ASSERT(clearBit64ScratchGPR != resultGPR);
-    ASSERT(clearBit64ScratchGPR != scratchGPR);
-    if (canPreloadRowAddressGPR)
-        ASSERT(clearBit64ScratchGPR != scratchGPR2);
-
-    // The code below for rowBitmapGPR relies on this.
-    static_assert(sizeof(FreeList::AtomsBitmap::Word) == sizeof(uint64_t));
-
-    // Check for middle path: have another row to visit?
-    Label checkForMoreRows = label();
-
-    load32(Address(allocatorGPR, LocalAllocator::offsetOfFreeList() + FreeList::offsetOfCurrentRowIndex()), rowIndexGPR);
-
-    if (!canPreloadRowAddressGPR)
-        loadPtr(Address(allocatorGPR, LocalAllocator::offsetOfFreeList() + FreeList::offsetOfCurrentMarkedBlockRowAddress()), rowAddressGPR);
-
-    slowPath.append(branchTestPtr(Zero, rowIndexGPR));
-
-    // Middle path: there is another row left to visit.
-    Jump foundNonEmptyRow;
-    Label checkNextRow = label();
-    {
-        // Load the next row bitmap and point m_currentMarkedBlockRowAddress to the next row.
-
-        // Note: offsetOfBitmapRowsMinusOne() points to 1 word before m_bitmap. We do this
-        // deliberately because it allows us to schedule instructions better and
-        // do this load before the decrement below.
-        load64(BaseIndex(allocatorGPR, rowIndexGPR, TimesEight, LocalAllocator::offsetOfFreeList() + FreeList::offsetOfBitmapRowsMinusOne()), rowBitmapGPR);
-
-        sub64(TrustedImm32(1), rowIndexGPR);
-        subPtr(TrustedImm32(FreeList::atomsRowBytes), rowAddressGPR);
-
-        foundNonEmptyRow = branchTest64(NonZero, rowBitmapGPR);
-        branchTestPtr(NonZero, rowIndexGPR).linkTo(checkNextRow, this);
-    }
-
-    // Slow path: no more rows.
-    // Both rowIndexGPR and rowBitmapGPR should be null here.
-    store32(rowIndexGPR, Address(allocatorGPR, LocalAllocator::offsetOfFreeList() + FreeList::offsetOfCurrentRowIndex()));
-    store64(rowBitmapGPR, Address(allocatorGPR, LocalAllocator::offsetOfFreeList() + FreeList::offsetOfCurrentRowBitmap()));
-    slowPath.append(jump());
-
-    // Transition from middle path back to fast path to allocate.
-    foundNonEmptyRow.link(this);
-    storePtr(rowAddressGPR, Address(allocatorGPR, LocalAllocator::offsetOfFreeList() + FreeList::offsetOfCurrentMarkedBlockRowAddress()));
-    store32(rowIndexGPR, Address(allocatorGPR, LocalAllocator::offsetOfFreeList() + FreeList::offsetOfCurrentRowIndex()));
-
-    Jump allocateFromCurrentRow = jump();
-
     popPath.link(this);
-
-    // Check for fast path: have available bit in m_currentRowBitmap?
-    load64(Address(allocatorGPR, LocalAllocator::offsetOfFreeList() + FreeList::offsetOfCurrentRowBitmap()), rowBitmapGPR);
-
-    if (canPreloadRowAddressGPR) {
-        // Preload the row address needed on the fast and middle path.
-        loadPtr(Address(allocatorGPR, LocalAllocator::offsetOfFreeList() + FreeList::offsetOfCurrentMarkedBlockRowAddress()), rowAddressGPR);
-    }
-
-    branchTest64(Zero, rowBitmapGPR).linkTo(checkForMoreRows, this);
-
-    // Fast path: we have a bit to use.
-    allocateFromCurrentRow.link(this);
-    {
-        // Remove this bit from m_currentRowBitmap.
-        auto atomIndexInRowGPR = resultGPR;
-        countTrailingZeros64WithoutNullCheck(rowBitmapGPR, atomIndexInRowGPR);
-        clearBit64(atomIndexInRowGPR, rowBitmapGPR, clearBit64ScratchGPR);
-
-        if (!canPreloadRowAddressGPR)
-            loadPtr(Address(allocatorGPR, LocalAllocator::offsetOfFreeList() + FreeList::offsetOfCurrentMarkedBlockRowAddress()), rowAddressGPR);
-
-        store64(rowBitmapGPR, Address(allocatorGPR, LocalAllocator::offsetOfFreeList() + FreeList::offsetOfCurrentRowBitmap()));
-
-        // Compute atom address of this bit.
-        ASSERT(resultGPR == atomIndexInRowGPR);
-        shiftAndAdd(rowAddressGPR, resultGPR, FreeList::atomSizeShift, resultGPR);
-    }
-
-#else
-    UNUSED_PARAM(optionalScratchGPR2);
-    UNUSED_PARAM(optionalScratchGPR3);
-
-    popPath.link(this);
-
+        
     loadPtr(Address(allocatorGPR, LocalAllocator::offsetOfFreeList() + FreeList::offsetOfScrambledHead()), resultGPR);
     xorPtr(Address(allocatorGPR, LocalAllocator::offsetOfFreeList() + FreeList::offsetOfSecret()), resultGPR);
     slowPath.append(branchTestPtr(Zero, resultGPR));
@@ -678,8 +564,7 @@
     // it's still on the GC's free list.
     loadPtr(Address(resultGPR, FreeCell::offsetOfScrambledNext()), scratchGPR);
     storePtr(scratchGPR, Address(allocatorGPR, LocalAllocator::offsetOfFreeList() + FreeList::offsetOfScrambledHead()));
-#endif
-
+        
     done.link(this);
 }
 

Modified: trunk/Source/JavaScriptCore/jit/AssemblyHelpers.h (265415 => 265416)


--- trunk/Source/JavaScriptCore/jit/AssemblyHelpers.h	2020-08-09 07:36:19 UTC (rev 265415)
+++ trunk/Source/JavaScriptCore/jit/AssemblyHelpers.h	2020-08-09 11:49:11 UTC (rev 265416)
@@ -44,7 +44,6 @@
 #include "TagRegistersMode.h"
 #include "TypeofType.h"
 #include "VM.h"
-#include <wtf/Optional.h>
 
 namespace JSC {
 
@@ -1953,8 +1952,8 @@
     // that allocator is non-null; allocator can be null as a signal that we don't know what the
     // value of allocatorGPR is. Additionally, if the allocator is not null, then there is no need
     // to populate allocatorGPR - this code will ignore the contents of allocatorGPR.
-    void emitAllocateWithNonNullAllocator(GPRReg resultGPR, const JITAllocator&, GPRReg allocatorGPR, GPRReg scratchGPR, JumpList& slowPath, Optional<GPRReg> scratchGPR2 = { }, Optional<GPRReg> scratchGPR3 = { });
-
+    void emitAllocateWithNonNullAllocator(GPRReg resultGPR, const JITAllocator& allocator, GPRReg allocatorGPR, GPRReg scratchGPR, JumpList& slowPath);
+    
     void emitAllocate(GPRReg resultGPR, const JITAllocator& allocator, GPRReg allocatorGPR, GPRReg scratchGPR, JumpList& slowPath);
     
     template<typename StructureType>

Modified: trunk/Source/WTF/ChangeLog (265415 => 265416)


--- trunk/Source/WTF/ChangeLog	2020-08-09 07:36:19 UTC (rev 265415)
+++ trunk/Source/WTF/ChangeLog	2020-08-09 11:49:11 UTC (rev 265416)
@@ -1,3 +1,26 @@
+2020-08-09  Commit Queue  <[email protected]>
+
+        Unreviewed, reverting r263195, r263252, and r265394.
+        https://bugs.webkit.org/show_bug.cgi?id=215312
+
+        Revert all related GC Bitmap changes because some of perf is
+        not fully recovered
+
+        Reverted changesets:
+
+        "Replace JSC::FreeList linked list with a Bitmap."
+        https://bugs.webkit.org/show_bug.cgi?id=213071
+        https://trac.webkit.org/changeset/263195
+
+        "Unify Bitmap math loops in
+        MarkedBlock::Handle::specializedSweep()."
+        https://bugs.webkit.org/show_bug.cgi?id=213345
+        https://trac.webkit.org/changeset/263252
+
+        "[JSC] Disable ENABLE_BITMAP_FREELIST"
+        https://bugs.webkit.org/show_bug.cgi?id=215285
+        https://trac.webkit.org/changeset/265394
+
 2020-08-08  Joonghun Park  <[email protected]>
 
         [WTF] Remove the build warning since r265344.

Modified: trunk/Source/WTF/wtf/Bitmap.h (265415 => 265416)


--- trunk/Source/WTF/wtf/Bitmap.h	2020-08-09 07:36:19 UTC (rev 265415)
+++ trunk/Source/WTF/wtf/Bitmap.h	2020-08-09 11:49:11 UTC (rev 265416)
@@ -130,14 +130,10 @@
 
     unsigned hash() const;
 
-    // Low level interface.
-    using Word = WordType;
-    static constexpr unsigned bitsInWord = countOfBits<WordType>;
-    static constexpr unsigned numberOfWords = (bitmapSize + bitsInWord - 1) / bitsInWord;
-    WordType wordAt(size_t wordIndex) const { return bits[wordIndex]; }
-    Word* words() { return bitwise_cast<Word*>(&bits); }
+private:
+    static constexpr unsigned wordSize = sizeof(WordType) * 8;
+    static constexpr unsigned words = (bitmapSize + wordSize - 1) / wordSize;
 
-private:
     // the literal '1' is of type signed int.  We want to use an unsigned
     // version of the correct size when doing the calculations because if
     // WordType is larger than int, '1 << 31' will first be sign extended
@@ -145,7 +141,7 @@
     // a 64 bit unsigned int would give 0xffff8000
     static constexpr WordType _one_ = 1;
 
-    std::array<WordType, numberOfWords> bits;
+    std::array<WordType, words> bits;
 };
 
 template<size_t bitmapSize, typename WordType>
@@ -157,13 +153,13 @@
 template<size_t bitmapSize, typename WordType>
 inline bool Bitmap<bitmapSize, WordType>::get(size_t n, Dependency dependency) const
 {
-    return !!(dependency.consume(this)->bits[n / bitsInWord] & (one << (n % bitsInWord)));
+    return !!(dependency.consume(this)->bits[n / wordSize] & (one << (n % wordSize)));
 }
 
 template<size_t bitmapSize, typename WordType>
 inline void Bitmap<bitmapSize, WordType>::set(size_t n)
 {
-    bits[n / bitsInWord] |= (one << (n % bitsInWord));
+    bits[n / wordSize] |= (one << (n % wordSize));
 }
 
 template<size_t bitmapSize, typename WordType>
@@ -178,8 +174,8 @@
 template<size_t bitmapSize, typename WordType>
 inline bool Bitmap<bitmapSize, WordType>::testAndSet(size_t n)
 {
-    WordType mask = one << (n % bitsInWord);
-    size_t index = n / bitsInWord;
+    WordType mask = one << (n % wordSize);
+    size_t index = n / wordSize;
     bool result = bits[index] & mask;
     bits[index] |= mask;
     return result;
@@ -188,8 +184,8 @@
 template<size_t bitmapSize, typename WordType>
 inline bool Bitmap<bitmapSize, WordType>::testAndClear(size_t n)
 {
-    WordType mask = one << (n % bitsInWord);
-    size_t index = n / bitsInWord;
+    WordType mask = one << (n % wordSize);
+    size_t index = n / wordSize;
     bool result = bits[index] & mask;
     bits[index] &= ~mask;
     return result;
@@ -198,8 +194,8 @@
 template<size_t bitmapSize, typename WordType>
 ALWAYS_INLINE bool Bitmap<bitmapSize, WordType>::concurrentTestAndSet(size_t n, Dependency dependency)
 {
-    WordType mask = one << (n % bitsInWord);
-    size_t index = n / bitsInWord;
+    WordType mask = one << (n % wordSize);
+    size_t index = n / wordSize;
     WordType* data = bits.data() + index;
     return !bitwise_cast<Atomic<WordType>*>(data)->transactionRelaxed(
         [&] (WordType& value) -> bool {
@@ -214,8 +210,8 @@
 template<size_t bitmapSize, typename WordType>
 ALWAYS_INLINE bool Bitmap<bitmapSize, WordType>::concurrentTestAndClear(size_t n, Dependency dependency)
 {
-    WordType mask = one << (n % bitsInWord);
-    size_t index = n / bitsInWord;
+    WordType mask = one << (n % wordSize);
+    size_t index = n / wordSize;
     WordType* data = bits.data() + index;
     return !bitwise_cast<Atomic<WordType>*>(data)->transactionRelaxed(
         [&] (WordType& value) -> bool {
@@ -230,7 +226,7 @@
 template<size_t bitmapSize, typename WordType>
 inline void Bitmap<bitmapSize, WordType>::clear(size_t n)
 {
-    bits[n / bitsInWord] &= ~(one << (n % bitsInWord));
+    bits[n / wordSize] &= ~(one << (n % wordSize));
 }
 
 template<size_t bitmapSize, typename WordType>
@@ -242,12 +238,12 @@
 template<size_t bitmapSize, typename WordType>
 inline void Bitmap<bitmapSize, WordType>::invert()
 {
-    for (size_t i = 0; i < numberOfWords; ++i)
+    for (size_t i = 0; i < words; ++i)
         bits[i] = ~bits[i];
-    if constexpr (!!(bitmapSize % bitsInWord)) {
-        constexpr size_t remainingBits = bitmapSize % bitsInWord;
+    if constexpr (!!(bitmapSize % wordSize)) {
+        constexpr size_t remainingBits = bitmapSize % wordSize;
         constexpr WordType mask = (static_cast<WordType>(1) << remainingBits) - 1;
-        bits[numberOfWords - 1] &= mask;
+        bits[words - 1] &= mask;
     }
 }
 
@@ -254,8 +250,8 @@
 template<size_t bitmapSize, typename WordType>
 inline size_t Bitmap<bitmapSize, WordType>::nextPossiblyUnset(size_t start) const
 {
-    if (!~bits[start / bitsInWord])
-        return ((start / bitsInWord) + 1) * bitsInWord;
+    if (!~bits[start / wordSize])
+        return ((start / wordSize) + 1) * wordSize;
     return start + 1;
 }
 
@@ -286,11 +282,11 @@
 inline size_t Bitmap<bitmapSize, WordType>::count(size_t start) const
 {
     size_t result = 0;
-    for ( ; (start % bitsInWord); ++start) {
+    for ( ; (start % wordSize); ++start) {
         if (get(start))
             ++result;
     }
-    for (size_t i = start / bitsInWord; i < numberOfWords; ++i)
+    for (size_t i = start / wordSize; i < words; ++i)
         result += WTF::bitCount(bits[i]);
     return result;
 }
@@ -298,10 +294,9 @@
 template<size_t bitmapSize, typename WordType>
 inline bool Bitmap<bitmapSize, WordType>::isEmpty() const
 {
-    for (size_t i = 0; i < numberOfWords; ++i) {
+    for (size_t i = 0; i < words; ++i)
         if (bits[i])
             return false;
-    }
     return true;
 }
 
@@ -308,11 +303,11 @@
 template<size_t bitmapSize, typename WordType>
 inline bool Bitmap<bitmapSize, WordType>::isFull() const
 {
-    for (size_t i = 0; i < numberOfWords; ++i) {
+    for (size_t i = 0; i < words; ++i)
         if (~bits[i]) {
-            if constexpr (!!(bitmapSize % bitsInWord)) {
-                if (i == numberOfWords - 1) {
-                    constexpr size_t remainingBits = bitmapSize % bitsInWord;
+            if constexpr (!!(bitmapSize % wordSize)) {
+                if (i == words - 1) {
+                    constexpr size_t remainingBits = bitmapSize % wordSize;
                     constexpr WordType mask = (static_cast<WordType>(1) << remainingBits) - 1;
                     if ((bits[i] & mask) == mask)
                         return true;
@@ -320,7 +315,6 @@
             }
             return false;
         }
-    }
     return true;
 }
 
@@ -327,7 +321,7 @@
 template<size_t bitmapSize, typename WordType>
 inline void Bitmap<bitmapSize, WordType>::merge(const Bitmap& other)
 {
-    for (size_t i = 0; i < numberOfWords; ++i)
+    for (size_t i = 0; i < words; ++i)
         bits[i] |= other.bits[i];
 }
 
@@ -334,7 +328,7 @@
 template<size_t bitmapSize, typename WordType>
 inline void Bitmap<bitmapSize, WordType>::filter(const Bitmap& other)
 {
-    for (size_t i = 0; i < numberOfWords; ++i)
+    for (size_t i = 0; i < words; ++i)
         bits[i] &= other.bits[i];
 }
 
@@ -341,7 +335,7 @@
 template<size_t bitmapSize, typename WordType>
 inline void Bitmap<bitmapSize, WordType>::exclude(const Bitmap& other)
 {
-    for (size_t i = 0; i < numberOfWords; ++i)
+    for (size_t i = 0; i < words; ++i)
         bits[i] &= ~other.bits[i];
 }
 
@@ -348,7 +342,7 @@
 template<size_t bitmapSize, typename WordType>
 inline void Bitmap<bitmapSize, WordType>::concurrentFilter(const Bitmap& other)
 {
-    for (size_t i = 0; i < numberOfWords; ++i) {
+    for (size_t i = 0; i < words; ++i) {
         for (;;) {
             WordType otherBits = other.bits[i];
             if (!otherBits) {
@@ -368,7 +362,7 @@
 template<size_t bitmapSize, typename WordType>
 inline bool Bitmap<bitmapSize, WordType>::subsumes(const Bitmap& other) const
 {
-    for (size_t i = 0; i < numberOfWords; ++i) {
+    for (size_t i = 0; i < words; ++i) {
         WordType myBits = bits[i];
         WordType otherBits = other.bits[i];
         if ((myBits | otherBits) != myBits)
@@ -381,16 +375,16 @@
 template<typename Func>
 inline void Bitmap<bitmapSize, WordType>::forEachSetBit(const Func& func) const
 {
-    for (size_t i = 0; i < numberOfWords; ++i) {
+    for (size_t i = 0; i < words; ++i) {
         WordType word = bits[i];
-        size_t base = i * bitsInWord;
-        size_t j = 0;
-        for (; word; ++j) {
+        if (!word)
+            continue;
+        size_t base = i * wordSize;
+        for (size_t j = 0; j < wordSize; ++j) {
             if (word & 1)
                 func(base + j);
             word >>= 1;
         }
-        ASSERT(j <= bitsInWord);
     }
 }
 
@@ -398,15 +392,15 @@
 inline size_t Bitmap<bitmapSize, WordType>::findBit(size_t startIndex, bool value) const
 {
     WordType skipValue = -(static_cast<WordType>(value) ^ 1);
-    size_t wordIndex = startIndex / bitsInWord;
-    size_t startIndexInWord = startIndex - wordIndex * bitsInWord;
+    size_t wordIndex = startIndex / wordSize;
+    size_t startIndexInWord = startIndex - wordIndex * wordSize;
     
-    while (wordIndex < numberOfWords) {
+    while (wordIndex < words) {
         WordType word = bits[wordIndex];
         if (word != skipValue) {
             size_t index = startIndexInWord;
-            if (findBitInWord(word, index, bitsInWord, value))
-                return wordIndex * bitsInWord + index;
+            if (findBitInWord(word, index, wordSize, value))
+                return wordIndex * wordSize + index;
         }
         
         wordIndex++;
@@ -419,7 +413,7 @@
 template<size_t bitmapSize, typename WordType>
 inline void Bitmap<bitmapSize, WordType>::mergeAndClear(Bitmap& other)
 {
-    for (size_t i = 0; i < numberOfWords; ++i) {
+    for (size_t i = 0; i < words; ++i) {
         bits[i] |= other.bits[i];
         other.bits[i] = 0;
     }
@@ -428,7 +422,7 @@
 template<size_t bitmapSize, typename WordType>
 inline void Bitmap<bitmapSize, WordType>::setAndClear(Bitmap& other)
 {
-    for (size_t i = 0; i < numberOfWords; ++i) {
+    for (size_t i = 0; i < words; ++i) {
         bits[i] = other.bits[i];
         other.bits[i] = 0;
     }
@@ -440,28 +434,28 @@
     ASSERT(start <= end);
     ASSERT(end <= bitmapSize);
 
-    size_t wordIndex = start / bitsInWord;
-    size_t endWordIndex = end / bitsInWord;
-    size_t index = start - wordIndex * bitsInWord;
+    size_t wordIndex = start / wordSize;
+    size_t endWordIndex = end / wordSize;
+    size_t index = start - wordIndex * wordSize;
     while (wordIndex < endWordIndex) {
-        while (index < bitsInWord) {
+        while (index < wordSize) {
             bits[wordIndex] |= (one << index);
             index += n;
         }
-        index -= bitsInWord;
+        index -= wordSize;
         wordIndex++;
     }
 
-    size_t endIndex = end - endWordIndex * bitsInWord;
+    size_t endIndex = end - endWordIndex * wordSize;
     while (index < endIndex) {
         bits[wordIndex] |= (one << index);
         index += n;
     }
 
-    if constexpr (!!(bitmapSize % bitsInWord)) {
-        constexpr size_t remainingBits = bitmapSize % bitsInWord;
+    if constexpr (!!(bitmapSize % wordSize)) {
+        constexpr size_t remainingBits = bitmapSize % wordSize;
         constexpr WordType mask = (static_cast<WordType>(1) << remainingBits) - 1;
-        bits[numberOfWords - 1] &= mask;
+        bits[words - 1] &= mask;
     }
 }
 
@@ -468,7 +462,7 @@
 template<size_t bitmapSize, typename WordType>
 inline bool Bitmap<bitmapSize, WordType>::operator==(const Bitmap& other) const
 {
-    for (size_t i = 0; i < numberOfWords; ++i) {
+    for (size_t i = 0; i < words; ++i) {
         if (bits[i] != other.bits[i])
             return false;
     }
@@ -484,7 +478,7 @@
 template<size_t bitmapSize, typename WordType>
 inline void Bitmap<bitmapSize, WordType>::operator|=(const Bitmap& other)
 {
-    for (size_t i = 0; i < numberOfWords; ++i)
+    for (size_t i = 0; i < words; ++i)
         bits[i] |= other.bits[i];
 }
 
@@ -491,7 +485,7 @@
 template<size_t bitmapSize, typename WordType>
 inline void Bitmap<bitmapSize, WordType>::operator&=(const Bitmap& other)
 {
-    for (size_t i = 0; i < numberOfWords; ++i)
+    for (size_t i = 0; i < words; ++i)
         bits[i] &= other.bits[i];
 }
 
@@ -498,7 +492,7 @@
 template<size_t bitmapSize, typename WordType>
 inline void Bitmap<bitmapSize, WordType>::operator^=(const Bitmap& other)
 {
-    for (size_t i = 0; i < numberOfWords; ++i)
+    for (size_t i = 0; i < words; ++i)
         bits[i] ^= other.bits[i];
 }
 
@@ -506,7 +500,7 @@
 inline unsigned Bitmap<bitmapSize, WordType>::hash() const
 {
     unsigned result = 0;
-    for (size_t i = 0; i < numberOfWords; ++i)
+    for (size_t i = 0; i < words; ++i)
         result ^= IntHash<WordType>::hash(bits[i]);
     return result;
 }

Modified: trunk/Source/WTF/wtf/MathExtras.h (265415 => 265416)


--- trunk/Source/WTF/wtf/MathExtras.h	2020-08-09 07:36:19 UTC (rev 265415)
+++ trunk/Source/WTF/wtf/MathExtras.h	2020-08-09 11:49:11 UTC (rev 265416)
@@ -593,7 +593,7 @@
 template <typename T>
 constexpr unsigned clzConstexpr(T value)
 {
-    constexpr unsigned bitSize = countOfBits<T>;
+    constexpr unsigned bitSize = sizeof(T) * CHAR_BIT;
 
     using UT = typename std::make_unsigned<T>::type;
     UT uValue = value;
@@ -610,13 +610,13 @@
 template<typename T>
 inline unsigned clz(T value)
 {
-    constexpr unsigned bitSize = countOfBits<T>;
+    constexpr unsigned bitSize = sizeof(T) * CHAR_BIT;
 
     using UT = typename std::make_unsigned<T>::type;
     UT uValue = value;
 
 #if COMPILER(GCC_COMPATIBLE)
-    constexpr unsigned bitSize64 = countOfBits<uint64_t>;
+    constexpr unsigned bitSize64 = sizeof(uint64_t) * CHAR_BIT;
     if (uValue)
         return __builtin_clzll(uValue) - (bitSize64 - bitSize);
     return bitSize;
@@ -638,7 +638,7 @@
 template <typename T>
 constexpr unsigned ctzConstexpr(T value)
 {
-    constexpr unsigned bitSize = countOfBits<T>;
+    constexpr unsigned bitSize = sizeof(T) * CHAR_BIT;
 
     using UT = typename std::make_unsigned<T>::type;
     UT uValue = value;
@@ -657,7 +657,7 @@
 template<typename T>
 inline unsigned ctz(T value)
 {
-    constexpr unsigned bitSize = countOfBits<T>;
+    constexpr unsigned bitSize = sizeof(T) * CHAR_BIT;
 
     using UT = typename std::make_unsigned<T>::type;
     UT uValue = value;
@@ -695,7 +695,7 @@
 template<typename T>
 inline unsigned getMSBSet(T t)
 {
-    constexpr unsigned bitSize = countOfBits<T>;
+    constexpr unsigned bitSize = sizeof(T) * CHAR_BIT;
     ASSERT(t);
     return bitSize - 1 - clz(t);
 }
@@ -703,14 +703,11 @@
 template<typename T>
 constexpr unsigned getMSBSetConstexpr(T t)
 {
-    constexpr unsigned bitSize = countOfBits<T>;
+    constexpr unsigned bitSize = sizeof(T) * CHAR_BIT;
     ASSERT_UNDER_CONSTEXPR_CONTEXT(t);
     return bitSize - 1 - clzConstexpr(t);
 }
 
-template<typename T> unsigned log2(T value) { return getMSBSet(value); }
-template<typename T> constexpr unsigned log2Constexpr(T value) { return getMSBSetConstexpr(value); }
-
 } // namespace WTF
 
 using WTF::shuffleVector;
_______________________________________________
webkit-changes mailing list
[email protected]
https://lists.webkit.org/mailman/listinfo/webkit-changes

Reply via email to