Revision: 198594
Author: [email protected]
Date: 2016-03-23 14:07:35 -0700 (Wed, 23 Mar 2016)

Log Message:
bmalloc: process the object log before asking for new memory
https://bugs.webkit.org/show_bug.cgi?id=155801

Reviewed by Gavin Barraclough.

This is a step toward merging large and small objects: In future, if we
have large objects in the log, we need to process them right away to
avoid pushing up peak memory use.

But it also appears to be a speedup and memory use improvement now.

* bmalloc/Allocator.cpp:
(bmalloc::Allocator::allocate):
(bmalloc::Allocator::refillAllocatorSlowCase):
(bmalloc::Allocator::allocateLarge): Process the log before asking for
more memory.

* bmalloc/Deallocator.cpp:
(bmalloc::Deallocator::processObjectLog):
(bmalloc::Deallocator::deallocateSlowCase):
* bmalloc/Deallocator.h: Provide a public API for processing the object log.

* bmalloc/Heap.cpp:
(bmalloc::Heap::allocateSmallPage): Pop fragmented pages from the front
instead of from the back. This resolves a regression on tree_churn
--parallel. Popping from the front gives us the oldest pages. The oldest
pages have had the most time to accumulate free lines. They are therefore
the least fragmented on average.

* bmalloc/List.h:
(bmalloc::List::popFront):
(bmalloc::List::insertAfter): New API to pop from front.
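
A minimal, self-contained sketch of the idea in this patch, using hypothetical ToyHeap and ToyDeallocator stand-ins rather than the real bmalloc types: when an allocation falls back to the heap, the per-thread deallocator first drains its log of deferred frees under the same heap lock, so memory freed on this thread can satisfy the request before the heap has to grow.

    // Illustrative sketch only; hypothetical stand-ins, not bmalloc's classes.
    #include <cstdlib>
    #include <mutex>
    #include <vector>

    struct ToyHeap {
        std::mutex mutex;
        std::vector<void*> freeObjects;             // memory already returned to the heap

        void* allocate(std::lock_guard<std::mutex>&)
        {
            if (!freeObjects.empty()) {             // reuse freed memory when possible...
                void* object = freeObjects.back();
                freeObjects.pop_back();
                return object;
            }
            return std::malloc(64);                 // ...and only grow as a last resort
        }
    };

    struct ToyDeallocator {
        std::vector<void*> objectLog;               // per-thread log of deferred frees

        // Caller must already hold the heap lock; the parameter documents that.
        void processObjectLog(std::lock_guard<std::mutex>&, ToyHeap& heap)
        {
            for (void* object : objectLog)
                heap.freeObjects.push_back(object);
            objectLog.clear();
        }
    };

    void* allocateSlow(ToyHeap& heap, ToyDeallocator& deallocator)
    {
        std::lock_guard<std::mutex> lock(heap.mutex);
        deallocator.processObjectLog(lock, heap);   // drain deferred frees first
        return heap.allocate(lock);                 // then ask the heap for memory
    }

This is the pattern the diff below adds at each of the allocator's slow paths: take the heap lock once, flush the object log, then allocate, all inside one critical section.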
Modified Paths:
    trunk/Source/bmalloc/ChangeLog
    trunk/Source/bmalloc/bmalloc/Allocator.cpp
    trunk/Source/bmalloc/bmalloc/Deallocator.cpp
    trunk/Source/bmalloc/bmalloc/Deallocator.h
    trunk/Source/bmalloc/bmalloc/Heap.cpp
    trunk/Source/bmalloc/bmalloc/List.h

Diff
Modified: trunk/Source/bmalloc/ChangeLog (198593 => 198594)
--- trunk/Source/bmalloc/ChangeLog 2016-03-23 21:06:36 UTC (rev 198593)
+++ trunk/Source/bmalloc/ChangeLog 2016-03-23 21:07:35 UTC (rev 198594)
@@ -1,3 +1,38 @@
+2016-03-23 Geoffrey Garen <[email protected]>
+
+ bmalloc: process the object log before asking for new memory
+ https://bugs.webkit.org/show_bug.cgi?id=155801
+
+ Reviewed by Gavin Barraclough.
+
+ This is a step toward merging large and small objects: In future, if we
+ have large objects in the log, we need to process them right away to
+ avoid pushing up peak memory use.
+
+ But it also appears to be a speedup and memory use improvement now.
+
+ * bmalloc/Allocator.cpp:
+ (bmalloc::Allocator::allocate):
+ (bmalloc::Allocator::refillAllocatorSlowCase):
+ (bmalloc::Allocator::allocateLarge): Process the log before asking for
+ more memory.
+
+ * bmalloc/Deallocator.cpp:
+ (bmalloc::Deallocator::processObjectLog):
+ (bmalloc::Deallocator::deallocateSlowCase):
+ * bmalloc/Deallocator.h: Provide a public API for processing the object log.
+
+ * bmalloc/Heap.cpp:
+ (bmalloc::Heap::allocateSmallPage): Pop fragmented pages from the front
+ instead of from the back. This resolves a regression on tree_churn
+ --parallel. Popping from the front gives us the oldest pages. The oldest
+ pages have had the most time to accumulate free lines. They are therefore
+ the least fragmented on average.
+
+ * bmalloc/List.h:
+ (bmalloc::List::popFront):
+ (bmalloc::List::insertAfter): New API to pop from front.
+
2016-03-22 Geoffrey Garen <[email protected]>
bmalloc: use a log scale for large-ish size classes
Modified: trunk/Source/bmalloc/bmalloc/Allocator.cpp (198593 => 198594)
--- trunk/Source/bmalloc/bmalloc/Allocator.cpp 2016-03-23 21:06:36 UTC (rev 198593)
+++ trunk/Source/bmalloc/bmalloc/Allocator.cpp 2016-03-23 21:07:35 UTC (rev 198594)
@@ -93,6 +93,7 @@
size_t unalignedSize = largeMin + alignment - largeAlignment + size;
if (unalignedSize <= largeMax && alignment <= largeChunkSize / 2) {
std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex());
+ m_deallocator.processObjectLog(lock);
return PerProcess<Heap>::getFastCase()->allocateLarge(lock, alignment, size, unalignedSize);
}
}
@@ -186,6 +187,7 @@
BumpRangeCache& bumpRangeCache = m_bumpRangeCaches[sizeClass];
std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex());
+ m_deallocator.processObjectLog(lock);
PerProcess<Heap>::getFastCase()->allocateSmallBumpRanges(lock, sizeClass, allocator, bumpRangeCache);
}
@@ -200,7 +202,9 @@
NO_INLINE void* Allocator::allocateLarge(size_t size)
{
size = roundUpToMultipleOf<largeAlignment>(size);
+
std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex());
+ m_deallocator.processObjectLog(lock);
return PerProcess<Heap>::getFastCase()->allocateLarge(lock, size);
}
Modified: trunk/Source/bmalloc/bmalloc/Deallocator.cpp (198593 => 198594)
--- trunk/Source/bmalloc/bmalloc/Deallocator.cpp 2016-03-23 21:06:36 UTC (rev 198593)
+++ trunk/Source/bmalloc/bmalloc/Deallocator.cpp 2016-03-23 21:07:35 UTC (rev 198594)
@@ -71,9 +71,8 @@
PerProcess<Heap>::getFastCase()->deallocateXLarge(lock, object);
}
-void Deallocator::processObjectLog()
+void Deallocator::processObjectLog(std::lock_guard<StaticMutex>& lock)
{
- std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex());
Heap* heap = PerProcess<Heap>::getFastCase();
for (auto* object : m_objectLog) {
@@ -84,6 +83,12 @@
m_objectLog.clear();
}
+void Deallocator::processObjectLog()
+{
+ std::lock_guard<StaticMutex> lock(PerProcess<Heap>::mutex());
+ processObjectLog(lock);
+}
+
void Deallocator::deallocateSlowCase(void* object)
{
BASSERT(!deallocateFastCase(object));
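
The refactoring above uses a common lock-passing idiom: the overload that does the work takes a std::lock_guard& so it can only be called while the heap lock is held, and the no-argument version is a thin wrapper that acquires the lock itself. A generic, self-contained sketch of the idiom (the names here are hypothetical, not bmalloc's):

    #include <mutex>

    static std::mutex s_heapMutex;
    static int s_pendingFrees = 0;          // stand-in for state guarded by s_heapMutex

    // Does the work; requires the caller to hold s_heapMutex.
    static void flushLog(std::lock_guard<std::mutex>&)
    {
        s_pendingFrees = 0;
    }

    // Convenience wrapper for callers that don't already hold the lock.
    static void flushLog()
    {
        std::lock_guard<std::mutex> lock(s_heapMutex);
        flushLog(lock);
    }

    // A caller that already holds the lock (like the allocator's slow paths)
    // passes its guard through, avoiding a second acquisition.
    static void allocateSlow()
    {
        std::lock_guard<std::mutex> lock(s_heapMutex);
        flushLog(lock);
        // ... allocate under the same lock ...
    }

Passing the guard by reference does not lock or unlock anything extra; it simply makes "the caller holds the lock" part of the signature, which is why Deallocator.h below gains an include of <mutex> and a forward declaration of StaticMutex.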
Modified: trunk/Source/bmalloc/bmalloc/Deallocator.h (198593 => 198594)
--- trunk/Source/bmalloc/bmalloc/Deallocator.h 2016-03-23 21:06:36 UTC (rev 198593)
+++ trunk/Source/bmalloc/bmalloc/Deallocator.h 2016-03-23 21:07:35 UTC (rev 198594)
@@ -27,10 +27,12 @@
#define Deallocator_h
#include "FixedVector.h"
+#include <mutex>
namespace bmalloc {
class Heap;
+class StaticMutex;
// Per-cache object deallocator.
@@ -42,13 +44,15 @@
void deallocate(void*);
void scavenge();
+ void processObjectLog();
+ void processObjectLog(std::lock_guard<StaticMutex>&);
+
private:
bool deallocateFastCase(void*);
void deallocateSlowCase(void*);
void deallocateLarge(void*);
void deallocateXLarge(void*);
- void processObjectLog();
FixedVector<void*, deallocatorLogCapacity> m_objectLog;
bool m_isBmallocEnabled;
Modified: trunk/Source/bmalloc/bmalloc/Heap.cpp (198593 => 198594)
--- trunk/Source/bmalloc/bmalloc/Heap.cpp 2016-03-23 21:06:36 UTC (rev 198593)
+++ trunk/Source/bmalloc/bmalloc/Heap.cpp 2016-03-23 21:07:35 UTC (rev 198594)
@@ -178,7 +178,7 @@
SmallPage* Heap::allocateSmallPage(std::lock_guard<StaticMutex>& lock, size_t sizeClass)
{
if (!m_smallPagesWithFreeLines[sizeClass].isEmpty())
- return m_smallPagesWithFreeLines[sizeClass].pop();
+ return m_smallPagesWithFreeLines[sizeClass].popFront();
SmallPage* page = [this, &lock]() {
if (!m_smallPages.isEmpty())
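
The ChangeLog's reasoning, that the oldest page in m_smallPagesWithFreeLines is the least fragmented on average, amounts to consuming the list as a FIFO: pages are appended at the back as they gain free lines, so the front holds the page that has been accumulating free lines the longest. A small self-contained illustration, with std::list standing in for bmalloc's List:

    #include <cassert>
    #include <list>
    #include <string>

    int main()
    {
        std::list<std::string> pagesWithFreeLines;

        // Pages are appended at the back as they gain free lines over time.
        pagesWithFreeLines.push_back("oldest page");   // longest time to accumulate free lines
        pagesWithFreeLines.push_back("newer page");
        pagesWithFreeLines.push_back("newest page");   // likely still mostly full

        // Old behavior: pop() from the back reused the newest, most fragmented page.
        // New behavior: popFront() reuses the oldest, least fragmented page on average.
        std::string reused = pagesWithFreeLines.front();
        pagesWithFreeLines.pop_front();
        assert(reused == "oldest page");
        return 0;
    }

The List::popFront addition in the next section is what makes this FIFO use possible; the existing pop() removes from the back and therefore behaved as a LIFO.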
Modified: trunk/Source/bmalloc/bmalloc/List.h (198593 => 198594)
--- trunk/Source/bmalloc/bmalloc/List.h 2016-03-23 21:06:36 UTC (rev 198593)
+++ trunk/Source/bmalloc/bmalloc/List.h 2016-03-23 21:07:35 UTC (rev 198594)
@@ -62,6 +62,13 @@
return static_cast<T*>(result);
}
+ T* popFront()
+ {
+ ListNode<T>* result = head();
+ remove(result);
+ return static_cast<T*>(result);
+ }
+
void insertAfter(ListNode<T>* it, ListNode<T>* node)
{
ListNode<T>* prev = it;