This is an automated email from the ASF dual-hosted git repository.

swebb2066 pushed a commit to branch improve_async_appender
in repository https://gitbox.apache.org/repos/asf/logging-log4cxx.git


The following commit(s) were added to refs/heads/improve_async_appender by this push:
     new 98c8ef4a Dynamic memory allocation is 30% of overhead
98c8ef4a is described below

commit 98c8ef4ae83e4732e63502b828f07f9ef0587509
Author: Stephen Webb <[email protected]>
AuthorDate: Fri Mar 8 15:49:40 2024 +1100

    Dynamic memory allocation is 30% of overhead
---
 src/main/cpp/asyncappender.cpp | 30 ++++++++++++++++++++++++++----
 1 file changed, 26 insertions(+), 4 deletions(-)

diff --git a/src/main/cpp/asyncappender.cpp b/src/main/cpp/asyncappender.cpp
index 50eaf789..307abbd9 100644
--- a/src/main/cpp/asyncappender.cpp
+++ b/src/main/cpp/asyncappender.cpp
@@ -99,6 +99,7 @@ typedef std::map<LogString, DiscardSummary> DiscardMap;
 #define USE_ATOMIC_QUEUE 1
 #if USE_ATOMIC_QUEUE
 #include <atomic>
+#include <bit>
 namespace
 {
 static const int CACHE_LINE_SIZE = 128;
@@ -109,17 +110,24 @@ public:
        {
                LoggingEventPtr data;
                Node* next;
+               Node() : next(0) {}
                Node(const LoggingEventPtr& event, Node* n)
                        : data(event)
                        , next(n)
                { }
        };
 
-       AtomicQueue() : m_head(0) {}
+       AtomicQueue(size_t bufferSize)
+               : m_head(0)
+               , m_nextNode(0)
+               , m_nodeStore(std::bit_ceil(bufferSize + 2))
+       {}
 
        void push(const LoggingEventPtr& event)
        {
-               auto n = new Node(event, m_head.load(std::memory_order_relaxed));
+               auto index = m_nextNode++;
+               auto n = &m_nodeStore[index % m_nodeStore.size()];
+               *n = Node(event, m_head.load(std::memory_order_relaxed));
                while (!m_head.compare_exchange_weak(n->next, n, std::memory_order_release))
                {
                }
@@ -143,8 +151,15 @@ public:
                }
                return first;
        }
+
+       void setBufferSize(size_t bufferSize)
+       {
+               m_nodeStore.resize(std::bit_ceil(bufferSize + 2));
+       }
 private:
-       std::atomic<Node*> m_head;
+       alignas(CACHE_LINE_SIZE) std::atomic<Node*> m_head;
+       alignas(CACHE_LINE_SIZE) std::atomic<size_t> m_nextNode;
+       alignas(CACHE_LINE_SIZE) std::vector<Node> m_nodeStore;
 };
 } // namespace
 #endif
@@ -160,6 +175,9 @@ struct AsyncAppender::AsyncAppenderPriv : public AppenderSkeleton::AppenderSkele
                blocking(true)
 #if LOG4CXX_EVENTS_AT_EXIT
                , atExitRegistryRaii([this]{atExitActivated();})
+#endif
+#if USE_ATOMIC_QUEUE
+               , eventList(DEFAULT_BUFFER_SIZE)
 #endif
        {
        }
@@ -321,6 +339,7 @@ void AsyncAppender::append(const spi::LoggingEventPtr& event, Pool& p)
                }
                else
                        --priv->approxListSize;
+               break;
                //
                //   Following code is only reachable if buffer is full
                //
@@ -468,6 +487,9 @@ void AsyncAppender::setBufferSize(int size)
 
        std::lock_guard<std::mutex> lock(priv->bufferMutex);
        priv->bufferSize = (size < 1) ? 1 : size;
+#if USE_ATOMIC_QUEUE
+       priv->eventList.setBufferSize(priv->bufferSize);
+#endif
        priv->bufferNotFull.notify_all();
 }
 
@@ -573,7 +595,7 @@ void AsyncAppender::dispatch()
                        else
                                isActive = false;
                        auto next = eventList->next;
-                       delete eventList;
+                       *eventList = AtomicQueue::Node();
                        eventList = next;
                }
                {
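
Note for anyone reviewing the patch above: the core change is that AtomicQueue::push no longer calls new for every logging event. It takes a node from a pre-allocated std::vector<Node> sized to std::bit_ceil(bufferSize + 2), and the hot atomics are padded onto separate cache lines. Below is a minimal, self-contained sketch of that pattern for reference only; the class and member names are illustrative, not the log4cxx API, and like the patch it assumes the dispatch thread drains and releases nodes before the producer index wraps around the pool.

// Sketch of a lock-free (Treiber-style) push that recycles nodes from a
// fixed, power-of-two sized pool instead of allocating on every push.
// Assumes the consumer drains nodes before the producer wraps the pool.
#include <atomic>
#include <bit>
#include <cstddef>
#include <vector>

template <typename T>
class PooledAtomicStack
{
public:
        struct Node
        {
                T data{};
                Node* next{nullptr};
        };

        explicit PooledAtomicStack(std::size_t capacity)
                : m_store(std::bit_ceil(capacity + 2)) // power-of-two size keeps index wrap cheap
        {}

        void push(const T& value)
        {
                // Claim the next slot in the pool; the index wraps around the pool size.
                auto index = m_next.fetch_add(1, std::memory_order_relaxed);
                Node* n = &m_store[index % m_store.size()];
                n->data = value;
                n->next = m_head.load(std::memory_order_relaxed);
                // Standard lock-free push: retry until the head CAS succeeds.
                while (!m_head.compare_exchange_weak(n->next, n, std::memory_order_release))
                {
                }
        }

        // Detach the whole list for the consumer thread to process (LIFO order).
        Node* popAll()
        {
                return m_head.exchange(nullptr, std::memory_order_acquire);
        }

private:
        // Keep producer and consumer state on separate cache lines to avoid false sharing.
        static constexpr std::size_t CacheLine = 128;
        alignas(CacheLine) std::atomic<Node*> m_head{nullptr};
        alignas(CacheLine) std::atomic<std::size_t> m_next{0};
        alignas(CacheLine) std::vector<Node> m_store;
};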
