swebb2066 commented on PR #586:
URL: https://github.com/apache/logging-log4cxx/pull/586#issuecomment-3839357783

   The following improved change also prevents a race condition when events are added to AsyncAppender concurrently with a thread calling setBufferSize():
   ```
   diff --git a/src/main/cpp/asyncappender.cpp b/src/main/cpp/asyncappender.cpp
   index 8443d46f..a0a7c1c3 100644
   --- a/src/main/cpp/asyncappender.cpp
   +++ b/src/main/cpp/asyncappender.cpp
   @@ -250,6 +250,11 @@ struct AsyncAppender::AsyncAppenderPriv : public AppenderSkeleton::AppenderSkele
            * Used to ensure the dispatch thread does not wait when a logging thread is waiting.
           */
           alignas(hardware_constructive_interference_size) int blockedCount{0};
   +
   +       /**
   +        * Used to ensure resizing does not proceed when a logging thread is adding to the buffer.
   +       */
   +       alignas(hardware_constructive_interference_size) std::atomic<size_t> activeCount{0};
    };
   
   
   @@ -304,9 +309,11 @@ void AsyncAppender::doAppend(const spi::LoggingEventPtr& event, Pool& pool1)
   
    void AsyncAppender::append(const spi::LoggingEventPtr& event, Pool& p)
    {
   -       if (priv->bufferSize <= 0)
   +       auto activeCount = priv->activeCount.fetch_add(1, std::memory_order_acq_rel);
   +       if (priv->bufferSize <= 0 || (activeCount & 0x100000000u))
           {
                   priv->appenders.appendLoopOnAppenders(event, p);
   +               priv->activeCount.fetch_sub(1, std::memory_order_release);
                   return;
           }
   
   @@ -328,6 +335,7 @@ void AsyncAppender::append(const spi::LoggingEventPtr& event, Pool& p)
                            priv->discardMap.emplace(loggerName, DiscardSummary{ event, LOG4CXX_STR("from an attached appender") });
                   else
                           iter->second.add(event);
   +               priv->activeCount.fetch_sub(1, std::memory_order_release);
           }
           else while (true)
           {
   @@ -352,8 +360,10 @@ void AsyncAppender::append(const spi::LoggingEventPtr& event, Pool& p)
                                            std::this_thread::yield(); // Wait a bit
                           }
                           priv->bufferNotEmpty.notify_all();
   +                       priv->activeCount.fetch_sub(1, std::memory_order_release);
                           break;
                   }
   +               priv->activeCount.fetch_sub(1, std::memory_order_release);
                   //
                    //   Following code is only reachable if buffer is full or eventCount has overflowed
                   //
   @@ -397,6 +407,7 @@ void AsyncAppender::append(const spi::LoggingEventPtr& event, Pool& p)
   
                           break;
                   }
   +               priv->activeCount.fetch_add(1, std::memory_order_acq_rel);
           }
    }
   
   @@ -472,9 +483,17 @@ void AsyncAppender::setBufferSize(int size)
                    throw IllegalArgumentException(LOG4CXX_STR("size argument must be non-negative"));
           }
   
   +       auto activeCount = priv->activeCount.fetch_add(0x100000000u, std::memory_order_acq_rel) & 0xFFFFFFFFu;
   +       while (activeCount || priv->commitCount != priv->dispatchedCount)
   +       {
   +               priv->bufferNotEmpty.notify_all();
   +               std::this_thread::yield(); // Wait a bit
   +               activeCount = priv->activeCount & 0xFFFFFFFFu;
   +       }
           std::lock_guard<std::mutex> lock(priv->bufferMutex);
           priv->bufferSize = (size < 1) ? 1 : size;
           priv->buffer.resize(priv->bufferSize);
   +       priv->activeCount.fetch_sub(0x100000000u, std::memory_order_release);
           priv->bufferNotFull.notify_all();
    }
   ```
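
   For reference, here is a minimal stress sketch of the guarded scenario (not part of the patch; it assumes the 1.x shared_ptr-based API and programmatic configuration): several threads log through an AsyncAppender while another thread repeatedly calls setBufferSize().
   ```
   // Hypothetical stress test, not included in this PR:
   // concurrent append() on several threads vs. setBufferSize() on another.
   #include <log4cxx/asyncappender.h>
   #include <log4cxx/consoleappender.h>
   #include <log4cxx/patternlayout.h>
   #include <log4cxx/logger.h>
   #include <log4cxx/level.h>
   #include <log4cxx/helpers/pool.h>
   #include <thread>
   #include <vector>

   int main()
   {
       using namespace log4cxx;
       // Chain: logger -> AsyncAppender -> ConsoleAppender.
       auto console = std::make_shared<ConsoleAppender>(std::make_shared<PatternLayout>(LOG4CXX_STR("%m%n")));
       auto async = std::make_shared<AsyncAppender>();
       helpers::Pool pool;
       console->activateOptions(pool);
       async->addAppender(console);
       async->activateOptions(pool);
       auto logger = Logger::getLogger(LOG4CXX_STR("stress"));
       logger->setLevel(Level::getInfo());
       logger->addAppender(async);

       // Several threads appending events concurrently...
       std::vector<std::thread> workers;
       for (int t = 0; t < 4; ++t)
           workers.emplace_back([&logger]
           {
               for (int i = 0; i < 100000; ++i)
                   LOG4CXX_INFO(logger, "event " << i);
           });
       // ...while another thread repeatedly resizes the buffer.
       std::thread resizer([&async]
       {
           for (int size = 1; size <= 512; size *= 2)
               async->setBufferSize(size);
       });
       for (auto& w : workers)
           w.join();
       resizer.join();
       async->close();
       return 0;
   }
   ```
   With the change above, setBufferSize() sets the high bit of activeCount and waits until no logging thread is inside append() and the dispatcher has caught up; while that bit is set, append() bypasses the buffer and writes to the attached appenders directly, so logging threads never race with the resize.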
   
   

