Copilot commented on code in PR #12794:
URL: https://github.com/apache/trafficserver/pull/12794#discussion_r2784666832
##########
src/tsutil/unit_tests/test_Bravo.cc:
##########
@@ -227,3 +227,543 @@ TEST_CASE("BRAVO - check with race", "[libts][BRAVO]")
CHECK(i == 2);
}
}
+
+TEST_CASE("Recursive BRAVO - exclusive lock", "[libts][BRAVO]")
+{
+ SECTION("single lock/unlock")
+ {
+ ts::bravo::recursive_shared_mutex mutex;
+ mutex.lock();
+ mutex.unlock();
+ }
+
+ SECTION("recursive lock/unlock")
+ {
+ ts::bravo::recursive_shared_mutex mutex;
+ mutex.lock();
+ mutex.lock();
+ mutex.lock();
+ mutex.unlock();
+ mutex.unlock();
+ mutex.unlock();
+ }
+
+ SECTION("try_lock by owner succeeds")
+ {
+ ts::bravo::recursive_shared_mutex mutex;
+ mutex.lock();
+ CHECK(mutex.try_lock() == true);
+ mutex.unlock();
+ mutex.unlock();
+ }
+
+ SECTION("try_lock by non-owner fails")
+ {
+ ts::bravo::recursive_shared_mutex mutex;
+ mutex.lock();
+
+ std::thread t{[&mutex]() { CHECK(mutex.try_lock() == false); }};
+ t.join();
+
+ mutex.unlock();
+ }
+
+ SECTION("recursive try_lock")
+ {
+ ts::bravo::recursive_shared_mutex mutex;
+ CHECK(mutex.try_lock() == true);
+ CHECK(mutex.try_lock() == true);
+ CHECK(mutex.try_lock() == true);
+ mutex.unlock();
+ mutex.unlock();
+ mutex.unlock();
+ }
+
+ SECTION("writer-writer blocking")
+ {
+ ts::bravo::recursive_shared_mutex mutex;
+ int i = 0;
+
+ std::thread t1{[&]() {
+ std::lock_guard<ts::bravo::recursive_shared_mutex> lock(mutex);
+ std::this_thread::sleep_for(100ms);
+ CHECK(++i == 1);
+ }};
+
+ std::thread t2{[&]() {
+ std::this_thread::sleep_for(50ms);
+ std::lock_guard<ts::bravo::recursive_shared_mutex> lock(mutex);
+ CHECK(++i == 2);
+ }};
+
+ t1.join();
+ t2.join();
+
+ CHECK(i == 2);
+ }
+}
+
+TEST_CASE("Recursive BRAVO - shared lock", "[libts][BRAVO]")
+{
+ SECTION("single shared lock/unlock")
+ {
+ ts::bravo::recursive_shared_mutex mutex;
+ ts::bravo::Token token{0};
+ mutex.lock_shared(token);
+ mutex.unlock_shared(token);
+ }
+
+ SECTION("recursive shared lock/unlock")
+ {
+ ts::bravo::recursive_shared_mutex mutex;
+ ts::bravo::Token token1{0};
+ ts::bravo::Token token2{0};
+ ts::bravo::Token token3{0};
+ mutex.lock_shared(token1);
+ mutex.lock_shared(token2);
+ mutex.lock_shared(token3);
+ // All tokens should be the same (cached)
+ CHECK(token1 == token2);
+ CHECK(token2 == token3);
+ mutex.unlock_shared(token3);
+ mutex.unlock_shared(token2);
+ mutex.unlock_shared(token1);
+ }
+
+ SECTION("try_lock_shared recursive")
+ {
+ ts::bravo::recursive_shared_mutex mutex;
+ ts::bravo::Token token1{0};
+ ts::bravo::Token token2{0};
+ CHECK(mutex.try_lock_shared(token1) == true);
+ CHECK(mutex.try_lock_shared(token2) == true);
+ CHECK(token1 == token2);
+ mutex.unlock_shared(token2);
+ mutex.unlock_shared(token1);
+ }
+
+ SECTION("multiple readers concurrent")
+ {
+ ts::bravo::recursive_shared_mutex mutex;
+ int i = 0;
+
+ std::thread t1{[&]() {
+ ts::bravo::Token token{0};
+ mutex.lock_shared(token);
+ CHECK(i == 0);
+ std::this_thread::sleep_for(50ms);
+ mutex.unlock_shared(token);
+ }};
+
+ std::thread t2{[&]() {
+ ts::bravo::Token token{0};
+ mutex.lock_shared(token);
+ CHECK(i == 0);
+ std::this_thread::sleep_for(50ms);
+ mutex.unlock_shared(token);
+ }};
+
+ t1.join();
+ t2.join();
+
+ CHECK(i == 0);
+ }
+
+ SECTION("shared blocks exclusive")
+ {
+ ts::bravo::recursive_shared_mutex mutex;
+ ts::bravo::Token token{0};
+ mutex.lock_shared(token);
+
+ std::thread t{[&mutex]() { CHECK(mutex.try_lock() == false); }};
+ t.join();
+
+ mutex.unlock_shared(token);
+ }
+
+ SECTION("exclusive blocks shared")
+ {
+ ts::bravo::recursive_shared_mutex mutex;
+ mutex.lock();
+
+ std::thread t{[&mutex]() {
+ ts::bravo::Token token{0};
+ CHECK(mutex.try_lock_shared(token) == false);
+ }};
+ t.join();
+
+ mutex.unlock();
+ }
+}
+
+TEST_CASE("Recursive BRAVO - mixed lock scenarios", "[libts][BRAVO]")
+{
+ SECTION("downgrade: exclusive owner can acquire shared lock")
+ {
+ ts::bravo::recursive_shared_mutex mutex;
+ mutex.lock();
+
+ // While holding exclusive lock, we can acquire shared lock
+ ts::bravo::Token token{0};
+ mutex.lock_shared(token);
+ CHECK(token == 0); // Special token for downgrade
+
+ mutex.unlock_shared(token);
+ mutex.unlock();
+ }
+
+ SECTION("downgrade: try_lock_shared succeeds for exclusive owner")
+ {
+ ts::bravo::recursive_shared_mutex mutex;
+ mutex.lock();
+
+ ts::bravo::Token token{0};
+ CHECK(mutex.try_lock_shared(token) == true);
+ CHECK(token == 0); // Special token for downgrade
+
+ mutex.unlock_shared(token);
+ mutex.unlock();
+ }
+
+ SECTION("upgrade prevention: try_lock fails when holding shared lock")
+ {
+ ts::bravo::recursive_shared_mutex mutex;
+ ts::bravo::Token token{0};
+ mutex.lock_shared(token);
+
+ // Cannot upgrade: try_lock should fail
+ CHECK(mutex.try_lock() == false);
+
+ mutex.unlock_shared(token);
+ }
+
+ SECTION("downgrade with multiple shared locks")
+ {
+ ts::bravo::recursive_shared_mutex mutex;
+ mutex.lock();
+
+ ts::bravo::Token token1{0};
+ ts::bravo::Token token2{0};
+ mutex.lock_shared(token1);
+ mutex.lock_shared(token2);
+
+ mutex.unlock_shared(token2);
+ mutex.unlock_shared(token1);
+ mutex.unlock();
+ }
+
+ SECTION("proper unlock ordering: shared then exclusive")
+ {
+ ts::bravo::recursive_shared_mutex mutex;
+ mutex.lock();
+
+ ts::bravo::Token token{0};
+ mutex.lock_shared(token);
+
+ // Unlock shared first, then exclusive
+ mutex.unlock_shared(token);
+ mutex.unlock();
+
+ // Mutex should be fully unlocked now
+ CHECK(mutex.try_lock() == true);
+ mutex.unlock();
+ }
+
+ SECTION("nested exclusive locks with shared in between")
+ {
+ ts::bravo::recursive_shared_mutex mutex;
+ mutex.lock();
+ mutex.lock(); // Recursive exclusive
+
+ ts::bravo::Token token{0};
+ mutex.lock_shared(token);
+
+ mutex.unlock_shared(token);
+ mutex.unlock(); // Second exclusive
+ mutex.unlock(); // First exclusive
+
+ // Mutex should be fully unlocked now
+ CHECK(mutex.try_lock() == true);
+ mutex.unlock();
+ }
+}
+
+TEST_CASE("Recursive BRAVO - BRAVO optimizations", "[libts][BRAVO]")
+{
+ SECTION("first shared lock gets token from underlying BRAVO mutex")
+ {
+ ts::bravo::recursive_shared_mutex mutex;
+ ts::bravo::Token token{0};
+ mutex.lock_shared(token);
+ // Token should be set by underlying BRAVO mutex (0 = slow path, >0 = fast path)
+ // We can't guarantee which path is taken, but the lock should succeed
+ mutex.unlock_shared(token);
+ }
+
+ SECTION("recursive shared locks reuse cached token")
+ {
+ ts::bravo::recursive_shared_mutex mutex;
+ ts::bravo::Token token1{0};
+ ts::bravo::Token token2{0};
+ ts::bravo::Token token3{0};
+
+ mutex.lock_shared(token1);
+ mutex.lock_shared(token2);
+ mutex.lock_shared(token3);
+
+ // All tokens should be identical (cached from first lock)
+ CHECK(token1 == token2);
+ CHECK(token2 == token3);
+
+ mutex.unlock_shared(token3);
+ mutex.unlock_shared(token2);
+ mutex.unlock_shared(token1);
+ }
+
+ SECTION("writer revocation then reader works")
+ {
+ ts::bravo::recursive_shared_mutex mutex;
+
+ // First, acquire and release a shared lock to enable read_bias
+ {
+ ts::bravo::Token token{0};
+ mutex.lock_shared(token);
+ mutex.unlock_shared(token);
+ }
+
+ // Writer acquires lock (triggers revocation)
+ mutex.lock();
+ mutex.unlock();
+
+ // Reader should still work after writer releases
+ {
+ ts::bravo::Token token{0};
+ mutex.lock_shared(token);
+ mutex.unlock_shared(token);
+ }
+ }
+
+ SECTION("multiple readers then writer then readers")
+ {
+ ts::bravo::recursive_shared_mutex mutex;
+ std::atomic<int> readers_done{0};
+
   Review Comment:
   This file now uses `std::atomic` (e.g., `readers_done`) but does not include `<atomic>`. Please add the missing header explicitly rather than relying on transitive includes.
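
   A minimal sketch of the fix; the exact placement among the test file's existing includes is an assumption:

```cpp
#include <atomic> // std::atomic, used by readers_done and the stress-test counters
```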
##########
include/tsutil/Bravo.h:
##########
@@ -372,4 +372,195 @@ template <typename T = std::shared_mutex, size_t SLOT_SIZE = 256, int SLOWDOWN_G
using shared_mutex = shared_mutex_impl<>;
+/**
+ ts::bravo::recursive_shared_mutex_impl
+
+ A recursive version of shared_mutex_impl that allows the same thread
+ to acquire exclusive and shared locks multiple times.
+
+ Uses DenseThreadId for efficient per-thread state tracking without map overhead.
+ Optimized to minimize expensive std::this_thread::get_id() calls by using
+ DenseThreadId for ownership tracking.
+
+ Mixed lock semantics:
+ - Upgrade prevention: A thread holding a shared lock cannot acquire an exclusive lock
+ (would cause deadlock). try_lock() returns false, lock() asserts.
+ - Downgrade allowed: A thread holding an exclusive lock can acquire a shared lock.
+ */
+template <typename T = shared_mutex_impl<>, size_t SLOT_SIZE = 256> class recursive_shared_mutex_impl
+{
+ // Use a sentinel value for "no owner" - DenseThreadId values are 0 to SLOT_SIZE-1
+ static constexpr size_t NO_OWNER = SLOT_SIZE;
+
+public:
+ recursive_shared_mutex_impl() = default;
+ ~recursive_shared_mutex_impl() = default;
+
+ // No copying or moving
+ recursive_shared_mutex_impl(recursive_shared_mutex_impl const &)            = delete;
+ recursive_shared_mutex_impl &operator=(recursive_shared_mutex_impl const &) = delete;
+ recursive_shared_mutex_impl(recursive_shared_mutex_impl &&)                 = delete;
+ recursive_shared_mutex_impl &operator=(recursive_shared_mutex_impl &&)      = delete;
+
+ ////
+ // Exclusive locking (recursive)
+ //
+ void
+ lock()
+ {
+ size_t tid = DenseThreadId::self();
+ // Fast path: check if we already own the lock
+ if (_exclusive_owner.load(std::memory_order_relaxed) == tid) {
+ ++_exclusive_count;
+ return;
+ }
+ // Upgrade prevention: cannot acquire exclusive lock while holding shared lock
+ ThreadState &state = _thread_states[tid];
+ debug_assert(state.shared_count == 0);
+ _mutex.lock();
+ _exclusive_owner.store(tid, std::memory_order_relaxed);
+ _exclusive_count = 1;
+ }
+
+ bool
+ try_lock()
+ {
+ size_t tid = DenseThreadId::self();
+ // Fast path: check if we already own the lock
+ if (_exclusive_owner.load(std::memory_order_relaxed) == tid) {
+ ++_exclusive_count;
+ return true;
+ }
+ // Upgrade prevention: cannot acquire exclusive lock while holding shared lock
+ ThreadState &state = _thread_states[tid];
+ if (state.shared_count > 0) {
+ return false;
+ }
+ if (_mutex.try_lock()) {
+ _exclusive_owner.store(tid, std::memory_order_relaxed);
+ _exclusive_count = 1;
+ return true;
+ }
+ return false;
+ }
+
+ void
+ unlock()
+ {
+ if (--_exclusive_count == 0) {
+ _exclusive_owner.store(NO_OWNER, std::memory_order_relaxed);
+ _mutex.unlock();
+ }
+ }
+
+ ////
+ // Shared locking (recursive)
+ //
+ void
+ lock_shared(Token &token)
+ {
+ size_t tid = DenseThreadId::self();
+ ThreadState &state = _thread_states[tid];
+
+ // Fast path: already holding shared lock - just increment count (most common case)
+ size_t count = state.shared_count;
+ if (count > 0) {
+ state.shared_count = count + 1;
+ token = state.cached_token;
+ return;
+ }
+
+ // Check for downgrade: if we hold exclusive lock, allow shared lock without acquiring underlying
+ if (_exclusive_owner.load(std::memory_order_relaxed) == tid) {
+ state.shared_count = 1;
+ token = 0; // Special token indicating we're under exclusive lock
+ return;
+ }
+
+ // Slow path: acquire underlying lock
+ _mutex.lock_shared(state.cached_token);
+ state.shared_count = 1;
+ token = state.cached_token;
+ }
+
+ bool
+ try_lock_shared(Token &token)
+ {
+ size_t tid = DenseThreadId::self();
+ ThreadState &state = _thread_states[tid];
+
+ // Fast path: already holding shared lock - just increment count (most common case)
+ size_t count = state.shared_count;
+ if (count > 0) {
+ state.shared_count = count + 1;
+ token = state.cached_token;
+ return true;
+ }
+
+ // Check for downgrade: if we hold exclusive lock, allow shared lock without acquiring underlying
+ if (_exclusive_owner.load(std::memory_order_relaxed) == tid) {
+ state.shared_count = 1;
+ token = 0; // Special token indicating we're under exclusive lock
+ return true;
+ }
+
+ // Slow path: try to acquire underlying lock
+ if (_mutex.try_lock_shared(state.cached_token)) {
+ state.shared_count = 1;
+ token = state.cached_token;
+ return true;
+ }
+ return false;
+ }
+
+ void
+ unlock_shared(const Token /* token */)
+ {
+ size_t tid = DenseThreadId::self();
+ ThreadState &state = _thread_states[tid];
+ if (--state.shared_count == 0) {
+ // Only unlock underlying mutex if we're not holding exclusive lock
+ if (_exclusive_owner.load(std::memory_order_relaxed) != tid) {
+ _mutex.unlock_shared(state.cached_token);
+ }
+ state.cached_token = 0;
   Review Comment:
   recursive_shared_mutex_impl::unlock_shared ignores the `token` parameter and decides whether to unlock the underlying mutex based on `_exclusive_owner`. This makes the correctness of unlock_shared depend on exclusive lock state rather than on how the shared lock was acquired (e.g., the special `token=0` path). Consider using the token value (or an explicit per-thread flag) to distinguish the downgrade path reliably and add assertions for misuse.
```suggestion
unlock_shared(const Token token)
{
size_t tid = DenseThreadId::self();
ThreadState &state = _thread_states[tid];
TS_ASSERT(state.shared_count > 0);
if (--state.shared_count == 0) {
if (token == 0) {
// Downgrade path: shared lock acquired while holding exclusive lock;
// no underlying shared lock to release.
TS_ASSERT(_exclusive_owner.load(std::memory_order_relaxed) == tid);
} else {
// Regular shared lock: must release underlying shared lock.
TS_ASSERT(_exclusive_owner.load(std::memory_order_relaxed) != tid);
_mutex.unlock_shared(token);
state.cached_token = 0;
}
```
##########
src/iocore/cache/StripeSM.h:
##########
@@ -253,6 +267,24 @@ extern std::atomic<int> gnstripes;
extern ClassAllocator<OpenDirEntry, false> openDirEntryAllocator;
extern unsigned short *vol_hash_table;
+template <typename U, typename Func>
+U
+StripeSM::read_op(Func read_op) const
+{
+ ts::bravo::shared_lock lock(_shared_mutex);
+
+ return read_op();
+}
+
+template <typename U, typename Func>
+U
+StripeSM::write_op(Func write_op)
+{
+ std::lock_guard lock(_shared_mutex);
+
+ return write_op();
+}
   Review Comment:
   StripeSM::write_op (and similarly read_op) unconditionally does `return functor();`. This breaks when the helper is instantiated for `void` (e.g., CacheDir.cc calls `_stripe->write_op<void>(...)`) because `return expr;` is ill-formed in a void-returning function unless `expr` is itself of type `void`. Consider switching to `decltype(auto)` and handling `void` via `if constexpr`, or adding dedicated `void` overloads.
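
   A self-contained sketch of the `decltype(auto)` + `if constexpr` approach, written as a hypothetical free function rather than the PR's member templates; the name `with_write_lock` and the exact signature are illustrative only:

```cpp
#include <mutex>
#include <shared_mutex>
#include <type_traits>

// Hypothetical helper illustrating the suggestion above: run a callable under
// an exclusive lock and forward its result, with an explicit branch so that
// void-returning callables compile cleanly.
template <typename Mutex, typename Func>
decltype(auto)
with_write_lock(Mutex &m, Func &&f)
{
  std::lock_guard lock(m);
  if constexpr (std::is_void_v<std::invoke_result_t<Func &>>) {
    f(); // void case: nothing to return
  } else {
    return f(); // value case: return type deduced from the callable
  }
}

// Usage sketch:
//   std::shared_mutex m;
//   with_write_lock(m, [] { /* mutate state */ });   // void instantiation
//   int n = with_write_lock(m, [] { return 42; });   // value instantiation
```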
##########
include/tsutil/Bravo.h:
##########
@@ -372,4 +372,195 @@ template <typename T = std::shared_mutex, size_t SLOT_SIZE = 256, int SLOWDOWN_G
using shared_mutex = shared_mutex_impl<>;
+/**
+ ts::bravo::recursive_shared_mutex_impl
+
+ A recursive version of shared_mutex_impl that allows the same thread
+ to acquire exclusive and shared locks multiple times.
+
+ Uses DenseThreadId for efficient per-thread state tracking without map overhead.
+ Optimized to minimize expensive std::this_thread::get_id() calls by using
+ DenseThreadId for ownership tracking.
+
+ Mixed lock semantics:
+ - Upgrade prevention: A thread holding a shared lock cannot acquire an exclusive lock
+ (would cause deadlock). try_lock() returns false, lock() asserts.
+ - Downgrade allowed: A thread holding an exclusive lock can acquire a shared lock.
+ */
+template <typename T = shared_mutex_impl<>, size_t SLOT_SIZE = 256> class recursive_shared_mutex_impl
+{
+ // Use a sentinel value for "no owner" - DenseThreadId values are 0 to SLOT_SIZE-1
+ static constexpr size_t NO_OWNER = SLOT_SIZE;
+
+public:
+ recursive_shared_mutex_impl() = default;
+ ~recursive_shared_mutex_impl() = default;
+
+ // No copying or moving
+ recursive_shared_mutex_impl(recursive_shared_mutex_impl const &)            = delete;
+ recursive_shared_mutex_impl &operator=(recursive_shared_mutex_impl const &) = delete;
+ recursive_shared_mutex_impl(recursive_shared_mutex_impl &&)                 = delete;
+ recursive_shared_mutex_impl &operator=(recursive_shared_mutex_impl &&)      = delete;
+
+ ////
+ // Exclusive locking (recursive)
+ //
+ void
+ lock()
+ {
+ size_t tid = DenseThreadId::self();
+ // Fast path: check if we already own the lock
+ if (_exclusive_owner.load(std::memory_order_relaxed) == tid) {
+ ++_exclusive_count;
+ return;
+ }
+ // Upgrade prevention: cannot acquire exclusive lock while holding shared lock
+ ThreadState &state = _thread_states[tid];
+ debug_assert(state.shared_count == 0);
+ _mutex.lock();
+ _exclusive_owner.store(tid, std::memory_order_relaxed);
+ _exclusive_count = 1;
+ }
+
+ bool
+ try_lock()
+ {
+ size_t tid = DenseThreadId::self();
+ // Fast path: check if we already own the lock
+ if (_exclusive_owner.load(std::memory_order_relaxed) == tid) {
+ ++_exclusive_count;
+ return true;
+ }
+ // Upgrade prevention: cannot acquire exclusive lock while holding shared lock
+ ThreadState &state = _thread_states[tid];
+ if (state.shared_count > 0) {
+ return false;
+ }
+ if (_mutex.try_lock()) {
+ _exclusive_owner.store(tid, std::memory_order_relaxed);
+ _exclusive_count = 1;
+ return true;
+ }
+ return false;
+ }
+
+ void
+ unlock()
+ {
+ if (--_exclusive_count == 0) {
+ _exclusive_owner.store(NO_OWNER, std::memory_order_relaxed);
+ _mutex.unlock();
+ }
+ }
   Review Comment:
   recursive_shared_mutex_impl's shared-under-exclusive path does not acquire an underlying shared lock (it just sets `token = 0`). If a caller unlocks the exclusive lock while `ThreadState::shared_count` is still > 0, the thread will no longer hold *any* lock, and a later unlock_shared may attempt to unlock an underlying shared lock that was never acquired. Add a guard (e.g., `debug_assert(shared_count == 0)` before releasing the underlying exclusive lock) or implement a true downgrade mechanism.
##########
src/tsutil/unit_tests/test_Bravo.cc:
##########
@@ -227,3 +227,543 @@ TEST_CASE("BRAVO - check with race", "[libts][BRAVO]")
CHECK(i == 2);
}
}
+
+TEST_CASE("Recursive BRAVO - exclusive lock", "[libts][BRAVO]")
+{
+ SECTION("single lock/unlock")
+ {
+ ts::bravo::recursive_shared_mutex mutex;
+ mutex.lock();
+ mutex.unlock();
+ }
+
+ SECTION("recursive lock/unlock")
+ {
+ ts::bravo::recursive_shared_mutex mutex;
+ mutex.lock();
+ mutex.lock();
+ mutex.lock();
+ mutex.unlock();
+ mutex.unlock();
+ mutex.unlock();
+ }
+
+ SECTION("try_lock by owner succeeds")
+ {
+ ts::bravo::recursive_shared_mutex mutex;
+ mutex.lock();
+ CHECK(mutex.try_lock() == true);
+ mutex.unlock();
+ mutex.unlock();
+ }
+
+ SECTION("try_lock by non-owner fails")
+ {
+ ts::bravo::recursive_shared_mutex mutex;
+ mutex.lock();
+
+ std::thread t{[&mutex]() { CHECK(mutex.try_lock() == false); }};
+ t.join();
+
+ mutex.unlock();
+ }
+
+ SECTION("recursive try_lock")
+ {
+ ts::bravo::recursive_shared_mutex mutex;
+ CHECK(mutex.try_lock() == true);
+ CHECK(mutex.try_lock() == true);
+ CHECK(mutex.try_lock() == true);
+ mutex.unlock();
+ mutex.unlock();
+ mutex.unlock();
+ }
+
+ SECTION("writer-writer blocking")
+ {
+ ts::bravo::recursive_shared_mutex mutex;
+ int i = 0;
+
+ std::thread t1{[&]() {
+ std::lock_guard<ts::bravo::recursive_shared_mutex> lock(mutex);
+ std::this_thread::sleep_for(100ms);
+ CHECK(++i == 1);
+ }};
+
+ std::thread t2{[&]() {
+ std::this_thread::sleep_for(50ms);
+ std::lock_guard<ts::bravo::recursive_shared_mutex> lock(mutex);
+ CHECK(++i == 2);
+ }};
+
+ t1.join();
+ t2.join();
+
+ CHECK(i == 2);
+ }
+}
+
+TEST_CASE("Recursive BRAVO - shared lock", "[libts][BRAVO]")
+{
+ SECTION("single shared lock/unlock")
+ {
+ ts::bravo::recursive_shared_mutex mutex;
+ ts::bravo::Token token{0};
+ mutex.lock_shared(token);
+ mutex.unlock_shared(token);
+ }
+
+ SECTION("recursive shared lock/unlock")
+ {
+ ts::bravo::recursive_shared_mutex mutex;
+ ts::bravo::Token token1{0};
+ ts::bravo::Token token2{0};
+ ts::bravo::Token token3{0};
+ mutex.lock_shared(token1);
+ mutex.lock_shared(token2);
+ mutex.lock_shared(token3);
+ // All tokens should be the same (cached)
+ CHECK(token1 == token2);
+ CHECK(token2 == token3);
+ mutex.unlock_shared(token3);
+ mutex.unlock_shared(token2);
+ mutex.unlock_shared(token1);
+ }
+
+ SECTION("try_lock_shared recursive")
+ {
+ ts::bravo::recursive_shared_mutex mutex;
+ ts::bravo::Token token1{0};
+ ts::bravo::Token token2{0};
+ CHECK(mutex.try_lock_shared(token1) == true);
+ CHECK(mutex.try_lock_shared(token2) == true);
+ CHECK(token1 == token2);
+ mutex.unlock_shared(token2);
+ mutex.unlock_shared(token1);
+ }
+
+ SECTION("multiple readers concurrent")
+ {
+ ts::bravo::recursive_shared_mutex mutex;
+ int i = 0;
+
+ std::thread t1{[&]() {
+ ts::bravo::Token token{0};
+ mutex.lock_shared(token);
+ CHECK(i == 0);
+ std::this_thread::sleep_for(50ms);
+ mutex.unlock_shared(token);
+ }};
+
+ std::thread t2{[&]() {
+ ts::bravo::Token token{0};
+ mutex.lock_shared(token);
+ CHECK(i == 0);
+ std::this_thread::sleep_for(50ms);
+ mutex.unlock_shared(token);
+ }};
+
+ t1.join();
+ t2.join();
+
+ CHECK(i == 0);
+ }
+
+ SECTION("shared blocks exclusive")
+ {
+ ts::bravo::recursive_shared_mutex mutex;
+ ts::bravo::Token token{0};
+ mutex.lock_shared(token);
+
+ std::thread t{[&mutex]() { CHECK(mutex.try_lock() == false); }};
+ t.join();
+
+ mutex.unlock_shared(token);
+ }
+
+ SECTION("exclusive blocks shared")
+ {
+ ts::bravo::recursive_shared_mutex mutex;
+ mutex.lock();
+
+ std::thread t{[&mutex]() {
+ ts::bravo::Token token{0};
+ CHECK(mutex.try_lock_shared(token) == false);
+ }};
+ t.join();
+
+ mutex.unlock();
+ }
+}
+
+TEST_CASE("Recursive BRAVO - mixed lock scenarios", "[libts][BRAVO]")
+{
+ SECTION("downgrade: exclusive owner can acquire shared lock")
+ {
+ ts::bravo::recursive_shared_mutex mutex;
+ mutex.lock();
+
+ // While holding exclusive lock, we can acquire shared lock
+ ts::bravo::Token token{0};
+ mutex.lock_shared(token);
+ CHECK(token == 0); // Special token for downgrade
+
+ mutex.unlock_shared(token);
+ mutex.unlock();
+ }
+
+ SECTION("downgrade: try_lock_shared succeeds for exclusive owner")
+ {
+ ts::bravo::recursive_shared_mutex mutex;
+ mutex.lock();
+
+ ts::bravo::Token token{0};
+ CHECK(mutex.try_lock_shared(token) == true);
+ CHECK(token == 0); // Special token for downgrade
+
+ mutex.unlock_shared(token);
+ mutex.unlock();
+ }
+
+ SECTION("upgrade prevention: try_lock fails when holding shared lock")
+ {
+ ts::bravo::recursive_shared_mutex mutex;
+ ts::bravo::Token token{0};
+ mutex.lock_shared(token);
+
+ // Cannot upgrade: try_lock should fail
+ CHECK(mutex.try_lock() == false);
+
+ mutex.unlock_shared(token);
+ }
+
+ SECTION("downgrade with multiple shared locks")
+ {
+ ts::bravo::recursive_shared_mutex mutex;
+ mutex.lock();
+
+ ts::bravo::Token token1{0};
+ ts::bravo::Token token2{0};
+ mutex.lock_shared(token1);
+ mutex.lock_shared(token2);
+
+ mutex.unlock_shared(token2);
+ mutex.unlock_shared(token1);
+ mutex.unlock();
+ }
+
+ SECTION("proper unlock ordering: shared then exclusive")
+ {
+ ts::bravo::recursive_shared_mutex mutex;
+ mutex.lock();
+
+ ts::bravo::Token token{0};
+ mutex.lock_shared(token);
+
+ // Unlock shared first, then exclusive
+ mutex.unlock_shared(token);
+ mutex.unlock();
+
+ // Mutex should be fully unlocked now
+ CHECK(mutex.try_lock() == true);
+ mutex.unlock();
+ }
+
+ SECTION("nested exclusive locks with shared in between")
+ {
+ ts::bravo::recursive_shared_mutex mutex;
+ mutex.lock();
+ mutex.lock(); // Recursive exclusive
+
+ ts::bravo::Token token{0};
+ mutex.lock_shared(token);
+
+ mutex.unlock_shared(token);
+ mutex.unlock(); // Second exclusive
+ mutex.unlock(); // First exclusive
+
+ // Mutex should be fully unlocked now
+ CHECK(mutex.try_lock() == true);
+ mutex.unlock();
+ }
+}
+
+TEST_CASE("Recursive BRAVO - BRAVO optimizations", "[libts][BRAVO]")
+{
+ SECTION("first shared lock gets token from underlying BRAVO mutex")
+ {
+ ts::bravo::recursive_shared_mutex mutex;
+ ts::bravo::Token token{0};
+ mutex.lock_shared(token);
+ // Token should be set by underlying BRAVO mutex (0 = slow path, >0 = fast path)
+ // We can't guarantee which path is taken, but the lock should succeed
+ mutex.unlock_shared(token);
+ }
+
+ SECTION("recursive shared locks reuse cached token")
+ {
+ ts::bravo::recursive_shared_mutex mutex;
+ ts::bravo::Token token1{0};
+ ts::bravo::Token token2{0};
+ ts::bravo::Token token3{0};
+
+ mutex.lock_shared(token1);
+ mutex.lock_shared(token2);
+ mutex.lock_shared(token3);
+
+ // All tokens should be identical (cached from first lock)
+ CHECK(token1 == token2);
+ CHECK(token2 == token3);
+
+ mutex.unlock_shared(token3);
+ mutex.unlock_shared(token2);
+ mutex.unlock_shared(token1);
+ }
+
+ SECTION("writer revocation then reader works")
+ {
+ ts::bravo::recursive_shared_mutex mutex;
+
+ // First, acquire and release a shared lock to enable read_bias
+ {
+ ts::bravo::Token token{0};
+ mutex.lock_shared(token);
+ mutex.unlock_shared(token);
+ }
+
+ // Writer acquires lock (triggers revocation)
+ mutex.lock();
+ mutex.unlock();
+
+ // Reader should still work after writer releases
+ {
+ ts::bravo::Token token{0};
+ mutex.lock_shared(token);
+ mutex.unlock_shared(token);
+ }
+ }
+
+ SECTION("multiple readers then writer then readers")
+ {
+ ts::bravo::recursive_shared_mutex mutex;
+ std::atomic<int> readers_done{0};
+
+ // Start multiple readers
+ std::thread t1{[&]() {
+ ts::bravo::Token token{0};
+ mutex.lock_shared(token);
+ std::this_thread::sleep_for(50ms);
+ mutex.unlock_shared(token);
+ ++readers_done;
+ }};
+
+ std::thread t2{[&]() {
+ ts::bravo::Token token{0};
+ mutex.lock_shared(token);
+ std::this_thread::sleep_for(50ms);
+ mutex.unlock_shared(token);
+ ++readers_done;
+ }};
+
+ // Wait for readers to finish
+ t1.join();
+ t2.join();
+ CHECK(readers_done == 2);
+
+ // Writer acquires lock
+ mutex.lock();
+ mutex.unlock();
+
+ // More readers after writer
+ std::thread t3{[&]() {
+ ts::bravo::Token token{0};
+ mutex.lock_shared(token);
+ mutex.unlock_shared(token);
+ ++readers_done;
+ }};
+
+ std::thread t4{[&]() {
+ ts::bravo::Token token{0};
+ mutex.lock_shared(token);
+ mutex.unlock_shared(token);
+ ++readers_done;
+ }};
+
+ t3.join();
+ t4.join();
+ CHECK(readers_done == 4);
+ }
+
+ SECTION("recursive shared lock with concurrent writer")
+ {
+ ts::bravo::recursive_shared_mutex mutex;
+ std::atomic<bool> writer_done{false};
+
+ // Reader thread with recursive locks
+ std::thread reader{[&]() {
+ ts::bravo::Token token1{0};
+ ts::bravo::Token token2{0};
+ mutex.lock_shared(token1);
+ mutex.lock_shared(token2); // Recursive
+ CHECK(token1 == token2); // Should be same cached token
+ std::this_thread::sleep_for(100ms);
+ mutex.unlock_shared(token2);
+ mutex.unlock_shared(token1);
+ }};
+
+ // Writer thread tries to acquire after reader starts
+ std::thread writer{[&]() {
+ std::this_thread::sleep_for(50ms);
+ mutex.lock();
+ writer_done = true;
+ mutex.unlock();
+ }};
+
+ reader.join();
+ writer.join();
+ CHECK(writer_done == true);
+ }
+}
+
+TEST_CASE("Recursive BRAVO - stress test", "[libts][BRAVO]")
+{
+ SECTION("concurrent readers with recursive locks")
+ {
+ ts::bravo::recursive_shared_mutex mutex;
+ std::atomic<int> counter{0};
+ constexpr int NUM_THREADS = 8;
+ constexpr int NUM_ITERATIONS = 1000;
+
+ std::vector<std::thread> threads;
+ for (int i = 0; i < NUM_THREADS; ++i) {
   Review Comment:
   This file now uses `std::vector` for thread storage but does not include `<vector>`. Please include `<vector>` explicitly to avoid build breaks from missing transitive includes.
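
   A minimal sketch of the fix; the exact placement among the test file's existing includes is an assumption:

```cpp
#include <vector> // std::vector<std::thread> used by the stress tests
```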
##########
src/iocore/cache/CacheDir.cc:
##########
@@ -110,48 +112,63 @@ OpenDir::open_write(CacheVC *cont, int allow_if_writers, int max_writers)
dir_clear(&od->first_dir);
cont->od = od;
cont->write_vector = &od->vector;
- bucket[b].push(od);
+ _bucket[b].push(od);
return 1;
}
+/**
+ This event handler is called in two cases:
+
+ 1. Direct call from OpenDir::close_write - writer lock is already acquired
+ 2. Self retry through event system - need to acquire writer lock
+ */
int
-OpenDir::signal_readers(int /* event ATS_UNUSED */, Event * /* e ATS_UNUSED */)
+OpenDir::signal_readers(int event, Event * /* ATS UNUSED */)
{
- Queue<CacheVC, Link_CacheVC_opendir_link> newly_delayed_readers;
- EThread *t = mutex->thread_holding;
- CacheVC *c = nullptr;
- while ((c = delayed_readers.dequeue())) {
- CACHE_TRY_LOCK(lock, c->mutex, t);
- if (lock.is_locked()) {
- c->f.open_read_timeout = 0;
- c->handleEvent(EVENT_IMMEDIATE, nullptr);
- continue;
+ auto write_op = [&] {
+ Queue<CacheVC, Link_CacheVC_opendir_link> newly_delayed_readers;
+ EThread *t = mutex->thread_holding;
+ CacheVC *c = nullptr;
+ while ((c = _delayed_readers.dequeue())) {
+ CACHE_TRY_LOCK(lock, c->mutex, t);
+ if (lock.is_locked()) {
   Review Comment:
   `t` in `EThread *t = mutex->thread_holding;` can be null when signal_readers is invoked directly from close_write (OpenDir's ProxyMutex isn't held in that path). CACHE_TRY_LOCK will dereference `t` (and the lock-failure injection code uses `t->generator`), so this can crash. Use `this_ethread()` (or another guaranteed non-null thread) as a fallback.
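
   A sketch of the suggested fallback at the top of the `write_op` lambda; the exact placement is an assumption, and `this_ethread()` is the accessor named in the comment above:

```cpp
    EThread *t = mutex->thread_holding;
    if (t == nullptr) {
      // Direct call from close_write: OpenDir's ProxyMutex may not be held,
      // so fall back to the current event thread.
      t = this_ethread();
    }
```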
##########
src/iocore/cache/CacheDir.cc:
##########
@@ -110,48 +112,63 @@ OpenDir::open_write(CacheVC *cont, int allow_if_writers, int max_writers)
dir_clear(&od->first_dir);
cont->od = od;
cont->write_vector = &od->vector;
- bucket[b].push(od);
+ _bucket[b].push(od);
return 1;
}
+/**
+ This event handler is called in two cases:
+
+ 1. Direct call from OpenDir::close_write - writer lock is already acquired
+ 2. Self retry through event system - need to acquire writer lock
+ */
int
-OpenDir::signal_readers(int /* event ATS_UNUSED */, Event * /* e ATS_UNUSED */)
+OpenDir::signal_readers(int event, Event * /* ATS UNUSED */)
{
- Queue<CacheVC, Link_CacheVC_opendir_link> newly_delayed_readers;
- EThread *t = mutex->thread_holding;
- CacheVC *c = nullptr;
- while ((c = delayed_readers.dequeue())) {
- CACHE_TRY_LOCK(lock, c->mutex, t);
- if (lock.is_locked()) {
- c->f.open_read_timeout = 0;
- c->handleEvent(EVENT_IMMEDIATE, nullptr);
- continue;
+ auto write_op = [&] {
+ Queue<CacheVC, Link_CacheVC_opendir_link> newly_delayed_readers;
+ EThread *t = mutex->thread_holding;
+ CacheVC *c = nullptr;
+ while ((c = _delayed_readers.dequeue())) {
+ CACHE_TRY_LOCK(lock, c->mutex, t);
+ if (lock.is_locked()) {
+ c->f.open_read_timeout = 0;
+ c->handleEvent(EVENT_IMMEDIATE, nullptr);
+ continue;
+ }
+ newly_delayed_readers.push(c);
}
- newly_delayed_readers.push(c);
- }
- if (newly_delayed_readers.head) {
- delayed_readers = newly_delayed_readers;
- EThread *t1 = newly_delayed_readers.head->mutex->thread_holding;
- if (!t1) {
- t1 = mutex->thread_holding;
+ if (newly_delayed_readers.head) {
+ _delayed_readers = newly_delayed_readers;
+ EThread *t1 = newly_delayed_readers.head->mutex->thread_holding;
+ if (!t1) {
+ t1 = mutex->thread_holding;
+ }
   Review Comment:
   When re-scheduling retries, `t1` can still be null (e.g., if the head VC's mutex isn't currently held and OpenDir's mutex isn't held in this call path). `t1->schedule_in(...)` will then dereference a null pointer. Ensure `t1` is always set to a valid thread (e.g., fall back to `this_ethread()`) before calling schedule_in.
```suggestion
}
if (!t1) {
t1 = this_ethread();
}
```
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]