This is an automated email from the ASF dual-hosted git repository.
alexey pushed a commit to branch branch-1.18.x
in repository https://gitbox.apache.org/repos/asf/kudu.git
The following commit(s) were added to refs/heads/branch-1.18.x by this push:
new db16ea2e7 KUDU-613: Free entries outside lock
db16ea2e7 is described below
commit db16ea2e74d81cc9c86e178f0619f335f008ada5
Author: Mahesh Reddy <[email protected]>
AuthorDate: Thu Dec 12 01:23:15 2024 -0500
KUDU-613: Free entries outside lock
Prior to this patch, entries being evicted were
freed within the scope of the lock. This patch
changes that behavior by freeing the entries
outside the lock for performance reasons.
Change-Id: I8e3bdd048b22ea5b323015b1497a24e5247c83f0
Reviewed-on: http://gerrit.cloudera.org:8080/22203
Reviewed-by: Kurt Deschler <[email protected]>
Tested-by: Kudu Jenkins
Reviewed-by: Alexey Serbin <[email protected]>
(cherry picked from commit 32bdfab127eb08edb120646b75073bda912f3e41)
Reviewed-on: http://gerrit.cloudera.org:8080/22211
Tested-by: Alexey Serbin <[email protected]>
Reviewed-by: Mahesh Reddy <[email protected]>
Reviewed-by: Abhishek Chennaka <[email protected]>
---
src/kudu/util/slru_cache-test.cc | 29 ++++---
src/kudu/util/slru_cache.cc | 166 ++++++++++++++++++++++++++-------------
src/kudu/util/slru_cache.h | 19 +++--
3 files changed, 140 insertions(+), 74 deletions(-)
diff --git a/src/kudu/util/slru_cache-test.cc b/src/kudu/util/slru_cache-test.cc
index 68b1547d1..a72e9bb35 100644
--- a/src/kudu/util/slru_cache-test.cc
+++ b/src/kudu/util/slru_cache-test.cc
@@ -19,6 +19,7 @@
#include <cstdint>
#include <cstring>
+#include <deque>
#include <memory>
#include <string>
#include <utility>
@@ -77,9 +78,12 @@ class SLRUCacheBaseTest : public KuduTest,
{ }
// Implementation of the EvictionCallback interface.
+ // A deque is used here with the latest freed entry inserted at the
beginning.
+ // With entries being freed outside of the lock, the MRU entries are now
freed first instead of
+ // the LRU entries. Using a deque makes it easier to verify this.
void EvictedEntry(Slice key, Slice val) override {
- evicted_keys_.push_back(DecodeInt(key));
- evicted_values_.push_back(DecodeInt(val));
+ evicted_keys_.push_front(DecodeInt(key));
+ evicted_values_.push_front(DecodeInt(val));
}
// Returns -1 if no key is found in cache.
@@ -149,8 +153,8 @@ class SLRUCacheBaseTest : public KuduTest,
const size_t protected_segment_size_;
const size_t total_cache_size_;
const uint32_t lookups_threshold_;
- std::vector<int> evicted_keys_;
- std::vector<int> evicted_values_;
+ std::deque<int> evicted_keys_;
+ std::deque<int> evicted_values_;
std::shared_ptr<MemTracker> mem_tracker_;
std::unique_ptr<ShardedSLRUCache> slru_cache_;
MetricRegistry metric_registry_;
@@ -292,8 +296,8 @@ TEST_P(SLRUCacheTest, Erase) {
// Erase second entry from protected segment.
Erase(200);
ASSERT_EQ(2, evicted_keys_.size());
- ASSERT_EQ(200, evicted_keys_[1]);
- ASSERT_EQ(201, evicted_values_[1]);
+ ASSERT_EQ(200, evicted_keys_[0]);
+ ASSERT_EQ(201, evicted_values_[0]);
}
// Underlying entry isn't actually freed until handle around it from lookup is
reset.
@@ -327,8 +331,8 @@ TEST_P(SLRUCacheTest, EntriesArePinned) {
// Reset lookup handle, entry is now freed.
h2.reset();
ASSERT_EQ(2, evicted_keys_.size());
- ASSERT_EQ(100, evicted_keys_[1]);
- ASSERT_EQ(102, evicted_values_[1]);
+ ASSERT_EQ(100, evicted_keys_[0]);
+ ASSERT_EQ(102, evicted_values_[0]);
ASSERT_EQ(2, metrics->probationary_segment_evictions.get()->value());
Insert(200, 201);
@@ -354,8 +358,8 @@ TEST_P(SLRUCacheTest, EntriesArePinned) {
// Reset lookup handle of entry that was upserted, it should be freed now.
h3.reset();
ASSERT_EQ(3, evicted_keys_.size());
- ASSERT_EQ(200, evicted_keys_[2]);
- ASSERT_EQ(201, evicted_values_[2]);
+ ASSERT_EQ(200, evicted_keys_[0]);
+ ASSERT_EQ(201, evicted_values_[0]);
ASSERT_EQ(1, metrics->protected_segment_evictions.get()->value());
// Erase value, lookup handle is still held so entry will not be freed yet.
@@ -367,8 +371,8 @@ TEST_P(SLRUCacheTest, EntriesArePinned) {
// Reset lookup handle, entry is now freed.
h4.reset();
ASSERT_EQ(4, evicted_keys_.size());
- ASSERT_EQ(200, evicted_keys_[3]);
- ASSERT_EQ(202, evicted_values_[3]);
+ ASSERT_EQ(200, evicted_keys_[0]);
+ ASSERT_EQ(202, evicted_values_[0]);
ASSERT_EQ(2, metrics->protected_segment_evictions.get()->value());
}
@@ -523,7 +527,6 @@ TEST_P(SLRUSingleShardCacheTest, DowngradeEviction) {
ASSERT_FALSE(ProbationaryContains(EncodeInt(last_key)));
ASSERT_TRUE(ProtectedContains(EncodeInt(last_key)));
-
// Verify that the LRU entries from the probationary segment are evicted to
make space for entry
// from protected segment being downgraded.
for (int i = 0; i < evicted_keys_.size(); ++i) {
diff --git a/src/kudu/util/slru_cache.cc b/src/kudu/util/slru_cache.cc
index 0c97e8b61..e319f8541 100644
--- a/src/kudu/util/slru_cache.cc
+++ b/src/kudu/util/slru_cache.cc
@@ -237,13 +237,14 @@ void SLRUCacheShard<segment>::Release(Handle* handle) {
}
template<>
-void SLRUCacheShard<Segment::kProbationary>::RemoveEntriesPastCapacity() {
+void
SLRUCacheShard<Segment::kProbationary>::RemoveEntriesPastCapacity(SLRUHandle**
free_entries) {
while (usage_ > capacity_ && rl_.next != &rl_) {
SLRUHandle* old = rl_.next;
RL_Remove(old);
table_.Remove(old->key(), old->hash);
if (Unref(old)) {
- FreeEntry(old);
+ old->next = *free_entries;
+ *free_entries = old;
}
}
}
@@ -251,7 +252,8 @@ void
SLRUCacheShard<Segment::kProbationary>::RemoveEntriesPastCapacity() {
// Inserts handle into the probationary shard and removes any entries past
capacity.
template<>
Handle* SLRUCacheShard<Segment::kProbationary>::Insert(SLRUHandle* handle,
- EvictionCallback*
eviction_callback) {
+ EvictionCallback*
eviction_callback,
+ SLRUHandle**
free_entries) {
// Set the remaining SLRUHandle members which were not already allocated
during Allocate().
handle->eviction_callback = eviction_callback;
// Two refs for the handle: one from SLRUCacheShard, one for the returned
handle.
@@ -270,10 +272,11 @@ Handle*
SLRUCacheShard<Segment::kProbationary>::Insert(SLRUHandle* handle,
if (old_entry != nullptr) {
RL_Remove(old_entry);
if (Unref(old_entry)) {
- FreeEntry(old_entry);
+ old_entry->next = *free_entries;
+ *free_entries = old_entry;
}
}
- RemoveEntriesPastCapacity();
+ RemoveEntriesPastCapacity(free_entries);
return reinterpret_cast<Handle*>(handle);
}
@@ -281,7 +284,8 @@ Handle*
SLRUCacheShard<Segment::kProbationary>::Insert(SLRUHandle* handle,
// Inserts handle into the protected shard when updating entry in the
protected segment.
template<>
Handle* SLRUCacheShard<Segment::kProtected>::Insert(SLRUHandle* handle,
- EvictionCallback*
eviction_callback) {
+ EvictionCallback*
eviction_callback,
+ SLRUHandle** free_entries)
{
handle->eviction_callback = eviction_callback;
// Two refs for the handle: one from SLRUCacheShard, one for the returned
handle.
// Even though this function is for updates in the protected segment, it's
treated similarly
@@ -303,7 +307,8 @@ Handle*
SLRUCacheShard<Segment::kProtected>::Insert(SLRUHandle* handle,
DCHECK(old_entry != nullptr);
RL_Remove(old_entry);
if (Unref(old_entry)) {
- FreeEntry(old_entry);
+ old_entry->next = *free_entries;
+ *free_entries = old_entry;
}
return reinterpret_cast<Handle*>(handle);
@@ -311,7 +316,8 @@ Handle*
SLRUCacheShard<Segment::kProtected>::Insert(SLRUHandle* handle,
// Inserts handle into the probationary shard when downgrading entry from the
protected segment.
template<>
-void SLRUCacheShard<Segment::kProbationary>::ReInsert(SLRUHandle* handle) {
+void SLRUCacheShard<Segment::kProbationary>::ReInsert(SLRUHandle* handle,
+ SLRUHandle**
free_entries) {
handle->in_protected_segment.store(false, std::memory_order_relaxed);
if (PREDICT_TRUE(metrics_)) {
metrics_->downgrades->Increment();
@@ -324,12 +330,14 @@ void
SLRUCacheShard<Segment::kProbationary>::ReInsert(SLRUHandle* handle) {
// No entries should exist with same key in probationary segment when
downgrading.
SLRUHandle* old_entry = table_.Insert(handle);
DCHECK(old_entry == nullptr);
- RemoveEntriesPastCapacity();
+ RemoveEntriesPastCapacity(free_entries);
}
// Inserts handle into the protected shard when upgrading entry from the
probationary segment.
+// The parameter 'free_entries' is unused in this implementation.
template<>
-void SLRUCacheShard<Segment::kProtected>::ReInsert(SLRUHandle* handle) {
+void SLRUCacheShard<Segment::kProtected>::ReInsert(SLRUHandle* handle,
+ SLRUHandle**
/*free_entries*/) {
handle->in_protected_segment.store(true, std::memory_order_relaxed);
if (PREDICT_TRUE(metrics_)) {
metrics_->upgrades->Increment();
@@ -346,13 +354,13 @@ void
SLRUCacheShard<Segment::kProtected>::ReInsert(SLRUHandle* handle) {
}
template<Segment segment>
-void SLRUCacheShard<segment>::Erase(const Slice& key, uint32_t hash) {
+void SLRUCacheShard<segment>::Erase(const Slice& key, uint32_t hash,
SLRUHandle** free_entry) {
SLRUHandle* e = table_.Remove(key, hash);
if (e != nullptr) {
RL_Remove(e);
// Free entry if this is the last reference.
if (Unref(e)) {
- FreeEntry(e);
+ *free_entry = e;
}
}
}
@@ -393,15 +401,38 @@ void SLRUCacheShardPair::SetMetrics(SLRUCacheMetrics*
metrics) {
// Look at Cache::Insert() for more details.
Handle* SLRUCacheShardPair::Insert(SLRUHandle* handle,
EvictionCallback* eviction_callback) {
- std::lock_guard l(mutex_);
- if (!ProtectedContains(handle->key(), handle->hash)) {
- return probationary_shard_.Insert(handle, eviction_callback);
+ Handle* inserted_handle;
+ SLRUHandle* probationary_free_entries = nullptr;
+ SLRUHandle* protected_free_entries = nullptr;
+ {
+ std::lock_guard l(mutex_);
+ if (!ProtectedContains(handle->key(), handle->hash)) {
+ inserted_handle = probationary_shard_.Insert(handle,
+ eviction_callback,
+ &probationary_free_entries);
+ } else {
+ inserted_handle = protected_shard_.Insert(handle,
+ eviction_callback,
+ &protected_free_entries);
+ // If newly inserted entry has greater charge than previous one,
+ // possible that entries can be evicted if at capacity.
+ DowngradeEntries(&probationary_free_entries);
+ }
+ }
+ // Free entries outside lock for performance reasons.
+ // The evicted entries are freed in the opposite order of their eviction.
+ // Of the evicted entries, the MRU entries are freed first and the LRU
entries are freed last.
+ while (probationary_free_entries != nullptr) {
+ auto* next = probationary_free_entries->next;
+ probationary_shard_.FreeEntry(probationary_free_entries);
+ probationary_free_entries = next;
}
- auto* inserted = protected_shard_.Insert(handle, eviction_callback);
- // If newly inserted entry has greater charge than previous one,
- // possible that entries can be evicted if at capacity.
- DowngradeEntries();
- return inserted;
+ while (protected_free_entries != nullptr) {
+ auto* next = protected_free_entries->next;
+ protected_shard_.FreeEntry(protected_free_entries);
+ protected_free_entries = next;
+ }
+ return inserted_handle;
}
Handle* SLRUCacheShardPair::Lookup(const Slice& key, uint32_t hash, bool
caching) {
@@ -416,40 +447,54 @@ Handle* SLRUCacheShardPair::Lookup(const Slice& key,
uint32_t hash, bool caching
// - Miss: Return the handle.
//
// Lookup metrics for both segments and the high-level cache are updated
with each lookup.
- std::lock_guard l(mutex_);
- Handle* protected_handle = protected_shard_.Lookup(key, hash, caching);
- // If the key exists in the protected segment, return the result from the
lookup of the
- // protected segment.
- if (protected_handle) {
+ SLRUHandle* probationary_free_entries = nullptr;
+ Handle* probationary_handle;
+ {
+ std::lock_guard l(mutex_);
+ auto* protected_handle = protected_shard_.Lookup(key, hash, caching);
+
+ // If the key exists in the protected segment, return the result from the
lookup of the
+ // protected segment.
+ if (protected_handle) {
+ protected_shard_.UpdateMetricsLookup(true, caching);
+ probationary_shard_.UpdateSegmentMetricsLookup(false, caching);
+ return protected_handle;
+ }
+ probationary_handle = probationary_shard_.Lookup(key, hash, caching);
+
+ // Return null handle if handle is not found in either the probationary or
protected segment.
+ if (!probationary_handle) {
+ protected_shard_.UpdateMetricsLookup(false, caching);
+ return probationary_handle;
+ }
protected_shard_.UpdateMetricsLookup(true, caching);
- probationary_shard_.UpdateSegmentMetricsLookup(false, caching);
- return protected_handle;
- }
- Handle* probationary_handle = probationary_shard_.Lookup(key, hash, caching);
+ auto* val_handle = reinterpret_cast<SLRUHandle*>(probationary_handle);
+ // If the number of lookups for entry isn't at the minimum number required
before
+ // upgrading to the protected segment, return the entry found in
probationary segment.
+ // If the entry's charge is larger than the protected segment's capacity,
return entry found
+ // in probationary segment to avoid evicting any entries in the protected
segment.
+ if (val_handle->lookups < lookups_threshold_ ||
+ val_handle->charge > protected_shard_.capacity()) {
+ return probationary_handle;
+ }
- // Return null handle if handle is not found in either the probationary or
protected segment.
- if (!probationary_handle) {
- protected_shard_.UpdateMetricsLookup(false, caching);
- return probationary_handle;
- }
- protected_shard_.UpdateMetricsLookup(true, caching);
- auto* val_handle = reinterpret_cast<SLRUHandle*>(probationary_handle);
- // If the number of lookups for entry isn't at the minimum number required
before
- // upgrading to the protected segment, return the entry found in
probationary segment.
- // If the entry's charge is larger than the protected segment's capacity,
return entry found
- // in probationary segment to avoid evicting any entries in the protected
segment.
- if (val_handle->lookups < lookups_threshold_ ||
- val_handle->charge > protected_shard_.capacity()) {
- return probationary_handle;
+ // Upgrade entry from the probationary segment by erasing it from the
+ // probationary segment then adding entry to the protected segment.
+ // Downgrade any entries in the protected segment if past capacity.
+ probationary_shard_.SoftErase(key, hash);
+ protected_shard_.ReInsert(val_handle, nullptr);
+ DowngradeEntries(&probationary_free_entries);
}
- // Upgrade entry from the probationary segment by erasing it from the
- // probationary segment then adding entry to the protected segment.
- // Downgrade any entries in the protected segment if past capacity.
- probationary_shard_.SoftErase(key, hash);
- protected_shard_.ReInsert(val_handle);
- DowngradeEntries();
+ // Free entries outside lock for performance reasons.
+ // The evicted entries are freed in the opposite order of their eviction.
+ // Of the evicted entries, the MRU entries are freed first and the LRU
entries are freed last.
+ while (probationary_free_entries != nullptr) {
+ auto* next = probationary_free_entries->next;
+ probationary_shard_.FreeEntry(probationary_free_entries);
+ probationary_free_entries = next;
+ }
return probationary_handle;
}
@@ -466,9 +511,20 @@ void SLRUCacheShardPair::Release(Handle* handle) {
}
void SLRUCacheShardPair::Erase(const Slice& key, uint32_t hash) {
- std::lock_guard l(mutex_);
- probationary_shard_.Erase(key, hash);
- protected_shard_.Erase(key, hash);
+ SLRUHandle* probationary_free_entry = nullptr;
+ SLRUHandle* protected_free_entry = nullptr;
+ {
+ std::lock_guard l(mutex_);
+ probationary_shard_.Erase(key, hash, &probationary_free_entry);
+ protected_shard_.Erase(key, hash, &protected_free_entry);
+ }
+
+ // Free entry outside lock for performance reasons.
+ if (probationary_free_entry) {
+ probationary_shard_.FreeEntry(probationary_free_entry);
+ } else if (protected_free_entry) {
+ protected_shard_.FreeEntry(protected_free_entry);
+ }
}
bool SLRUCacheShardPair::ProbationaryContains(const Slice& key, uint32_t hash)
{
@@ -479,19 +535,19 @@ bool SLRUCacheShardPair::ProtectedContains(const Slice&
key, uint32_t hash) {
return protected_shard_.Contains(key, hash);
}
-void SLRUCacheShardPair::DowngradeEntries() {
+void SLRUCacheShardPair::DowngradeEntries(SLRUHandle** free_entries) {
// Removes LRU entries of the protected shard while its usage exceeds its
capacity
// and reinserts them into the probationary shard.
while (protected_shard_.usage_ > protected_shard_.capacity_ &&
protected_shard_.rl_.next != &protected_shard_.rl_) {
- SLRUHandle* lru_entry = protected_shard_.rl_.next;
+ auto* lru_entry = protected_shard_.rl_.next;
protected_shard_.RL_Remove(lru_entry);
protected_shard_.table_.Remove(lru_entry->key(), lru_entry->hash);
if (PREDICT_TRUE(protected_shard_.metrics_)) {
protected_shard_.UpdateMetricsEviction(lru_entry->charge);
}
DCHECK(lru_entry);
- probationary_shard_.ReInsert(lru_entry);
+ probationary_shard_.ReInsert(lru_entry, free_entries);
}
}
diff --git a/src/kudu/util/slru_cache.h b/src/kudu/util/slru_cache.h
index 2cee6b0da..5e60efe65 100644
--- a/src/kudu/util/slru_cache.h
+++ b/src/kudu/util/slru_cache.h
@@ -116,17 +116,21 @@ class SLRUCacheShard {
void SetMetrics(SLRUCacheMetrics* metrics) { metrics_ = metrics; }
// Inserts handle into the appropriate shard and returns the inserted handle.
+ // Returns entries to be freed outside the lock with parameter
'free_entries'.
// See comments on template specialization for each function for more
details.
- Handle* Insert(SLRUHandle* handle, EvictionCallback* eviction_callback);
+ Handle* Insert(SLRUHandle* handle,
+ EvictionCallback* eviction_callback,
+ SLRUHandle** free_entries);
// Inserts handle into the appropriate shard when upgrading or downgrading
entry.
+ // Returns entries to be freed outside the lock with parameter
'free_entries'.
// See comments on template specialization for each function for more
details.
- void ReInsert(SLRUHandle* handle);
+ void ReInsert(SLRUHandle* handle, SLRUHandle** free_entries);
// Like SLRUCache::Lookup, but with an extra "hash" parameter.
Handle* Lookup(const Slice& key, uint32_t hash, bool caching);
// Reduces the entry's ref by one, frees the entry if no refs are remaining.
void Release(Handle* handle);
- // Removes entry from shard, frees the entry if no refs are remaining.
- void Erase(const Slice& key, uint32_t hash);
+ // Removes entry from shard, returns it to be freed if no refs are remaining.
+ void Erase(const Slice& key, uint32_t hash, SLRUHandle** free_entry);
// Like Erase, but underlying entry is not freed.
// Necessary when upgrading entry to protected segment.
void SoftErase(const Slice& key, uint32_t hash);
@@ -151,7 +155,8 @@ class SLRUCacheShard {
// Updates eviction related metrics.
void UpdateMetricsEviction(size_t charge);
// Removes any entries past capacity of the probationary shard.
- void RemoveEntriesPastCapacity();
+ // Those entries are returned through parameter 'free_entries' to be freed
outside the lock.
+ void RemoveEntriesPastCapacity(SLRUHandle** free_entries);
// Update the memtracker's consumption by the given amount.
//
@@ -218,7 +223,9 @@ class SLRUCacheShardPair {
// Remove any entries past capacity in the protected shard and insert them
into the probationary
// shard. As a result of inserting them into the probationary shard, the LRU
entries of the
// probationary shard will be evicted if the probationary shard's usage
exceeds its capacity.
- void DowngradeEntries();
+ // Any entries evicted from the probationary shard are returned through the
parameter
+ // 'free_entries' to be freed outside the lock.
+ void DowngradeEntries(SLRUHandle** free_entries);
SLRUCacheShard<Segment::kProbationary> probationary_shard_;
SLRUCacheShard<Segment::kProtected> protected_shard_;