This is an automated email from the ASF dual-hosted git repository.
alexey pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/kudu.git
The following commit(s) were added to refs/heads/master by this push:
new 7102025af KUDU-613: Add SLRU Cache metrics
7102025af is described below
commit 7102025af776a65715ecdabcd3be469cb1e972ea
Author: Mahesh Reddy <[email protected]>
AuthorDate: Tue Feb 6 15:18:27 2024 -0500
KUDU-613: Add SLRU Cache metrics
This patch adds segment-specific metrics for both
the probationary and the protected segment along
with high-level metrics for the entire SLRU cache.
It also adds these same metrics to the block cache
so it can support a cache with SLRU eviction policy.
This patch adds a template parameter to the
SLRUCacheShard class to indicate which segment
a shard belongs to.
Change-Id: I1c8181ec6bea301605aaef7db0003c3eaef3072d
Reviewed-on: http://gerrit.cloudera.org:8080/21389
Tested-by: Alexey Serbin <[email protected]>
Reviewed-by: Alexey Serbin <[email protected]>
---
src/kudu/util/block_cache_metrics.cc | 126 +++++++++++++++++++++
src/kudu/util/cache_metrics.h | 29 +++++
src/kudu/util/slru_cache-test.cc | 13 +++
src/kudu/util/slru_cache.cc | 209 ++++++++++++++++++++++++++++++-----
src/kudu/util/slru_cache.h | 34 +++++-
5 files changed, 379 insertions(+), 32 deletions(-)
diff --git a/src/kudu/util/block_cache_metrics.cc
b/src/kudu/util/block_cache_metrics.cc
index a07de117c..c6a626dd4 100644
--- a/src/kudu/util/block_cache_metrics.cc
+++ b/src/kudu/util/block_cache_metrics.cc
@@ -17,6 +17,8 @@
#include "kudu/util/block_cache_metrics.h"
+#include <cstdint>
+
#include "kudu/util/metrics.h"
METRIC_DEFINE_counter(server, block_cache_inserts,
@@ -57,6 +59,98 @@ METRIC_DEFINE_gauge_uint64(server, block_cache_usage, "Block
Cache Memory Usage"
"Memory consumed by the block cache",
kudu::MetricLevel::kInfo);
+METRIC_DEFINE_counter(server, block_cache_upgrades,
+ "Block Cache Upgrades", kudu::MetricUnit::kBlocks,
+ "Number of blocks upgraded from the probationary segment
to "
+ "the protected segment of the block cache",
+ kudu::MetricLevel::kDebug);
+METRIC_DEFINE_counter(server, block_cache_downgrades,
+ "Block Cache Downgrades", kudu::MetricUnit::kBlocks,
+ "Number of blocks downgraded from the protected segment
to "
+ "the probationary segment of the block cache",
+ kudu::MetricLevel::kDebug);
+
+METRIC_DEFINE_counter(server, block_cache_probationary_segment_inserts,
+ "Block Cache Probationary Segment Inserts",
kudu::MetricUnit::kBlocks,
+ "Number of blocks inserted in the probationary segment
of the cache",
+ kudu::MetricLevel::kDebug);
+METRIC_DEFINE_counter(server, block_cache_probationary_segment_lookups,
+ "Block Cache Probationary Segment Lookups",
kudu::MetricUnit::kBlocks,
+ "Number of blocks looked up from the probationary
segment of the cache",
+ kudu::MetricLevel::kDebug);
+METRIC_DEFINE_counter(server, block_cache_probationary_segment_evictions,
+ "Block Cache Probationary Segment Evictions",
kudu::MetricUnit::kBlocks,
+ "Number of blocks evicted from the probationary segment
of the cache",
+ kudu::MetricLevel::kDebug);
+METRIC_DEFINE_counter(server, block_cache_probationary_segment_misses,
+ "Block Cache Probationary Segment Misses",
kudu::MetricUnit::kBlocks,
+ "Number of lookups in the probationary segment that
didn't yield a block",
+ kudu::MetricLevel::kDebug);
+METRIC_DEFINE_counter(server, block_cache_probationary_segment_misses_caching,
+ "Block Cache Probationary Segment Misses (Caching)",
+ kudu::MetricUnit::kBlocks,
+ "Number of lookups in the probationary segment that were
expecting a block "
+ "that didn't yield one. Use this number instead of "
+ "block_cache_probationary_segment_misses when trying to
determine how "
+ "efficient the probationary segment is",
+ kudu::MetricLevel::kDebug);
+METRIC_DEFINE_counter(server, block_cache_probationary_segment_hits,
+ "Block Cache Probationary Segment Hits",
kudu::MetricUnit::kBlocks,
+ "Number of lookups in the probationary segment that
found a block",
+ kudu::MetricLevel::kDebug);
+METRIC_DEFINE_counter(server, block_cache_probationary_segment_hits_caching,
+ "Block Cache Probationary Segment Hits (Caching)",
kudu::MetricUnit::kBlocks,
+ "Number of lookups in the probationary segment that were
expecting a block "
+ "that found one. Use this number instead of "
+ "block_cache_probationary_segment_hits when trying to
determine "
+ "how efficient the probationary segment is",
+ kudu::MetricLevel::kDebug);
+
+METRIC_DEFINE_gauge_uint64(server, block_cache_probationary_segment_usage,
+ "Block Cache Probationary Segment Memory Usage",
+ kudu::MetricUnit::kBytes,
+ "Memory consumed by the probationary segment of the
block cache",
+ kudu::MetricLevel::kInfo);
+
+METRIC_DEFINE_counter(server, block_cache_protected_segment_inserts,
+ "Block Cache Protected Segment Inserts",
kudu::MetricUnit::kBlocks,
+ "Number of blocks inserted in the protected segment of
the cache",
+ kudu::MetricLevel::kDebug);
+METRIC_DEFINE_counter(server, block_cache_protected_segment_lookups,
+ "Block Cache Protected Segment Lookups",
kudu::MetricUnit::kBlocks,
+ "Number of blocks looked up from the protected segment
of the cache",
+ kudu::MetricLevel::kDebug);
+METRIC_DEFINE_counter(server, block_cache_protected_segment_evictions,
+ "Block Cache Protected Segment Evictions",
kudu::MetricUnit::kBlocks,
+ "Number of blocks evicted from the protected segment of
the cache",
+ kudu::MetricLevel::kDebug);
+METRIC_DEFINE_counter(server, block_cache_protected_segment_misses,
+ "Block Cache Protected Segment Misses",
kudu::MetricUnit::kBlocks,
+ "Number of lookups in the protected segment that didn't
yield a block",
+ kudu::MetricLevel::kDebug);
+METRIC_DEFINE_counter(server, block_cache_protected_segment_misses_caching,
+ "Block Cache Protected Segment Misses (Caching)",
kudu::MetricUnit::kBlocks,
+ "Number of lookups in the protected segment that were
expecting a block that "
+ "didn't yield one. Use this number instead of "
+ "block_cache_protected_segment_misses when trying to
determine "
+ "how efficient the protected segment is",
+ kudu::MetricLevel::kDebug);
+METRIC_DEFINE_counter(server, block_cache_protected_segment_hits,
+ "Block Cache Protected Segment Hits",
kudu::MetricUnit::kBlocks,
+ "Number of lookups in the protected segment that found a
block",
+ kudu::MetricLevel::kDebug);
+METRIC_DEFINE_counter(server, block_cache_protected_segment_hits_caching,
+ "Block Cache Protected Segment Hits (Caching)",
kudu::MetricUnit::kBlocks,
+ "Number of lookups in the protected segment that were
expecting a block that "
+ "found one. Use this number instead of
block_cache_protected_segment_hits "
+ "when trying to determine how efficient the protected
segment is",
+ kudu::MetricLevel::kDebug);
+
+METRIC_DEFINE_gauge_uint64(server, block_cache_protected_segment_usage,
+ "Block Cache Protected Segment Memory Usage",
kudu::MetricUnit::kBytes,
+ "Memory consumed by the protected segment of the
block cache",
+ kudu::MetricLevel::kInfo);
+
namespace kudu {
#define MINIT(member, x) member = METRIC_##x.Instantiate(entity)
@@ -71,6 +165,38 @@ BlockCacheMetrics::BlockCacheMetrics(const
scoped_refptr<MetricEntity>& entity)
MINIT(cache_misses_caching, block_cache_misses_caching);
GINIT(cache_usage, block_cache_usage);
}
+
+SLRUCacheMetrics::SLRUCacheMetrics(const scoped_refptr<MetricEntity>& entity) {
+ MINIT(inserts, block_cache_inserts);
+ MINIT(lookups, block_cache_lookups);
+ MINIT(evictions, block_cache_evictions);
+ MINIT(cache_hits, block_cache_hits);
+ MINIT(cache_hits_caching, block_cache_hits_caching);
+ MINIT(cache_misses, block_cache_misses);
+ MINIT(cache_misses_caching, block_cache_misses_caching);
+ GINIT(cache_usage, block_cache_usage);
+
+ MINIT(upgrades, block_cache_upgrades);
+ MINIT(downgrades, block_cache_downgrades);
+
+ MINIT(probationary_segment_inserts,
block_cache_probationary_segment_inserts);
+ MINIT(probationary_segment_lookups,
block_cache_probationary_segment_lookups);
+ MINIT(probationary_segment_evictions,
block_cache_probationary_segment_evictions);
+ MINIT(probationary_segment_cache_hits,
block_cache_probationary_segment_hits);
+ MINIT(probationary_segment_cache_hits_caching,
block_cache_probationary_segment_hits_caching);
+ MINIT(probationary_segment_cache_misses,
block_cache_probationary_segment_misses);
+ MINIT(probationary_segment_cache_misses_caching,
block_cache_probationary_segment_misses_caching);
+ GINIT(probationary_segment_cache_usage,
block_cache_probationary_segment_usage);
+
+ MINIT(protected_segment_inserts, block_cache_protected_segment_inserts);
+ MINIT(protected_segment_lookups, block_cache_protected_segment_lookups);
+ MINIT(protected_segment_evictions, block_cache_protected_segment_evictions);
+ MINIT(protected_segment_cache_hits, block_cache_protected_segment_hits);
+ MINIT(protected_segment_cache_hits_caching,
block_cache_protected_segment_hits_caching);
+ MINIT(protected_segment_cache_misses, block_cache_protected_segment_misses);
+ MINIT(protected_segment_cache_misses_caching,
block_cache_protected_segment_misses_caching);
+ GINIT(protected_segment_cache_usage, block_cache_protected_segment_usage);
+}
#undef MINIT
#undef GINIT
diff --git a/src/kudu/util/cache_metrics.h b/src/kudu/util/cache_metrics.h
index 9da0c2627..018880a3a 100644
--- a/src/kudu/util/cache_metrics.h
+++ b/src/kudu/util/cache_metrics.h
@@ -38,4 +38,33 @@ struct CacheMetrics {
scoped_refptr<AtomicGauge<uint64_t>> cache_usage;
};
+// TODO(mreddy): Calculate high-level cache metrics by using
+// information from segment-level metrics.
+struct SLRUCacheMetrics : public CacheMetrics {
+ explicit SLRUCacheMetrics(const scoped_refptr<MetricEntity>& entity);
+
+ scoped_refptr<Counter> upgrades;
+ scoped_refptr<Counter> downgrades;
+
+ scoped_refptr<Counter> probationary_segment_inserts;
+ scoped_refptr<Counter> probationary_segment_lookups;
+ scoped_refptr<Counter> probationary_segment_evictions;
+ scoped_refptr<Counter> probationary_segment_cache_hits;
+ scoped_refptr<Counter> probationary_segment_cache_hits_caching;
+ scoped_refptr<Counter> probationary_segment_cache_misses;
+ scoped_refptr<Counter> probationary_segment_cache_misses_caching;
+
+ scoped_refptr<AtomicGauge<uint64_t>> probationary_segment_cache_usage;
+
+ scoped_refptr<Counter> protected_segment_inserts;
+ scoped_refptr<Counter> protected_segment_lookups;
+ scoped_refptr<Counter> protected_segment_evictions;
+ scoped_refptr<Counter> protected_segment_cache_hits;
+ scoped_refptr<Counter> protected_segment_cache_hits_caching;
+ scoped_refptr<Counter> protected_segment_cache_misses;
+ scoped_refptr<Counter> protected_segment_cache_misses_caching;
+
+ scoped_refptr<AtomicGauge<uint64_t>> protected_segment_cache_usage;
+};
+
} // namespace kudu
diff --git a/src/kudu/util/slru_cache-test.cc b/src/kudu/util/slru_cache-test.cc
index 773828c1b..4b05befc7 100644
--- a/src/kudu/util/slru_cache-test.cc
+++ b/src/kudu/util/slru_cache-test.cc
@@ -28,10 +28,13 @@
#include <glog/logging.h>
#include <gtest/gtest.h>
+#include "kudu/gutil/ref_counted.h"
#include "kudu/util/cache.h"
+#include "kudu/util/cache_metrics.h"
#include "kudu/util/coding.h"
#include "kudu/util/faststring.h"
#include "kudu/util/mem_tracker.h"
+#include "kudu/util/metrics.h"
#include "kudu/util/slice.h"
#include "kudu/util/test_util.h"
@@ -131,6 +134,15 @@ class SLRUCacheBaseTest : public KuduTest,
MemTracker::FindTracker("slru_cache_test-sharded_slru_cache",
&mem_tracker_);
ASSERT_TRUE(mem_tracker_.get());
+
+ // slru_cache_ will be null if we're trying to set up a test for the NVM
cache
+ // and were unable to do so.
+ if (slru_cache_) {
+ scoped_refptr<MetricEntity> entity = METRIC_ENTITY_server.Instantiate(
+ &metric_registry_, "test");
+ std::unique_ptr<SLRUCacheMetrics> metrics(new SLRUCacheMetrics(entity));
+ slru_cache_->SetMetrics(std::move(metrics),
Cache::ExistingMetricsPolicy::kKeep);
+ }
}
const size_t probationary_segment_size_;
@@ -141,6 +153,7 @@ class SLRUCacheBaseTest : public KuduTest,
std::vector<int> evicted_values_;
std::shared_ptr<MemTracker> mem_tracker_;
std::unique_ptr<ShardedSLRUCache> slru_cache_;
+ MetricRegistry metric_registry_;
};
class SLRUCacheTest :
diff --git a/src/kudu/util/slru_cache.cc b/src/kudu/util/slru_cache.cc
index f8aafb6ae..4c5eb01d7 100644
--- a/src/kudu/util/slru_cache.cc
+++ b/src/kudu/util/slru_cache.cc
@@ -22,6 +22,7 @@
#include <memory>
#include <mutex>
#include <ostream>
+#include <utility>
#include <vector>
#include <gflags/gflags_declare.h>
@@ -30,13 +31,17 @@
#include "kudu/gutil/bits.h"
#include "kudu/gutil/hash/city.h"
#include "kudu/gutil/port.h"
+#include "kudu/gutil/ref_counted.h"
#include "kudu/gutil/strings/substitute.h"
#include "kudu/gutil/sysinfo.h"
#include "kudu/util/alignment.h"
#include "kudu/util/cache.h"
+#include "kudu/util/cache_metrics.h"
#include "kudu/util/malloc.h"
#include "kudu/util/mem_tracker.h"
+#include "kudu/util/metrics.h"
#include "kudu/util/slice.h"
+#include "kudu/util/test_util_prod.h"
DECLARE_bool(cache_force_single_shard);
DECLARE_double(cache_memtracker_approximation_ratio);
@@ -47,17 +52,20 @@ using EvictionCallback = kudu::Cache::EvictionCallback;
namespace kudu {
-SLRUCacheShard::SLRUCacheShard(MemTracker* tracker, size_t capacity)
+template<Segment segment>
+SLRUCacheShard<segment>::SLRUCacheShard(MemTracker* tracker, size_t capacity)
: capacity_(capacity),
usage_(0),
- mem_tracker_(tracker) {
+ mem_tracker_(tracker),
+ metrics_(nullptr) {
max_deferred_consumption_ = capacity *
FLAGS_cache_memtracker_approximation_ratio;
// Make empty circular linked list.
rl_.next = &rl_;
rl_.prev = &rl_;
}
-SLRUCacheShard::~SLRUCacheShard() {
+template<Segment segment>
+SLRUCacheShard<segment>::~SLRUCacheShard() {
for (SLRUHandle* e = rl_.next; e != &rl_; ) {
SLRUHandle* next = e->next;
DCHECK_EQ(e->refs.load(std::memory_order_relaxed), 1)
@@ -70,25 +78,49 @@ SLRUCacheShard::~SLRUCacheShard() {
mem_tracker_->Consume(deferred_consumption_);
}
-bool SLRUCacheShard::Unref(SLRUHandle* e) {
+template<Segment segment>
+bool SLRUCacheShard<segment>::Unref(SLRUHandle* e) {
DCHECK_GT(e->refs.load(std::memory_order_relaxed), 0);
return e->refs.fetch_sub(1) == 1;
}
-void SLRUCacheShard::FreeEntry(SLRUHandle* e) {
+template<Segment segment>
+void SLRUCacheShard<segment>::FreeEntry(SLRUHandle* e) {
DCHECK_EQ(e->refs.load(std::memory_order_relaxed), 0);
if (e->eviction_callback) {
e->eviction_callback->EvictedEntry(e->key(), e->value());
}
UpdateMemTracker(-static_cast<int64_t>(e->charge));
+ if (PREDICT_TRUE(metrics_)) {
+ metrics_->cache_usage->DecrementBy(e->charge);
+ metrics_->evictions->Increment();
+ UpdateMetricsEviction(e->charge);
+ }
delete [] e;
}
-void SLRUCacheShard::SoftFreeEntry(SLRUHandle* e) {
+template<Segment segment>
+void SLRUCacheShard<segment>::SoftFreeEntry(SLRUHandle* e) {
UpdateMemTracker(-static_cast<int64_t>(e->charge));
+ if (PREDICT_TRUE(metrics_)) {
+ UpdateMetricsEviction(e->charge);
+ }
+}
+
+template<>
+void SLRUCacheShard<Segment::kProbationary>::UpdateMetricsEviction(size_t
charge) {
+ metrics_->probationary_segment_cache_usage->DecrementBy(charge);
+ metrics_->probationary_segment_evictions->Increment();
}
-void SLRUCacheShard::UpdateMemTracker(int64_t delta) {
+template<>
+void SLRUCacheShard<Segment::kProtected>::UpdateMetricsEviction(size_t charge)
{
+ metrics_->protected_segment_cache_usage->DecrementBy(charge);
+ metrics_->protected_segment_evictions->Increment();
+}
+
+template<Segment segment>
+void SLRUCacheShard<segment>::UpdateMemTracker(int64_t delta) {
int64_t old_deferred = deferred_consumption_.fetch_add(delta);
int64_t new_deferred = old_deferred + delta;
@@ -99,14 +131,77 @@ void SLRUCacheShard::UpdateMemTracker(int64_t delta) {
}
}
-void SLRUCacheShard::RL_Remove(SLRUHandle* e) {
+template<Segment segment>
+void SLRUCacheShard<segment>::UpdateMetricsLookup(bool was_hit, bool caching) {
+ if (PREDICT_TRUE(metrics_)) {
+ metrics_->lookups->Increment();
+ if (was_hit) {
+ if (caching) {
+ metrics_->cache_hits_caching->Increment();
+ } else {
+ metrics_->cache_hits->Increment();
+ }
+ } else {
+ if (caching) {
+ metrics_->cache_misses_caching->Increment();
+ } else {
+ metrics_->cache_misses->Increment();
+ }
+ }
+ }
+}
+
+template<>
+void SLRUCacheShard<Segment::kProbationary>::UpdateSegmentMetricsLookup(bool
was_hit,
+ bool
caching) {
+ if (PREDICT_TRUE(metrics_)) {
+ metrics_->probationary_segment_lookups->Increment();
+ if (was_hit) {
+ if (caching) {
+ metrics_->probationary_segment_cache_hits_caching->Increment();
+ } else {
+ metrics_->probationary_segment_cache_hits->Increment();
+ }
+ } else {
+ if (caching) {
+ metrics_->probationary_segment_cache_misses_caching->Increment();
+ } else {
+ metrics_->probationary_segment_cache_misses->Increment();
+ }
+ }
+ }
+}
+
+template<>
+void SLRUCacheShard<Segment::kProtected>::UpdateSegmentMetricsLookup(bool
was_hit, bool caching) {
+ if (PREDICT_TRUE(metrics_)) {
+ metrics_->protected_segment_lookups->Increment();
+ if (was_hit) {
+ if (caching) {
+ metrics_->protected_segment_cache_hits_caching->Increment();
+ } else {
+ metrics_->protected_segment_cache_hits->Increment();
+ }
+ } else {
+ if (caching) {
+ metrics_->protected_segment_cache_misses_caching->Increment();
+ } else {
+ metrics_->protected_segment_cache_misses->Increment();
+ }
+ }
+ }
+}
+
+template<Segment segment>
+void SLRUCacheShard<segment>::RL_Remove(SLRUHandle* e) {
e->next->prev = e->prev;
e->prev->next = e->next;
DCHECK_GE(usage_, e->charge);
usage_ -= e->charge;
}
-void SLRUCacheShard::RL_Append(SLRUHandle* e) {
+template<Segment segment>
+void SLRUCacheShard<segment>::RL_Append(SLRUHandle* e) {
// Make "e" newest entry by inserting just before rl_.
e->next = &rl_;
e->prev = rl_.prev;
@@ -115,7 +210,8 @@ void SLRUCacheShard::RL_Append(SLRUHandle* e) {
usage_ += e->charge;
}
-void SLRUCacheShard::RL_UpdateAfterLookup(SLRUHandle* e) {
+template<Segment segment>
+void SLRUCacheShard<segment>::RL_UpdateAfterLookup(SLRUHandle* e) {
RL_Remove(e);
RL_Append(e);
}
@@ -123,22 +219,26 @@ void SLRUCacheShard::RL_UpdateAfterLookup(SLRUHandle* e) {
// No mutex is needed here since all the SLRUCacheShardPair methods that
access the underlying
// shards and its tables are protected by mutexes. Same logic applies to the
all the below
// methods in SLRUCacheShard.
-Handle* SLRUCacheShard::Lookup(const Slice& key, uint32_t hash, bool caching) {
+template<Segment segment>
+Handle* SLRUCacheShard<segment>::Lookup(const Slice& key, uint32_t hash, bool
caching) {
SLRUHandle* e = table_.Lookup(key, hash);
if (e != nullptr) {
e->refs.fetch_add(1, std::memory_order_relaxed);
e->lookups++;
RL_UpdateAfterLookup(e);
}
+ UpdateSegmentMetricsLookup(e != nullptr, caching);
return reinterpret_cast<Handle*>(e);
}
-bool SLRUCacheShard::Contains(const Slice& key, uint32_t hash) {
+template<Segment segment>
+bool SLRUCacheShard<segment>::Contains(const Slice& key, uint32_t hash) {
return table_.Lookup(key, hash) != nullptr;
}
-void SLRUCacheShard::Release(Handle* handle) {
+template<Segment segment>
+void SLRUCacheShard<segment>::Release(Handle* handle) {
SLRUHandle* e = reinterpret_cast<SLRUHandle*>(handle);
// If this is the last reference of the handle, the entry will be freed.
if (Unref(e)) {
@@ -146,14 +246,20 @@ void SLRUCacheShard::Release(Handle* handle) {
}
}
-Handle* SLRUCacheShard::Insert(SLRUHandle* handle,
- EvictionCallback* eviction_callback) {
+template<>
+Handle* SLRUCacheShard<Segment::kProbationary>::Insert(SLRUHandle* handle,
+ EvictionCallback*
eviction_callback) {
// Set the remaining SLRUHandle members which were not already allocated
during Allocate().
handle->eviction_callback = eviction_callback;
// Two refs for the handle: one from SLRUCacheShard, one for the returned
handle.
handle->refs.store(2, std::memory_order_relaxed);
UpdateMemTracker(handle->charge);
-
+ if (PREDICT_TRUE(metrics_)) {
+ metrics_->cache_usage->IncrementBy(handle->charge);
+ metrics_->inserts->Increment();
+ metrics_->probationary_segment_cache_usage->IncrementBy(handle->charge);
+ metrics_->probationary_segment_inserts->Increment();
+ }
RL_Append(handle);
SLRUHandle* old_entry = table_.Insert(handle);
@@ -176,10 +282,17 @@ Handle* SLRUCacheShard::Insert(SLRUHandle* handle,
return reinterpret_cast<Handle*>(handle);
}
-vector<SLRUHandle*> SLRUCacheShard::InsertAndReturnEvicted(SLRUHandle* handle)
{
+template<>
+vector<SLRUHandle*>
SLRUCacheShard<Segment::kProtected>::InsertAndReturnEvicted(
+ SLRUHandle* handle) {
handle->refs.fetch_add(1, std::memory_order_relaxed);
handle->in_protected_segment.store(true, std::memory_order_relaxed);
UpdateMemTracker(handle->charge);
+ if (PREDICT_TRUE(metrics_)) {
+ metrics_->upgrades->Increment();
+ metrics_->protected_segment_cache_usage->IncrementBy(handle->charge);
+ metrics_->protected_segment_inserts->Increment();
+ }
vector<SLRUHandle*> evicted_entries;
handle->Sanitize();
@@ -201,12 +314,19 @@ vector<SLRUHandle*>
SLRUCacheShard::InsertAndReturnEvicted(SLRUHandle* handle) {
return evicted_entries;
}
-Handle* SLRUCacheShard::ProtectedInsert(SLRUHandle* handle,
- EvictionCallback* eviction_callback,
- vector<SLRUHandle*>* evictions) {
+template<>
+Handle* SLRUCacheShard<Segment::kProtected>::ProtectedInsert(SLRUHandle*
handle,
+ EvictionCallback*
eviction_callback,
+
vector<SLRUHandle*>* evictions) {
handle->eviction_callback = eviction_callback;
handle->refs.store(2, std::memory_order_relaxed);
UpdateMemTracker(handle->charge);
+ if (PREDICT_TRUE(metrics_)) {
+ metrics_->cache_usage->IncrementBy(handle->charge);
+ metrics_->inserts->Increment();
+ metrics_->protected_segment_cache_usage->IncrementBy(handle->charge);
+ metrics_->protected_segment_inserts->Increment();
+ }
RL_Append(handle);
@@ -230,10 +350,16 @@ Handle* SLRUCacheShard::ProtectedInsert(SLRUHandle*
handle,
return reinterpret_cast<Handle*>(handle);
}
-void SLRUCacheShard::ReInsert(SLRUHandle* handle) {
+template<>
+void SLRUCacheShard<Segment::kProbationary>::ReInsert(SLRUHandle* handle) {
handle->refs.fetch_add(1, std::memory_order_relaxed);
handle->in_protected_segment.store(false, std::memory_order_relaxed);
UpdateMemTracker(handle->charge);
+ if (PREDICT_TRUE(metrics_)) {
+ metrics_->downgrades->Increment();
+ metrics_->probationary_segment_cache_usage->IncrementBy(handle->charge);
+ metrics_->probationary_segment_inserts->Increment();
+ }
handle->Sanitize();
RL_Append(handle);
@@ -252,7 +378,8 @@ void SLRUCacheShard::ReInsert(SLRUHandle* handle) {
}
}
-void SLRUCacheShard::Erase(const Slice& key, uint32_t hash) {
+template<Segment segment>
+void SLRUCacheShard<segment>::Erase(const Slice& key, uint32_t hash) {
SLRUHandle* e = table_.Remove(key, hash);
if (e != nullptr) {
RL_Remove(e);
@@ -263,7 +390,8 @@ void SLRUCacheShard::Erase(const Slice& key, uint32_t hash)
{
}
}
-void SLRUCacheShard::SoftErase(const Slice& key, uint32_t hash) {
+template<Segment segment>
+void SLRUCacheShard<segment>::SoftErase(const Slice& key, uint32_t hash) {
SLRUHandle* e = table_.Remove(key, hash);
if (e != nullptr) {
RL_Remove(e);
@@ -272,15 +400,24 @@ void SLRUCacheShard::SoftErase(const Slice& key, uint32_t
hash) {
}
}
+template class SLRUCacheShard<Segment::kProtected>;
+template class SLRUCacheShard<Segment::kProbationary>;
+
SLRUCacheShardPair::SLRUCacheShardPair(MemTracker* mem_tracker,
size_t probationary_capacity,
size_t protected_capacity,
uint32_t lookups) :
- probationary_shard_(SLRUCacheShard(mem_tracker, probationary_capacity)),
- protected_shard_(SLRUCacheShard(mem_tracker, protected_capacity)),
+ probationary_shard_(SLRUCacheShard<Segment::kProbationary>(mem_tracker,
probationary_capacity)),
+ protected_shard_(SLRUCacheShard<Segment::kProtected>(mem_tracker,
protected_capacity)),
lookups_threshold_(lookups) {
}
+void SLRUCacheShardPair::SetMetrics(SLRUCacheMetrics* metrics) {
+ std::lock_guard l(mutex_);
+ probationary_shard_.SetMetrics(metrics);
+ protected_shard_.SetMetrics(metrics);
+}
+
// Commit a prepared entry into the probationary segment if entry does not
exist or if it
// exists in the probationary segment (upsert case).
// If entry exists in protected segment, entry will be upserted and any
evicted entries will
@@ -306,7 +443,7 @@ Handle* SLRUCacheShardPair::Insert(SLRUHandle* handle,
Handle* SLRUCacheShardPair::Lookup(const Slice& key, uint32_t hash, bool
caching) {
// Lookup protected segment:
// - Hit: Return handle.
- // - Miss: Lookup probationary segment
+ // - Miss: Lookup probationary segment:
// - Hit: If the number of lookups is < than 'lookups_threshold_',
return the lookup handle.
// If the number of lookups is >= than 'lookups_threshold_',
upgrade the entry:
// Erase entry from the probationary segment and insert entry
into the protected
@@ -314,20 +451,25 @@ Handle* SLRUCacheShardPair::Lookup(const Slice& key,
uint32_t hash, bool caching
// protected segment, insert them into the probationary
segment.
// - Miss: Return the handle.
//
+ // Lookup metrics for both segments and the high-level cache are updated
with each lookup.
std::lock_guard<decltype(mutex_)> l(mutex_);
Handle* protected_handle = protected_shard_.Lookup(key, hash, caching);
// If the key exists in the protected segment, return the result from the
lookup of the
// protected segment.
if (protected_handle) {
+ protected_shard_.UpdateMetricsLookup(true, caching);
+ probationary_shard_.UpdateSegmentMetricsLookup(false, caching);
return protected_handle;
}
Handle* probationary_handle = probationary_shard_.Lookup(key, hash, caching);
// Return null handle if handle is not found in either the probationary or
protected segment.
if (!probationary_handle) {
+ protected_shard_.UpdateMetricsLookup(false, caching);
return probationary_handle;
}
+ protected_shard_.UpdateMetricsLookup(true, caching);
auto* val_handle = reinterpret_cast<SLRUHandle*>(probationary_handle);
// If the number of lookups for entry isn't at the minimum number required
before
// upgrading to the protected segment, return the entry found in
probationary segment.
@@ -449,6 +591,21 @@ Cache::UniqueHandle
ShardedSLRUCache::Insert(UniquePendingHandle handle,
HandleDeleter(this));
}
+void ShardedSLRUCache::SetMetrics(std::unique_ptr<CacheMetrics> metrics,
+ ExistingMetricsPolicy metrics_policy) {
+ std::lock_guard l(metrics_lock_);
+ if (metrics_ && metrics_policy == ExistingMetricsPolicy::kKeep) {
+ CHECK(IsGTest()) << "Metrics should only be set once per Cache";
+ return;
+ }
+ metrics_ = std::move(metrics);
+ auto* metrics_ptr = dynamic_cast<SLRUCacheMetrics*>(metrics_.get());
+ DCHECK(metrics_ptr != nullptr);
+ for (auto& shard_pair : shards_) {
+ shard_pair->SetMetrics(metrics_ptr);
+ }
+}
+
void ShardedSLRUCache::Release(Handle* handle) {
SLRUHandle* h = reinterpret_cast<SLRUHandle*>(handle);
shards_[Shard(h->hash)]->Release(handle);
diff --git a/src/kudu/util/slru_cache.h b/src/kudu/util/slru_cache.h
index aab328245..dab20c7a5 100644
--- a/src/kudu/util/slru_cache.h
+++ b/src/kudu/util/slru_cache.h
@@ -27,6 +27,7 @@
#include "kudu/gutil/macros.h"
#include "kudu/util/alignment.h"
#include "kudu/util/cache.h"
+#include "kudu/util/cache_metrics.h"
#include "kudu/util/locks.h"
#include "kudu/util/slice.h"
@@ -43,7 +44,6 @@ namespace kudu {
// result of upgrade of entries would then be inserted into the MRU end of the
probationary segment.
class MemTracker;
-struct CacheMetrics;
struct SLRUHandle {
EvictionCallback* eviction_callback;
@@ -94,9 +94,14 @@ struct SLRUHandle {
next_hash = nullptr;
}
};
-// TODO(mreddy): Add an extra layer of inheritance to SLRUCacheShard for
ProtectedShard and
-// ProbationaryShard so segment-specific methods can be divided accordingly.
+
+enum class Segment {
+ kProbationary,
+ kProtected,
+};
+
// A single shard of sharded SLRU cache.
+template <Segment segment>
class SLRUCacheShard {
public:
SLRUCacheShard(MemTracker* tracker, size_t capacity);
@@ -106,6 +111,8 @@ class SLRUCacheShard {
return capacity_;
}
+ void SetMetrics(SLRUCacheMetrics* metrics) { metrics_ = metrics; }
+
// Inserts handle into shard and removes any entries if past capacity.
Handle* Insert(SLRUHandle* handle, EvictionCallback* eviction_callback);
// Same as Insert but also returns any evicted entries.
@@ -130,6 +137,10 @@ class SLRUCacheShard {
// Like Insert but sets refs to 1 and no possibility for upsert case.
// Used when evicted entries from protected segment are being added to
probationary segment.
void ReInsert(SLRUHandle* handle);
+ // Update the high-level metrics for a lookup operation.
+ void UpdateMetricsLookup(bool was_hit, bool caching);
+ // Update the segment-level metrics for a lookup operation.
+ void UpdateSegmentMetricsLookup(bool was_hit, bool caching);
private:
void RL_Remove(SLRUHandle* e);
@@ -144,6 +155,8 @@ class SLRUCacheShard {
// Updates memtracker to reflect entry being erased from cache.
// Unlike FreeEntry(), the eviction callback is not called and the entry is
not freed.
void SoftFreeEntry(SLRUHandle* e);
+ // Updates eviction related metrics.
+ void UpdateMetricsEviction(size_t charge);
// Update the memtracker's consumption by the given amount.
@@ -180,6 +193,8 @@ class SLRUCacheShard {
MemTracker* mem_tracker_;
std::atomic<int64_t> deferred_consumption_ { 0 };
+ SLRUCacheMetrics* metrics_;
+
// Initialized based on capacity_ to ensure an upper bound on the error on
the
// MemTracker consumption.
int64_t max_deferred_consumption_;
@@ -195,6 +210,8 @@ class SLRUCacheShardPair {
size_t protected_capacity,
uint32_t lookups);
+ void SetMetrics(SLRUCacheMetrics* metrics);
+
Handle* Insert(SLRUHandle* handle, EvictionCallback* eviction_callback);
// Like Cache::Lookup, but with an extra "hash" parameter.
Handle* Lookup(const Slice& key, uint32_t hash, bool caching);
@@ -204,8 +221,8 @@ class SLRUCacheShardPair {
bool ProtectedContains(const Slice& key, uint32_t hash);
private:
- SLRUCacheShard probationary_shard_;
- SLRUCacheShard protected_shard_;
+ SLRUCacheShard<Segment::kProbationary> probationary_shard_;
+ SLRUCacheShard<Segment::kProtected> protected_shard_;
// If an entry is looked up at least 'lookups_threshold_' times,
// it's upgraded to the protected segment.
@@ -239,7 +256,7 @@ class ShardedSLRUCache : public Cache {
EvictionCallback* eviction_callback) override;
void SetMetrics(std::unique_ptr<CacheMetrics> metrics,
- ExistingMetricsPolicy metrics_policy) override { }
+ ExistingMetricsPolicy metrics_policy) override;
size_t Invalidate(const InvalidationControl& ctl) override { return 0; }
@@ -261,11 +278,16 @@ class ShardedSLRUCache : public Cache {
// The destructor for 'shards_' must be called first since it releases all
the memory consumed.
std::shared_ptr<MemTracker> mem_tracker_;
+ std::unique_ptr<CacheMetrics> metrics_;
+
// The first shard in the pair belongs to the probationary segment, second
one to the protected.
std::vector<std::unique_ptr<SLRUCacheShardPair>> shards_;
// Number of bits of hash used to determine the shard.
const int shard_bits_;
+
+ // Used only when metrics are set to ensure that they are set only once in
test environments.
+ simple_spinlock metrics_lock_;
};
// Creates a new SLRU cache with 'probationary_capacity' being the capacity of