This is an automated email from the ASF dual-hosted git repository. morningman pushed a commit to branch branch-1.2-lts in repository https://gitbox.apache.org/repos/asf/doris.git
commit df4672e9f1d700cd0b0cc80a72c0815e17aaf24b Author: zhannngchen <[email protected]> AuthorDate: Thu Feb 23 18:24:18 2023 +0800 [enhancement](cache) make segment cache prune more effectively (#17011) BloomFilter in MoW table may consume lots of memory, and its life cycle is the same as the segment. This patch tries to improve the efficiency of recycling segment cache, to release the memory in time. --- be/src/common/config.h | 2 +- be/src/olap/lru_cache.cpp | 10 ++++++--- be/src/olap/lru_cache.h | 6 ++--- be/src/olap/segment_loader.cpp | 3 ++- be/test/olap/lru_cache_test.cpp | 50 +++++++++++++++++++++++++++++++++++++++++ 5 files changed, 63 insertions(+), 8 deletions(-) diff --git a/be/src/common/config.h b/be/src/common/config.h index 49e8f09983..b278a9c55c 100644 --- a/be/src/common/config.h +++ b/be/src/common/config.h @@ -216,7 +216,7 @@ CONF_mInt64(memory_limitation_per_thread_for_schema_change_bytes, "2147483648"); CONF_mInt64(memory_limitation_per_thread_for_storage_migration_bytes, "100000000"); // the clean interval of file descriptor cache and segment cache -CONF_mInt32(cache_clean_interval, "1800"); +CONF_mInt32(cache_clean_interval, "60"); CONF_mInt32(disk_stat_monitor_interval, "5"); CONF_mInt32(unused_rowset_monitor_interval, "30"); CONF_String(storage_root_path, "${DORIS_HOME}/storage"); diff --git a/be/src/olap/lru_cache.cpp b/be/src/olap/lru_cache.cpp index 9b4fa21fbc..0eb6fe53bb 100644 --- a/be/src/olap/lru_cache.cpp +++ b/be/src/olap/lru_cache.cpp @@ -392,7 +392,7 @@ int64_t LRUCache::prune() { return pruned_count; } -int64_t LRUCache::prune_if(CacheValuePredicate pred) { +int64_t LRUCache::prune_if(CacheValuePredicate pred, bool lazy_mode) { LRUHandle* to_remove_head = nullptr; { std::lock_guard<std::mutex> l(_mutex); @@ -403,6 +403,8 @@ int64_t LRUCache::prune_if(CacheValuePredicate pred) { _evict_one_entry(p); p->next = to_remove_head; to_remove_head = p; + } else if (lazy_mode) { + break; } p = next; } @@ -414,6 +416,8 @@ int64_t 
LRUCache::prune_if(CacheValuePredicate pred) { _evict_one_entry(p); p->next = to_remove_head; to_remove_head = p; + } else if (lazy_mode) { + break; } p = next; } @@ -518,10 +522,10 @@ int64_t ShardedLRUCache::prune() { return num_prune; } -int64_t ShardedLRUCache::prune_if(CacheValuePredicate pred) { +int64_t ShardedLRUCache::prune_if(CacheValuePredicate pred, bool lazy_mode) { int64_t num_prune = 0; for (int s = 0; s < _num_shards; s++) { - num_prune += _shards[s]->prune_if(pred); + num_prune += _shards[s]->prune_if(pred, lazy_mode); } return num_prune; } diff --git a/be/src/olap/lru_cache.h b/be/src/olap/lru_cache.h index a6e2f81fb4..8d86fa9b1b 100644 --- a/be/src/olap/lru_cache.h +++ b/be/src/olap/lru_cache.h @@ -215,7 +215,7 @@ public: // Same as prune(), but the entry will only be pruned if the predicate matched. // NOTICE: the predicate should be simple enough, or the prune_if() function // may hold lock for a long time to execute predicate. - virtual int64_t prune_if(CacheValuePredicate pred) { return 0; } + virtual int64_t prune_if(CacheValuePredicate pred, bool lazy_mode = false) { return 0; } virtual int64_t mem_consumption() = 0; @@ -320,7 +320,7 @@ public: void release(Cache::Handle* handle); void erase(const CacheKey& key, uint32_t hash); int64_t prune(); - int64_t prune_if(CacheValuePredicate pred); + int64_t prune_if(CacheValuePredicate pred, bool lazy_mode = false); uint64_t get_lookup_count() const { return _lookup_count; } uint64_t get_hit_count() const { return _hit_count; } @@ -374,7 +374,7 @@ public: Slice value_slice(Handle* handle) override; virtual uint64_t new_id() override; virtual int64_t prune() override; - virtual int64_t prune_if(CacheValuePredicate pred) override; + int64_t prune_if(CacheValuePredicate pred, bool lazy_mode = false) override; int64_t mem_consumption() override; private: diff --git a/be/src/olap/segment_loader.cpp b/be/src/olap/segment_loader.cpp index 3e604adf17..fa1e6c8751 100644 --- a/be/src/olap/segment_loader.cpp 
+++ b/be/src/olap/segment_loader.cpp @@ -97,7 +97,8 @@ Status SegmentLoader::prune() { MonotonicStopWatch watch; watch.start(); - int64_t prune_num = _cache->prune_if(pred); + // Prune cache in lazy mode to save cpu and minimize the time holding write lock + int64_t prune_num = _cache->prune_if(pred, true); LOG(INFO) << "prune " << prune_num << " entries in segment cache. cost(ms): " << watch.elapsed_time() / 1000 / 1000; return Status::OK(); diff --git a/be/test/olap/lru_cache_test.cpp b/be/test/olap/lru_cache_test.cpp index c779996c54..83604a26bb 100644 --- a/be/test/olap/lru_cache_test.cpp +++ b/be/test/olap/lru_cache_test.cpp @@ -314,6 +314,56 @@ TEST_F(CacheTest, Prune) { EXPECT_EQ(0, cache.get_usage()); } +TEST_F(CacheTest, PruneIfLazyMode) { + LRUCache cache(LRUCacheType::NUMBER); + cache.set_capacity(10); + + // The lru usage is 1, add one entry + CacheKey key1("100"); + insert_LRUCache(cache, key1, 100, CachePriority::NORMAL); + EXPECT_EQ(1, cache.get_usage()); + + CacheKey key2("200"); + insert_LRUCache(cache, key2, 200, CachePriority::DURABLE); + EXPECT_EQ(2, cache.get_usage()); + + CacheKey key3("300"); + insert_LRUCache(cache, key3, 300, CachePriority::NORMAL); + EXPECT_EQ(3, cache.get_usage()); + + CacheKey key4("666"); + insert_LRUCache(cache, key4, 666, CachePriority::NORMAL); + EXPECT_EQ(4, cache.get_usage()); + + CacheKey key5("500"); + insert_LRUCache(cache, key5, 500, CachePriority::NORMAL); + EXPECT_EQ(5, cache.get_usage()); + + CacheKey key6("600"); + insert_LRUCache(cache, key6, 600, CachePriority::NORMAL); + EXPECT_EQ(6, cache.get_usage()); + + CacheKey key7("700"); + insert_LRUCache(cache, key7, 700, CachePriority::DURABLE); + EXPECT_EQ(7, cache.get_usage()); + + auto pred = [](const void* value) -> bool { return false; }; + cache.prune_if(pred, true); + EXPECT_EQ(7, cache.get_usage()); + + // in lazy mode, the first item does not satisfy pred2, so `prune_if` stops + // and no items are removed. 
+ auto pred2 = [](const void* value) -> bool { return DecodeValue((void*)value) > 400; }; + cache.prune_if(pred2, true); + EXPECT_EQ(7, cache.get_usage()); + + // in normal priority, 100, 300 are removed + // in durable priority, 200 is removed + auto pred3 = [](const void* value) -> bool { return DecodeValue((void*)value) <= 600; }; + EXPECT_EQ(3, cache.prune_if(pred3, true)); + EXPECT_EQ(4, cache.get_usage()); +} + TEST_F(CacheTest, HeavyEntries) { // Add a bunch of light and heavy entries and then count the combined // size of items still in the cache, which must be approximately the --------------------------------------------------------------------- To unsubscribe, e-mail: [email protected] For additional commands, e-mail: [email protected]
