This is an automated email from the ASF dual-hosted git repository.
yiguolei pushed a commit to branch branch-4.0
in repository https://gitbox.apache.org/repos/asf/doris.git
The following commit(s) were added to refs/heads/branch-4.0 by this push:
new 92b09c90846 branch-4.0: [test](cache) add regression test cases for filecache statistics and cache eviction patterns #54062 (#56961)
92b09c90846 is described below
commit 92b09c908464fa94cc7342a46660ec6c438d3484
Author: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
AuthorDate: Fri Oct 17 17:05:44 2025 +0800
branch-4.0: [test](cache) add regression test cases for filecache statistics and cache eviction patterns #54062 (#56961)
Cherry-picked from #54062
Co-authored-by: Wen Zhenghu <[email protected]>
---
.../cache/test_file_cache_statistics.out | 5 +-
.../cache/test_file_cache_features.groovy | 222 +++++++++++++++++++++
.../cache/test_file_cache_statistics.groovy | 199 +++++++++++++++++-
3 files changed, 415 insertions(+), 11 deletions(-)
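For reviewers: both suites below rely on the same pattern of polling the
information_schema.file_cache_statistics table until a metric reaches its
expected value. A condensed sketch of that loop, using the same Awaitility
calls as the suites (the suite name and 'example_metric' are illustrative
placeholders, not part of this commit):

    import java.util.concurrent.TimeUnit
    import org.awaitility.Awaitility

    suite("file_cache_polling_sketch") {
        // Poll for up to 30 seconds; BE-side brpc metrics can lag behind the query.
        Awaitility.await().atMost(30, TimeUnit.SECONDS).pollInterval(2, TimeUnit.SECONDS).until {
            def result = sql """select METRIC_VALUE from information_schema.file_cache_statistics
                where METRIC_NAME = 'example_metric' limit 1;"""
            // Succeed once the metric exists and has flipped to 1
            return result.size() > 0 && Double.valueOf(result[0][0]) == 1.0
        }
    }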
diff --git a/regression-test/data/external_table_p0/cache/test_file_cache_statistics.out b/regression-test/data/external_table_p0/cache/test_file_cache_statistics.out
index 39444cdeb5c..3a80f52eedd 100644
--- a/regression-test/data/external_table_p0/cache/test_file_cache_statistics.out
+++ b/regression-test/data/external_table_p0/cache/test_file_cache_statistics.out
@@ -1,4 +1,7 @@
-- This file is automatically generated. You should know what you did if you want to edit this
+-- !1 --
+1 1534 5 24027 24.00 22824.48 0.10 0.04 N O 1996-03-30 1996-03-14 1996-04-01 NONE FOB pending foxes. slyly re cn beijing
+
-- !2 --
-1 7706 1 155190 17.00 21168.23 0.04 0.02 N O 1996-03-13 1996-02-12 1996-03-22 DELIVER IN PERSON TRUCK egular courts above the cn beijing
+1 1534 5 24027 24.00 22824.48 0.10 0.04 N O 1996-03-30 1996-03-14 1996-04-01 NONE FOB pending foxes. slyly re cn beijing
diff --git a/regression-test/suites/external_table_p0/cache/test_file_cache_features.groovy b/regression-test/suites/external_table_p0/cache/test_file_cache_features.groovy
new file mode 100644
index 00000000000..5951b9c79c5
--- /dev/null
+++ b/regression-test/suites/external_table_p0/cache/test_file_cache_features.groovy
@@ -0,0 +1,222 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import java.util.concurrent.TimeUnit;
+import org.awaitility.Awaitility;
+
+// Constants for file cache configuration
+final String BACKEND_CONFIG_CHECK_FAILED_PREFIX = "Backend configuration check failed: "
+final String FILE_CACHE_FEATURES_CHECK_FAILED_PREFIX = "File cache features check failed: "
+
+final String ENABLE_FILE_CACHE_CHECK_FAILED_MSG = BACKEND_CONFIG_CHECK_FAILED_PREFIX + "enable_file_cache is not set to true"
+final String FILE_CACHE_PATH_CHECK_FAILED_MSG = BACKEND_CONFIG_CHECK_FAILED_PREFIX + "file_cache_path is empty or not configured"
+final String INITIAL_DISK_RESOURCE_LIMIT_MODE_CHECK_FAILED_MSG = FILE_CACHE_FEATURES_CHECK_FAILED_PREFIX + "initial disk_resource_limit_mode does not exist"
+final String INITIAL_NEED_EVICT_CACHE_IN_ADVANCE_CHECK_FAILED_MSG = FILE_CACHE_FEATURES_CHECK_FAILED_PREFIX + "initial need_evict_cache_in_advance does not exist"
+final String INITIAL_VALUES_NOT_ZERO_CHECK_FAILED_MSG = FILE_CACHE_FEATURES_CHECK_FAILED_PREFIX + "initial values are not both 0 - "
+final String DISK_RESOURCE_LIMIT_MODE_TEST_FAILED_MSG = "Disk resource limit mode test failed"
+final String NEED_EVICT_CACHE_IN_ADVANCE_TEST_FAILED_MSG = "Need evict cache in advance test failed"
+
+suite("test_file_cache_features", "external_docker,hive,external_docker_hive,p0,external,nonConcurrent") {
+ String enabled = context.config.otherConfigs.get("enableHiveTest")
+ if (enabled == null || !enabled.equalsIgnoreCase("true")) {
+ logger.info("diable Hive test.")
+ return;
+ }
+
+ // Check backend configuration prerequisites
+ // Note: this test assumes a single-backend scenario. Testing with a single backend is logically
+ // equivalent to testing with multiple backends that have identical configurations, but simpler.
+ def enableFileCacheResult = sql """show backend config like 'enable_file_cache';"""
+ logger.info("enable_file_cache configuration: " + enableFileCacheResult)
+
+ if (enableFileCacheResult.size() == 0 || !enableFileCacheResult[0][3].equalsIgnoreCase("true")) {
+ logger.info(ENABLE_FILE_CACHE_CHECK_FAILED_MSG)
+ assertTrue(false, ENABLE_FILE_CACHE_CHECK_FAILED_MSG)
+ }
+
+ def fileCachePathResult = sql """show backend config like 'file_cache_path';"""
+ logger.info("file_cache_path configuration: " + fileCachePathResult)
+
+ if (fileCachePathResult.size() == 0 || fileCachePathResult[0][3] == null || fileCachePathResult[0][3].trim().isEmpty()) {
+ logger.info(FILE_CACHE_PATH_CHECK_FAILED_MSG)
+ assertTrue(false, FILE_CACHE_PATH_CHECK_FAILED_MSG)
+ }
+
+ String catalog_name = "test_file_cache_features"
+ String ex_db_name = "`tpch1_parquet`"
+ String externalEnvIp = context.config.otherConfigs.get("externalEnvIp")
+ String hms_port = context.config.otherConfigs.get(hivePrefix + "HmsPort")
+ String hdfs_port = context.config.otherConfigs.get(hivePrefix + "HdfsPort")
+
+ sql """set global enable_file_cache=true"""
+ sql """drop catalog if exists ${catalog_name} """
+
+ sql """CREATE CATALOG ${catalog_name} PROPERTIES (
+ 'type'='hms',
+ 'hive.metastore.uris' = 'thrift://${externalEnvIp}:${hms_port}',
+ 'hadoop.username' = 'hive'
+ );"""
+
+ sql """switch ${catalog_name}"""
+ sql """select l_returnflag, l_linestatus,
+ sum(l_quantity) as sum_qty,
+ sum(l_extendedprice) as sum_base_price,
+ sum(l_extendedprice * (1 - l_discount)) as sum_disc_price,
+ sum(l_extendedprice * (1 - l_discount) * (1 + l_tax)) as sum_charge,
+ avg(l_quantity) as avg_qty,
+ avg(l_extendedprice) as avg_price,
+ avg(l_discount) as avg_disc,
+ count(*) as count_order
+ from ${catalog_name}.${ex_db_name}.lineitem
+ where l_shipdate <= date '1998-12-01' - interval '90' day
+ group by l_returnflag, l_linestatus
+ order by l_returnflag, l_linestatus;"""
+
+ // Check file cache features
+ // ===== File Cache Features Metrics Check =====
+ // Get initial values for disk_resource_limit_mode and need_evict_cache_in_advance
+ def initialDiskResourceLimitModeResult = sql """select METRIC_VALUE from information_schema.file_cache_statistics
+ where METRIC_NAME = 'disk_resource_limit_mode' limit 1;"""
+ logger.info("Initial disk_resource_limit_mode result: " +
initialDiskResourceLimitModeResult)
+
+ def initialNeedEvictCacheInAdvanceResult = sql """select METRIC_VALUE from information_schema.file_cache_statistics
+ where METRIC_NAME = 'need_evict_cache_in_advance' limit 1;"""
+ logger.info("Initial need_evict_cache_in_advance result: " +
initialNeedEvictCacheInAdvanceResult)
+
+ // Check if initial values exist
+ if (initialDiskResourceLimitModeResult.size() == 0) {
+ logger.info(INITIAL_DISK_RESOURCE_LIMIT_MODE_CHECK_FAILED_MSG)
+ assertTrue(false, INITIAL_DISK_RESOURCE_LIMIT_MODE_CHECK_FAILED_MSG)
+ }
+ if (initialNeedEvictCacheInAdvanceResult.size() == 0) {
+ logger.info(INITIAL_NEED_EVICT_CACHE_IN_ADVANCE_CHECK_FAILED_MSG)
+ assertTrue(false, INITIAL_NEED_EVICT_CACHE_IN_ADVANCE_CHECK_FAILED_MSG)
+ }
+
+ // Store initial values
+ double initialDiskResourceLimitMode = Double.valueOf(initialDiskResourceLimitModeResult[0][0])
+ double initialNeedEvictCacheInAdvance = Double.valueOf(initialNeedEvictCacheInAdvanceResult[0][0])
+
+ logger.info("Initial file cache features values -
disk_resource_limit_mode: ${initialDiskResourceLimitMode}, " +
+ "need_evict_cache_in_advance: ${initialNeedEvictCacheInAdvance}")
+
+ // Check if initial values are both 0
+ if (initialDiskResourceLimitMode != 0.0 || initialNeedEvictCacheInAdvance != 0.0) {
+ logger.info(INITIAL_VALUES_NOT_ZERO_CHECK_FAILED_MSG +
+ "disk_resource_limit_mode: ${initialDiskResourceLimitMode}, need_evict_cache_in_advance: ${initialNeedEvictCacheInAdvance}")
+ assertTrue(false, INITIAL_VALUES_NOT_ZERO_CHECK_FAILED_MSG +
+ "disk_resource_limit_mode: ${initialDiskResourceLimitMode}, need_evict_cache_in_advance: ${initialNeedEvictCacheInAdvance}")
+ }
+
+ // Set backend configuration parameters for testing
+ boolean diskResourceLimitModeTestPassed = true
+ setBeConfigTemporary([
+ "file_cache_enter_disk_resource_limit_mode_percent": "2",
+ "file_cache_exit_disk_resource_limit_mode_percent": "1"
+ ]) {
+ // Execute test logic with modified configuration
+ logger.info("Backend configuration set -
file_cache_enter_disk_resource_limit_mode_percent: 2, " +
+ "file_cache_exit_disk_resource_limit_mode_percent: 1")
+
+ // Wait for disk_resource_limit_mode metric to change to 1
+ try {
+ Awaitility.await().atMost(30, TimeUnit.SECONDS).pollInterval(2, TimeUnit.SECONDS).until {
+ def updatedDiskResourceLimitModeResult = sql """select METRIC_VALUE from information_schema.file_cache_statistics
+ where METRIC_NAME = 'disk_resource_limit_mode' limit 1;"""
+ logger.info("Checking disk_resource_limit_mode result: " +
updatedDiskResourceLimitModeResult)
+
+ if (updatedDiskResourceLimitModeResult.size() > 0) {
+ double updatedDiskResourceLimitMode = Double.valueOf(updatedDiskResourceLimitModeResult[0][0])
+ logger.info("Current disk_resource_limit_mode value:
${updatedDiskResourceLimitMode}")
+
+ if (updatedDiskResourceLimitMode == 1.0) {
+ logger.info("Disk resource limit mode is now active
(value = 1)")
+ return true
+ } else {
+ logger.info("Disk resource limit mode is not yet
active (value = ${updatedDiskResourceLimitMode}), waiting...")
+ return false
+ }
+ } else {
+ logger.info("Failed to get disk_resource_limit_mode
metric, waiting...")
+ return false
+ }
+ }
+ } catch (Exception e) {
+ logger.info(DISK_RESOURCE_LIMIT_MODE_TEST_FAILED_MSG + ": " + e.getMessage())
+ diskResourceLimitModeTestPassed = false
+ }
+ }
+
+ // Check disk resource limit mode test result
+ if (!diskResourceLimitModeTestPassed) {
+ logger.info(DISK_RESOURCE_LIMIT_MODE_TEST_FAILED_MSG)
+ assertTrue(false, DISK_RESOURCE_LIMIT_MODE_TEST_FAILED_MSG)
+ }
+
+ // Set backend configuration parameters for need_evict_cache_in_advance testing
+ boolean needEvictCacheInAdvanceTestPassed = true
+ setBeConfigTemporary([
+ "enable_evict_file_cache_in_advance": "true",
+ "file_cache_enter_need_evict_cache_in_advance_percent": "2",
+ "file_cache_exit_need_evict_cache_in_advance_percent": "1"
+ ]) {
+ // Execute test logic with modified configuration for need_evict_cache_in_advance
+ logger.info("Backend configuration set for need_evict_cache_in_advance
- " +
+ "enable_evict_file_cache_in_advance: true, " +
+ "file_cache_enter_need_evict_cache_in_advance_percent: 2, " +
+ "file_cache_exit_need_evict_cache_in_advance_percent: 1")
+
+ // Wait for need_evict_cache_in_advance metric to change to 1
+ try {
+ Awaitility.await().atMost(30, TimeUnit.SECONDS).pollInterval(2, TimeUnit.SECONDS).until {
+ def updatedNeedEvictCacheInAdvanceResult = sql """select METRIC_VALUE from information_schema.file_cache_statistics
+ where METRIC_NAME = 'need_evict_cache_in_advance' limit 1;"""
+ logger.info("Checking need_evict_cache_in_advance result: " +
updatedNeedEvictCacheInAdvanceResult)
+
+ if (updatedNeedEvictCacheInAdvanceResult.size() > 0) {
+ double updatedNeedEvictCacheInAdvance = Double.valueOf(updatedNeedEvictCacheInAdvanceResult[0][0])
+ logger.info("Current need_evict_cache_in_advance value:
${updatedNeedEvictCacheInAdvance}")
+
+ if (updatedNeedEvictCacheInAdvance == 1.0) {
+ logger.info("Need evict cache in advance mode is now
active (value = 1)")
+ return true
+ } else {
+ logger.info("Need evict cache in advance mode is not
yet active (value = ${updatedNeedEvictCacheInAdvance}), waiting...")
+ return false
+ }
+ } else {
+ logger.info("Failed to get need_evict_cache_in_advance
metric, waiting...")
+ return false
+ }
+ }
+ } catch (Exception e) {
+ logger.info(NEED_EVICT_CACHE_IN_ADVANCE_TEST_FAILED_MSG + ": " + e.getMessage())
+ needEvictCacheInAdvanceTestPassed = false
+ }
+ }
+
+ // Check need evict cache in advance test result
+ if (!needEvictCacheInAdvanceTestPassed) {
+ logger.info(NEED_EVICT_CACHE_IN_ADVANCE_TEST_FAILED_MSG)
+ assertTrue(false, NEED_EVICT_CACHE_IN_ADVANCE_TEST_FAILED_MSG)
+ }
+ // ===== End File Cache Features Metrics Check =====
+
+ sql """set global enable_file_cache=false"""
+ return true
+}
+
diff --git a/regression-test/suites/external_table_p0/cache/test_file_cache_statistics.groovy b/regression-test/suites/external_table_p0/cache/test_file_cache_statistics.groovy
index a4796c73d4e..b0984445e5f 100644
--- a/regression-test/suites/external_table_p0/cache/test_file_cache_statistics.groovy
+++ b/regression-test/suites/external_table_p0/cache/test_file_cache_statistics.groovy
@@ -18,20 +18,62 @@
import java.util.concurrent.TimeUnit;
import org.awaitility.Awaitility;
-suite("test_file_cache_statistics", "external_docker,hive,external_docker_hive,p0,external") {
+// Constants for backend configuration check
+final String BACKEND_CONFIG_CHECK_FAILED_PREFIX = "Backend configuration check failed: "
+final String ENABLE_FILE_CACHE_CHECK_FAILED_MSG = BACKEND_CONFIG_CHECK_FAILED_PREFIX + "enable_file_cache is not set to true"
+final String FILE_CACHE_PATH_CHECK_FAILED_MSG = BACKEND_CONFIG_CHECK_FAILED_PREFIX + "file_cache_path is empty or not configured"
+
+// Constants for hit ratio check
+final String HIT_RATIO_CHECK_FAILED_PREFIX = "Hit ratio check failed: "
+final String HIT_RATIO_METRIC_FALSE_MSG = HIT_RATIO_CHECK_FAILED_PREFIX + "hits_ratio metric is false"
+final String HIT_RATIO_1H_METRIC_FALSE_MSG = HIT_RATIO_CHECK_FAILED_PREFIX + "hits_ratio_1h metric is false"
+final String HIT_RATIO_5M_METRIC_FALSE_MSG = HIT_RATIO_CHECK_FAILED_PREFIX + "hits_ratio_5m metric is false"
+
+// Constants for normal queue check
+final String NORMAL_QUEUE_CHECK_FAILED_PREFIX = "Normal queue check failed: "
+final String NORMAL_QUEUE_SIZE_VALIDATION_FAILED_MSG = NORMAL_QUEUE_CHECK_FAILED_PREFIX + "size validation failed (curr_size should be > 0 and < max_size)"
+final String NORMAL_QUEUE_ELEMENTS_VALIDATION_FAILED_MSG = NORMAL_QUEUE_CHECK_FAILED_PREFIX + "elements validation failed (curr_elements should be > 0 and < max_elements)"
+
+// Constants for hit and read counts check
+final String HIT_AND_READ_COUNTS_CHECK_FAILED_PREFIX = "Hit and read counts check failed: "
+final String INITIAL_TOTAL_HIT_COUNTS_NOT_GREATER_THAN_0_MSG = HIT_AND_READ_COUNTS_CHECK_FAILED_PREFIX + "initial total_hit_counts is not greater than 0"
+final String INITIAL_TOTAL_READ_COUNTS_NOT_GREATER_THAN_0_MSG = HIT_AND_READ_COUNTS_CHECK_FAILED_PREFIX + "initial total_read_counts is not greater than 0"
+final String TOTAL_HIT_COUNTS_DID_NOT_INCREASE_MSG = HIT_AND_READ_COUNTS_CHECK_FAILED_PREFIX + "total_hit_counts did not increase after cache operation"
+final String TOTAL_READ_COUNTS_DID_NOT_INCREASE_MSG = HIT_AND_READ_COUNTS_CHECK_FAILED_PREFIX + "total_read_counts did not increase after cache operation"
+
+suite("test_file_cache_statistics", "external_docker,hive,external_docker_hive,p0,external,nonConcurrent") {
String enabled = context.config.otherConfigs.get("enableHiveTest")
if (enabled == null || !enabled.equalsIgnoreCase("true")) {
logger.info("diable Hive test.")
return;
}
+ // Check backend configuration prerequisites
+ // Note: this test assumes a single-backend scenario. Testing with a single backend is logically
+ // equivalent to testing with multiple backends that have identical configurations, but simpler.
+ def enableFileCacheResult = sql """show backend config like 'enable_file_cache';"""
+ logger.info("enable_file_cache configuration: " + enableFileCacheResult)
+
+ if (enableFileCacheResult.size() == 0 || !enableFileCacheResult[0][3].equalsIgnoreCase("true")) {
+ logger.info(ENABLE_FILE_CACHE_CHECK_FAILED_MSG)
+ assertTrue(false, ENABLE_FILE_CACHE_CHECK_FAILED_MSG)
+ }
+
+ def fileCachePathResult = sql """show backend config like 'file_cache_path';"""
+ logger.info("file_cache_path configuration: " + fileCachePathResult)
+
+ if (fileCachePathResult.size() == 0 || fileCachePathResult[0][3] == null || fileCachePathResult[0][3].trim().isEmpty()) {
+ logger.info(FILE_CACHE_PATH_CHECK_FAILED_MSG)
+ assertTrue(false, FILE_CACHE_PATH_CHECK_FAILED_MSG)
+ }
+
String catalog_name = "test_file_cache_statistics"
String ex_db_name = "`default`"
String externalEnvIp = context.config.otherConfigs.get("externalEnvIp")
String hms_port = context.config.otherConfigs.get(hivePrefix + "HmsPort")
String hdfs_port = context.config.otherConfigs.get(hivePrefix + "HdfsPort")
- sql """set enable_file_cache=true"""
+ sql """set global enable_file_cache=true"""
sql """drop catalog if exists ${catalog_name} """
sql """CREATE CATALOG ${catalog_name} PROPERTIES (
@@ -42,18 +84,155 @@ suite("test_file_cache_statistics", "external_docker,hive,external_docker_hive,p
sql """switch ${catalog_name}"""
- order_qt_2 """select * from ${catalog_name}.${ex_db_name}.parquet_partition_table order by l_orderkey limit 1;"""
+ // load the table into file cache
+ sql """select * from ${catalog_name}.${ex_db_name}.parquet_partition_table
where l_orderkey=1 and l_partkey=1534 limit 1;"""
+ // run it twice to make sure the table blocks can hit the cache
+ order_qt_1 """select * from ${catalog_name}.${ex_db_name}.parquet_partition_table where l_orderkey=1 and l_partkey=1534 limit 1;"""
+
// brpc metrics are updated within at most 20 seconds
Awaitility.await().atMost(30, TimeUnit.SECONDS).pollInterval(1, TimeUnit.SECONDS).until{
- def result = sql """select METRIC_VALUE from information_schema.file_cache_statistics where METRIC_NAME like "%hits_ratio%" order by METRIC_VALUE limit 1;"""
- logger.info("result " + result)
- if (result.size() == 0) {
- return false;
+ // ===== Hit Ratio Metrics Check =====
+ // Check overall hit ratio hits_ratio
+ def hitsRatioResult = sql """select METRIC_VALUE from information_schema.file_cache_statistics where METRIC_NAME = 'hits_ratio' limit 1;"""
+ logger.info("hits_ratio result: " + hitsRatioResult)
+
+ // Check 1-hour hit ratio hits_ratio_1h
+ def hitsRatio1hResult = sql """select METRIC_VALUE from information_schema.file_cache_statistics where METRIC_NAME = 'hits_ratio_1h' limit 1;"""
+ logger.info("hits_ratio_1h result: " + hitsRatio1hResult)
+
+ // Check 5-minute hit ratio hits_ratio_5m
+ def hitsRatio5mResult = sql """select METRIC_VALUE from information_schema.file_cache_statistics where METRIC_NAME = 'hits_ratio_5m' limit 1;"""
+ logger.info("hits_ratio_5m result: " + hitsRatio5mResult)
+
+ // Check if all three metrics exist and are greater than 0
+ boolean hasHitsRatio = hitsRatioResult.size() > 0 && Double.valueOf(hitsRatioResult[0][0]) > 0
+ boolean hasHitsRatio1h = hitsRatio1hResult.size() > 0 && Double.valueOf(hitsRatio1hResult[0][0]) > 0
+ boolean hasHitsRatio5m = hitsRatio5mResult.size() > 0 && Double.valueOf(hitsRatio5mResult[0][0]) > 0
+
+ logger.info("Hit ratio metrics check result - hits_ratio:
${hasHitsRatio}, hits_ratio_1h: ${hasHitsRatio1h}, hits_ratio_5m:
${hasHitsRatio5m}")
+
+ // Return false if any metric is false, otherwise return true
+ if (!hasHitsRatio) {
+ logger.info(HIT_RATIO_METRIC_FALSE_MSG)
+ assertTrue(false, HIT_RATIO_METRIC_FALSE_MSG)
+ }
+ if (!hasHitsRatio1h) {
+ logger.info(HIT_RATIO_1H_METRIC_FALSE_MSG)
+ assertTrue(false, HIT_RATIO_1H_METRIC_FALSE_MSG)
+ }
+ if (!hasHitsRatio5m) {
+ logger.info(HIT_RATIO_5M_METRIC_FALSE_MSG)
+ assertTrue(false, HIT_RATIO_5M_METRIC_FALSE_MSG)
+ }
+ // ===== End Hit Ratio Metrics Check =====
+
+ // ===== Normal Queue Metrics Check =====
+ // Check normal queue current size and max size
+ def normalQueueCurrSizeResult = sql """select METRIC_VALUE from information_schema.file_cache_statistics
+ where METRIC_NAME = 'normal_queue_curr_size' limit 1;"""
+ logger.info("normal_queue_curr_size result: " +
normalQueueCurrSizeResult)
+
+ def normalQueueMaxSizeResult = sql """select METRIC_VALUE from information_schema.file_cache_statistics
+ where METRIC_NAME = 'normal_queue_max_size' limit 1;"""
+ logger.info("normal_queue_max_size result: " +
normalQueueMaxSizeResult)
+
+ // Check normal queue current elements and max elements
+ def normalQueueCurrElementsResult = sql """select METRIC_VALUE from information_schema.file_cache_statistics
+ where METRIC_NAME = 'normal_queue_curr_elements' limit 1;"""
+ logger.info("normal_queue_curr_elements result: " +
normalQueueCurrElementsResult)
+
+ def normalQueueMaxElementsResult = sql """select METRIC_VALUE from information_schema.file_cache_statistics
+ where METRIC_NAME = 'normal_queue_max_elements' limit 1;"""
+ logger.info("normal_queue_max_elements result: " +
normalQueueMaxElementsResult)
+
+ // Check normal queue size metrics
+ boolean hasNormalQueueCurrSize = normalQueueCurrSizeResult.size() > 0 &&
+ Double.valueOf(normalQueueCurrSizeResult[0][0]) > 0
+ boolean hasNormalQueueMaxSize = normalQueueMaxSizeResult.size() > 0 &&
+ Double.valueOf(normalQueueMaxSizeResult[0][0]) > 0
+ boolean hasNormalQueueCurrElements = normalQueueCurrElementsResult.size() > 0 &&
+ Double.valueOf(normalQueueCurrElementsResult[0][0]) > 0
+ boolean hasNormalQueueMaxElements = normalQueueMaxElementsResult.size() > 0 &&
+ Double.valueOf(normalQueueMaxElementsResult[0][0]) > 0
+
+ // Check if current size is less than max size and current elements are less than max elements
+ boolean normalQueueSizeValid = hasNormalQueueCurrSize && hasNormalQueueMaxSize &&
+ Double.valueOf(normalQueueCurrSizeResult[0][0]) < Double.valueOf(normalQueueMaxSizeResult[0][0])
+ boolean normalQueueElementsValid = hasNormalQueueCurrElements && hasNormalQueueMaxElements &&
+ Double.valueOf(normalQueueCurrElementsResult[0][0]) < Double.valueOf(normalQueueMaxElementsResult[0][0])
+
+ logger.info("Normal queue metrics check result - size valid:
${normalQueueSizeValid}, " +
+ "elements valid: ${normalQueueElementsValid}")
+
+ if (!normalQueueSizeValid) {
+ logger.info(NORMAL_QUEUE_SIZE_VALIDATION_FAILED_MSG)
+ assertTrue(false, NORMAL_QUEUE_SIZE_VALIDATION_FAILED_MSG)
+ }
+ if (!normalQueueElementsValid) {
+ logger.info(NORMAL_QUEUE_ELEMENTS_VALIDATION_FAILED_MSG)
+ assertTrue(false, NORMAL_QUEUE_ELEMENTS_VALIDATION_FAILED_MSG)
+ }
+ // ===== End Normal Queue Metrics Check =====
+
+ // ===== Hit and Read Counts Metrics Check =====
+ // Get initial values for hit and read counts
+ def initialHitCountsResult = sql """select METRIC_VALUE from information_schema.file_cache_statistics
+ where METRIC_NAME = 'total_hit_counts' limit 1;"""
+ logger.info("Initial total_hit_counts result: " +
initialHitCountsResult)
+
+ def initialReadCountsResult = sql """select METRIC_VALUE from information_schema.file_cache_statistics
+ where METRIC_NAME = 'total_read_counts' limit 1;"""
+ logger.info("Initial total_read_counts result: " +
initialReadCountsResult)
+
+ // Check if initial values exist and are greater than 0
+ if (initialHitCountsResult.size() == 0 || Double.valueOf(initialHitCountsResult[0][0]) <= 0) {
+ logger.info(INITIAL_TOTAL_HIT_COUNTS_NOT_GREATER_THAN_0_MSG)
+ assertTrue(false, INITIAL_TOTAL_HIT_COUNTS_NOT_GREATER_THAN_0_MSG)
+ }
+ if (initialReadCountsResult.size() == 0 || Double.valueOf(initialReadCountsResult[0][0]) <= 0) {
+ logger.info(INITIAL_TOTAL_READ_COUNTS_NOT_GREATER_THAN_0_MSG)
+ assertTrue(false, INITIAL_TOTAL_READ_COUNTS_NOT_GREATER_THAN_0_MSG)
+ }
+
+ // Store initial values
+ double initialHitCounts = Double.valueOf(initialHitCountsResult[0][0])
+ double initialReadCounts = Double.valueOf(initialReadCountsResult[0][0])
+
+ // Execute the same query to trigger cache operations
+ order_qt_2 """select * from ${catalog_name}.${ex_db_name}.parquet_partition_table
+ where l_orderkey=1 and l_partkey=1534 limit 1;"""
+
+ // Get updated values after cache operations
+ def updatedHitCountsResult = sql """select METRIC_VALUE from information_schema.file_cache_statistics
+ where METRIC_NAME = 'total_hit_counts' limit 1;"""
+ logger.info("Updated total_hit_counts result: " +
updatedHitCountsResult)
+
+ def updatedReadCountsResult = sql """select METRIC_VALUE from information_schema.file_cache_statistics
+ where METRIC_NAME = 'total_read_counts' limit 1;"""
+ logger.info("Updated total_read_counts result: " +
updatedReadCountsResult)
+
+ // Check if updated values are greater than initial values
+ double updatedHitCounts = Double.valueOf(updatedHitCountsResult[0][0])
+ double updatedReadCounts = Double.valueOf(updatedReadCountsResult[0][0])
+
+ boolean hitCountsIncreased = updatedHitCounts > initialHitCounts
+ boolean readCountsIncreased = updatedReadCounts > initialReadCounts
+
+ logger.info("Hit and read counts comparison - hit_counts:
${initialHitCounts} -> " +
+ "${updatedHitCounts} (increased: ${hitCountsIncreased}),
read_counts: ${initialReadCounts} -> " +
+ "${updatedReadCounts} (increased: ${readCountsIncreased})")
+
+ if (!hitCountsIncreased) {
+ logger.info(TOTAL_HIT_COUNTS_DID_NOT_INCREASE_MSG)
+ assertTrue(false, TOTAL_HIT_COUNTS_DID_NOT_INCREASE_MSG)
}
- if (Double.valueOf(result[0][0]) > 0) {
- return true;
+ if (!readCountsIncreased) {
+ logger.info(TOTAL_READ_COUNTS_DID_NOT_INCREASE_MSG)
+ assertTrue(false, TOTAL_READ_COUNTS_DID_NOT_INCREASE_MSG)
}
- return false;
+ // ===== End Hit and Read Counts Metrics Check =====
+ sql """set global enable_file_cache=false"""
+ return true
}
}
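Reviewer note: setBeConfigTemporary, used twice in test_file_cache_features.groovy
above, takes a map of BE config overrides plus a closure; as its name suggests, it
applies the overrides for the duration of the closure and restores the previous
values afterwards. A minimal usage sketch with a key taken from this commit (the
threshold value is illustrative):

    setBeConfigTemporary(["file_cache_enter_disk_resource_limit_mode_percent": "2"]) {
        // the BE runs with the override inside this closure;
        // the original value is restored when the closure exits
        logger.info("override active")
    }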
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]