devmadhuu commented on code in PR #9258:
URL: https://github.com/apache/ozone/pull/9258#discussion_r2975251927


##########
hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestUnhealthyContainersDerbyPerformance.java:
##########
@@ -0,0 +1,714 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.recon.persistence;
+
+import static 
org.apache.ozone.recon.schema.generated.tables.UnhealthyContainersTable.UNHEALTHY_CONTAINERS;
+import static org.jooq.impl.DSL.count;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+import com.google.inject.AbstractModule;
+import com.google.inject.Guice;
+import com.google.inject.Injector;
+import com.google.inject.Provider;
+import java.io.File;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.EnumMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+import 
org.apache.hadoop.ozone.recon.ReconControllerModule.ReconDaoBindingModule;
+import org.apache.hadoop.ozone.recon.ReconSchemaManager;
+import 
org.apache.hadoop.ozone.recon.persistence.AbstractReconSqlDBTest.DerbyDataSourceConfigurationProvider;
+import 
org.apache.hadoop.ozone.recon.persistence.ContainerHealthSchemaManager.UnhealthyContainerRecord;
+import 
org.apache.hadoop.ozone.recon.persistence.ContainerHealthSchemaManager.UnhealthyContainersSummary;
+import org.apache.ozone.recon.schema.ContainerSchemaDefinition;
+import 
org.apache.ozone.recon.schema.ContainerSchemaDefinition.UnHealthyContainerStates;
+import org.apache.ozone.recon.schema.ReconSchemaGenerationModule;
+import 
org.apache.ozone.recon.schema.generated.tables.daos.UnhealthyContainersDao;
+import org.jooq.DSLContext;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.MethodOrderer;
+import org.junit.jupiter.api.Order;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestInstance;
+import org.junit.jupiter.api.TestMethodOrder;
+import org.junit.jupiter.api.io.TempDir;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Performance benchmark for the UNHEALTHY_CONTAINERS Derby table at 1 million
+ * records scale.
+ *
+ * <h2>Data layout</h2>
+ * <pre>
+ *   Container IDs  : 1 – 200,000  (CONTAINER_ID_RANGE)
+ *   States per ID  : 5  (UNDER_REPLICATED, MISSING, OVER_REPLICATED,
+ *                        MIS_REPLICATED, EMPTY_MISSING)
+ *   Total records  : 200,000 × 5 = 1,000,000
+ *   Primary key    : (container_id, container_state)  — unique per pair
+ *   Index          : idx_state_container_id on (container_state, container_id)
+ *                    composite index supports both aggregates (COUNT/GROUP BY
+ *                    on state prefix) and O(1)-per-page cursor pagination
+ * </pre>
+ *
+ * <h2>Performance settings applied in this test</h2>
+ * <ul>
+ *   <li><b>Page cache</b>: {@code derby.storage.pageCacheSize = 20000}
+ *       (~80 MB of 4-KB pages) keeps hot B-tree nodes in memory, reducing
+ *       filesystem reads even with the file-based Derby driver.</li>
+ *   <li><b>JDBC fetch size</b>: set to {@value #READ_PAGE_SIZE} on each query
+ *       so Derby pre-buffers a full page of rows per JDBC round-trip instead
+ *       of the default 1-row-at-a-time fetch.</li>
+ *   <li><b>Large page size</b>: {@value #READ_PAGE_SIZE} rows per SQL fetch
+ *       reduces the number of SQL round-trips from 200 (@ 1 K rows) to 40
+ *       (@ 5 K rows) per 200 K-row state scan.</li>
+ *   <li><b>Large delete chunks</b>: {@value #DELETE_CHUNK_SIZE} IDs per
+ *       DELETE statement (matching the schema manager's internal chunk
+ *       limit), so a 100 K-ID batch delete issues 100 SQL statements
+ *       instead of one statement per ID.</li>
+ * </ul>
+ *
+ * <h2>What is measured</h2>
+ * <ol>
+ *   <li><b>Bulk INSERT throughput</b> – 1 M records via JOOQ batchInsert in
+ *       chunks of 1,000 inside a single Derby transaction.</li>
+ *   <li><b>COUNT(*) by state</b> – index-covered aggregate, one per 
state.</li>
+ *   <li><b>GROUP BY summary</b> – single pass over the
+ *       idx_state_container_id index to aggregate all states.</li>
+ *   <li><b>Paginated SELECT by state</b> – cursor-style walk using
+ *       minContainerId / maxContainerId to fetch the full 200 K rows of one
+ *       state in pages of {@value #READ_PAGE_SIZE}, without loading all rows
+ *       into the JVM heap at once.</li>
+ *   <li><b>Batch DELETE throughput</b> – removes records for half the
+ *       container IDs (100 K × 5 states = 500 K rows) via a single
+ *       IN-clause DELETE.</li>
+ * </ol>
+ *
+ * <h2>Design notes</h2>
+ * <ul>
+ *   <li>Derby is an embedded, single-file Java database — not designed for
+ *       production-scale workloads. Performance numbers here document its
+ *       baseline behaviour and will flag regressions, but should not be
+ *       compared with PostgreSQL / MySQL numbers.</li>
+ *   <li>Timing thresholds are deliberately generous (≈ 10× expected) to be
+ *       stable on slow CI machines. Actual durations are always logged.</li>
+ *   <li>Uses {@code @TestInstance(PER_CLASS)} so the 1 M-row dataset is
+ *       inserted exactly once in {@code @BeforeAll} and shared across all
+ *       {@code @Test} methods in the class.</li>
+ * </ul>
+ */
+@TestInstance(TestInstance.Lifecycle.PER_CLASS)
+@TestMethodOrder(MethodOrderer.OrderAnnotation.class)
+public class TestUnhealthyContainersDerbyPerformance {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestUnhealthyContainersDerbyPerformance.class);
+
+  // -----------------------------------------------------------------------
+  // Dataset constants
+  // -----------------------------------------------------------------------
+
+  /** Number of unique container IDs; each ID appears once in every state
+   *  listed in TESTED_STATES. */
+  private static final int CONTAINER_ID_RANGE = 200_000;
+
+  /** States distributed across all container IDs. */
+  private static final List<UnHealthyContainerStates> TESTED_STATES = 
Arrays.asList(
+      UnHealthyContainerStates.UNDER_REPLICATED,
+      UnHealthyContainerStates.MISSING,
+      UnHealthyContainerStates.OVER_REPLICATED,
+      UnHealthyContainerStates.MIS_REPLICATED,
+      UnHealthyContainerStates.EMPTY_MISSING);
+
+  /** Number of tested states (must equal TESTED_STATES.size(); keep in sync). */
+  private static final int STATE_COUNT = 5;
+
+  /** Total records = CONTAINER_ID_RANGE × STATE_COUNT. */
+  private static final int TOTAL_RECORDS = CONTAINER_ID_RANGE * STATE_COUNT;
+
+  /**
+   * Number of containers inserted per transaction.
+   *
+   * <p>Derby's WAL (Write-Ahead Log) must hold all uncommitted rows before
+   * a transaction commits.  Inserting all 1 M rows in one transaction causes
+   * Derby to exhaust its log buffer and hang indefinitely.  Committing in
+   * chunks of {@value} containers ({@value} × 5 states = 10,000 rows/tx)
+   * lets Derby flush the log after each commit, keeping each transaction
+   * fast and bounded in memory usage.</p>
+   */
+  private static final int CONTAINERS_PER_TX = 2_000;   // 2 000 × 5 = 10 000 
rows/tx
+
+  /**
+   * Number of container IDs to pass per
+   * {@link ContainerHealthSchemaManager#batchDeleteSCMStatesForContainers}
+   * call in the delete test.
+   *
+   * <p>{@code batchDeleteSCMStatesForContainers} now handles internal
+   * chunking at {@link ContainerHealthSchemaManager#MAX_DELETE_CHUNK_SIZE}
+   * ({@value ContainerHealthSchemaManager#MAX_DELETE_CHUNK_SIZE} IDs per
+   * SQL statement) to stay within Derby's 64 KB generated-bytecode limit
+   * (ERROR XBCM4).  This test-level constant controls how many IDs are
+   * accumulated before each call and should match that limit so the test
+   * exercises exactly one SQL DELETE per call.</p>
+   */
+  private static final int DELETE_CHUNK_SIZE = 1_000;   // matches 
MAX_DELETE_CHUNK_SIZE
+
+  /**
+   * Number of records returned per page in the paginated-read tests.
+   *
+   * <p>5,000 rows per page means only 40 SQL round-trips to scan 200,000
+   * records for a single state, compared to 200 trips at the old 1,000-row
+   * page size.  Combined with {@code query.fetchSize(READ_PAGE_SIZE)} this
+   * cuts round-trip overhead by 80% while keeping per-page heap usage well
+   * below 1 MB.</p>
+   */
+  private static final int READ_PAGE_SIZE = 5_000;
+
+  // -----------------------------------------------------------------------
+  // Performance thresholds (CI-safe; expected run times are 5–10× faster
+  // than the original file-based Derby baseline after the optimisations)
+  // -----------------------------------------------------------------------
+
+  /** Maximum acceptable time to insert all TOTAL_RECORDS into Derby. */
+  private static final long MAX_INSERT_SECONDS = 300;
+
+  /** Maximum acceptable time for a single COUNT(*)-by-state query. */
+  private static final long MAX_COUNT_BY_STATE_SECONDS = 30;
+
+  /** Maximum acceptable time for the GROUP-BY summary query. */
+  private static final long MAX_SUMMARY_SECONDS = 30;
+
+  /**
+   * Maximum acceptable time to page through all CONTAINER_ID_RANGE records
+   * of a single state using {@link #READ_PAGE_SIZE}-row pages.
+   */
+  private static final long MAX_PAGINATED_READ_SECONDS = 60;
+
+  /** Maximum acceptable time to batch-delete 500 K rows. */
+  private static final long MAX_DELETE_SECONDS = 60;
+
+  // -----------------------------------------------------------------------
+  // Infrastructure (shared for the life of this test class)
+  // -----------------------------------------------------------------------
+
+  private ContainerHealthSchemaManager schemaManager;
+  private UnhealthyContainersDao dao;
+  private ContainerSchemaDefinition schemaDefinition;
+
+  // -----------------------------------------------------------------------
+  // One-time setup: create Derby schema + insert 1 M records
+  // -----------------------------------------------------------------------
+
+  /**
+   * Initialises the embedded Derby database, creates the Recon schema, and
+   * inserts {@value #TOTAL_RECORDS} records.  This runs exactly once for the
+   * entire test class.
+   *
+   * <p>The {@code @TempDir} is injected as a <em>method parameter</em> rather
+   * than a class field.  With {@code @TestInstance(PER_CLASS)}, a field-level
+   * {@code @TempDir} is populated by JUnit's {@code TempDirExtension} in its
+   * own {@code beforeAll} callback, which may run <em>after</em> the user's
+   * {@code @BeforeAll} — leaving it null when needed here.  A method
+   * parameter is resolved by JUnit before the method body executes.</p>
+   *
+   * <h3>Performance settings applied here</h3>
+   * <ul>
+   *   <li><b>Page cache</b> ({@code derby.storage.pageCacheSize = 20000}):
+   *       ~80 MB of 4-KB B-tree pages resident in heap — covers the hot path
+   *       for index scans on a 1-M-row table even with the file-based
+   *       driver.</li>
+   * </ul>
+   */
+  @BeforeAll
+  public void setUpDatabaseAndInsertData(@TempDir Path tempDir) throws 
Exception {
+    LOG.info("=== Derby Performance Benchmark — Setup ===");
+    LOG.info("Dataset: {} states × {} container IDs = {} total records",
+        TESTED_STATES.size(), CONTAINER_ID_RANGE, TOTAL_RECORDS);
+
+    // Derby engine property — must be set before the first connection.
+    //
+    // pageCacheSize: number of 4-KB pages Derby keeps in its buffer pool.
+    //   Default = 1,000 pages (4 MB) — far too small for a 1-M-row table.
+    //   20,000 pages = ~80 MB, enough to hold the full B-tree for both the
+    //   primary-key index and the composite (state, container_id) index.
+    System.setProperty("derby.storage.pageCacheSize", "20000");
+
+    // ----- Guice wiring (mirrors AbstractReconSqlDBTest) -----
+    File configDir = Files.createDirectory(tempDir.resolve("Config")).toFile();
+    Provider<DataSourceConfiguration> configProvider =
+        new DerbyDataSourceConfigurationProvider(configDir);
+
+    Injector injector = Guice.createInjector(
+        new JooqPersistenceModule(configProvider),
+        new AbstractModule() {
+          @Override
+          protected void configure() {
+            bind(DataSourceConfiguration.class).toProvider(configProvider);
+            bind(ReconSchemaManager.class);
+          }
+        },
+        new ReconSchemaGenerationModule(),
+        new ReconDaoBindingModule());
+
+    injector.getInstance(ReconSchemaManager.class).createReconSchema();
+
+    dao = injector.getInstance(UnhealthyContainersDao.class);
+    schemaDefinition = injector.getInstance(ContainerSchemaDefinition.class);
+    schemaManager = new ContainerHealthSchemaManager(schemaDefinition, dao);
+
+    // ----- Insert 1 M records in small per-transaction chunks -----
+    //
+    // Why chunked?  insertUnhealthyContainerRecords wraps its entire input in
+    // a single Derby transaction.  Passing all 1 M records at once forces 
Derby
+    // to buffer the full WAL before committing, which exhausts its log and
+    // causes the call to hang.  Committing every CONTAINERS_PER_TX containers
+    // (= 10 K rows) keeps each transaction small and lets Derby flush the log.
+    int txCount = (int) Math.ceil((double) CONTAINER_ID_RANGE / 
CONTAINERS_PER_TX);
+    LOG.info("Starting bulk INSERT: {} records  ({} containers/tx, {} 
transactions)",
+        TOTAL_RECORDS, CONTAINERS_PER_TX, txCount);
+
+    long now = System.currentTimeMillis();
+    long insertStart = System.nanoTime();
+
+    for (int startId = 1; startId <= CONTAINER_ID_RANGE; startId += 
CONTAINERS_PER_TX) {

Review Comment:
   Updated the tests. Below is perf data:
   
   ```
   2026-03-23 17:47:28,930 [ForkJoinPool-1-worker-1] INFO  
persistence.TestUnhealthyContainersDerbyPerformance 
(TestUnhealthyContainersDerbyPerformance.java:testBatchInsertOneMillionRecords(290))
 - --- Test 1: Batch INSERT 1000000 records (2000 containers/tx, 100 
transactions) ---
   2026-03-23 17:47:43,984 [ForkJoinPool-1-worker-1] INFO  
persistence.TestUnhealthyContainersDerbyPerformance 
(TestUnhealthyContainersDerbyPerformance.java:testBatchInsertOneMillionRecords(304))
 - Batch INSERT complete: 1000000 records in 15037 ms (66503 rec/sec, 100 tx)
   2026-03-23 17:47:43,993 [ForkJoinPool-1-worker-1] INFO  
persistence.TestUnhealthyContainersDerbyPerformance 
(TestUnhealthyContainersDerbyPerformance.java:testTotalInsertedRecordCountIsOneMillion(319))
 - --- Test 2: Verify total row count = 1000000 ---
   2026-03-23 17:47:44,267 [ForkJoinPool-1-worker-1] INFO  
persistence.TestUnhealthyContainersDerbyPerformance 
(TestUnhealthyContainersDerbyPerformance.java:testTotalInsertedRecordCountIsOneMillion(325))
 - COUNT(*) = 1000000 rows in 274 ms
   2026-03-23 17:47:44,270 [ForkJoinPool-1-worker-1] INFO  
persistence.TestUnhealthyContainersDerbyPerformance 
(TestUnhealthyContainersDerbyPerformance.java:testCountByStatePerformanceUsesIndex(345))
 - --- Test 3: COUNT(*) by state (index-covered, 200000 records each) ---
   2026-03-23 17:47:44,361 [ForkJoinPool-1-worker-1] INFO  
persistence.TestUnhealthyContainersDerbyPerformance 
(TestUnhealthyContainersDerbyPerformance.java:testCountByStatePerformanceUsesIndex(359))
 -   COUNT(UNDER_REPLICATED) = 200000 rows  in 91 ms
   2026-03-23 17:47:44,495 [ForkJoinPool-1-worker-1] INFO  
persistence.TestUnhealthyContainersDerbyPerformance 
(TestUnhealthyContainersDerbyPerformance.java:testCountByStatePerformanceUsesIndex(359))
 -   COUNT(MISSING) = 200000 rows  in 133 ms
   2026-03-23 17:47:44,624 [ForkJoinPool-1-worker-1] INFO  
persistence.TestUnhealthyContainersDerbyPerformance 
(TestUnhealthyContainersDerbyPerformance.java:testCountByStatePerformanceUsesIndex(359))
 -   COUNT(OVER_REPLICATED) = 200000 rows  in 128 ms
   2026-03-23 17:47:44,687 [ForkJoinPool-1-worker-1] INFO  
persistence.TestUnhealthyContainersDerbyPerformance 
(TestUnhealthyContainersDerbyPerformance.java:testCountByStatePerformanceUsesIndex(359))
 -   COUNT(MIS_REPLICATED) = 200000 rows  in 62 ms
   2026-03-23 17:47:44,722 [ForkJoinPool-1-worker-1] INFO  
persistence.TestUnhealthyContainersDerbyPerformance 
(TestUnhealthyContainersDerbyPerformance.java:testCountByStatePerformanceUsesIndex(359))
 -   COUNT(EMPTY_MISSING) = 200000 rows  in 33 ms
   2026-03-23 17:47:44,723 [ForkJoinPool-1-worker-1] INFO  
persistence.TestUnhealthyContainersDerbyPerformance 
(TestUnhealthyContainersDerbyPerformance.java:testGroupBySummaryQueryPerformance(385))
 - --- Test 4: GROUP BY summary over 1000000 rows ---
   2026-03-23 17:47:45,271 [ForkJoinPool-1-worker-1] INFO  impl.Tools 
(JooqLogger.java:info(338)) - Kotlin is available, but not kotlin-reflect. Add 
the kotlin-reflect dependency to better use Kotlin features like data classes
   2026-03-23 17:47:45,272 [ForkJoinPool-1-worker-1] INFO  
persistence.TestUnhealthyContainersDerbyPerformance 
(TestUnhealthyContainersDerbyPerformance.java:testGroupBySummaryQueryPerformance(392))
 - GROUP BY summary: 5 state groups returned in 548 ms
   2026-03-23 17:47:45,272 [ForkJoinPool-1-worker-1] INFO  
persistence.TestUnhealthyContainersDerbyPerformance 
(TestUnhealthyContainersDerbyPerformance.java:lambda$testGroupBySummaryQueryPerformance$0(395))
 -   state=EMPTY_MISSING count=200000
   2026-03-23 17:47:45,273 [ForkJoinPool-1-worker-1] INFO  
persistence.TestUnhealthyContainersDerbyPerformance 
(TestUnhealthyContainersDerbyPerformance.java:lambda$testGroupBySummaryQueryPerformance$0(395))
 -   state=MISSING count=200000
   2026-03-23 17:47:45,273 [ForkJoinPool-1-worker-1] INFO  
persistence.TestUnhealthyContainersDerbyPerformance 
(TestUnhealthyContainersDerbyPerformance.java:lambda$testGroupBySummaryQueryPerformance$0(395))
 -   state=MIS_REPLICATED count=200000
   2026-03-23 17:47:45,273 [ForkJoinPool-1-worker-1] INFO  
persistence.TestUnhealthyContainersDerbyPerformance 
(TestUnhealthyContainersDerbyPerformance.java:lambda$testGroupBySummaryQueryPerformance$0(395))
 -   state=OVER_REPLICATED count=200000
   2026-03-23 17:47:45,273 [ForkJoinPool-1-worker-1] INFO  
persistence.TestUnhealthyContainersDerbyPerformance 
(TestUnhealthyContainersDerbyPerformance.java:lambda$testGroupBySummaryQueryPerformance$0(395))
 -   state=UNDER_REPLICATED count=200000
   2026-03-23 17:47:45,274 [ForkJoinPool-1-worker-1] INFO  
persistence.TestUnhealthyContainersDerbyPerformance 
(TestUnhealthyContainersDerbyPerformance.java:testPaginatedReadByStatePerformance(431))
 - --- Test 5: Paginated read of UNDER_REPLICATED (200000 records, page size 
5000) ---
   2026-03-23 17:47:45,779 [ForkJoinPool-1-worker-1] INFO  
persistence.TestUnhealthyContainersDerbyPerformance 
(TestUnhealthyContainersDerbyPerformance.java:testPaginatedReadByStatePerformance(470))
 - Paginated read: 200000 records in 40 pages, 504 ms  (200000 rec/sec)
   2026-03-23 17:47:45,781 [ForkJoinPool-1-worker-1] INFO  
persistence.TestUnhealthyContainersDerbyPerformance 
(TestUnhealthyContainersDerbyPerformance.java:testFullDatasetReadThroughputAllStates(496))
 - --- Test 6: Full 1 M record read (all states, paged) ---
   2026-03-23 17:47:46,155 [ForkJoinPool-1-worker-1] INFO  
persistence.TestUnhealthyContainersDerbyPerformance 
(TestUnhealthyContainersDerbyPerformance.java:testFullDatasetReadThroughputAllStates(524))
 -   State UNDER_REPLICATED: 200000 records in 373 ms
   2026-03-23 17:47:46,532 [ForkJoinPool-1-worker-1] INFO  
persistence.TestUnhealthyContainersDerbyPerformance 
(TestUnhealthyContainersDerbyPerformance.java:testFullDatasetReadThroughputAllStates(524))
 -   State MISSING: 200000 records in 376 ms
   2026-03-23 17:47:46,915 [ForkJoinPool-1-worker-1] INFO  
persistence.TestUnhealthyContainersDerbyPerformance 
(TestUnhealthyContainersDerbyPerformance.java:testFullDatasetReadThroughputAllStates(524))
 -   State OVER_REPLICATED: 200000 records in 382 ms
   2026-03-23 17:47:47,275 [ForkJoinPool-1-worker-1] INFO  
persistence.TestUnhealthyContainersDerbyPerformance 
(TestUnhealthyContainersDerbyPerformance.java:testFullDatasetReadThroughputAllStates(524))
 -   State MIS_REPLICATED: 200000 records in 359 ms
   2026-03-23 17:47:47,647 [ForkJoinPool-1-worker-1] INFO  
persistence.TestUnhealthyContainersDerbyPerformance 
(TestUnhealthyContainersDerbyPerformance.java:testFullDatasetReadThroughputAllStates(524))
 -   State EMPTY_MISSING: 200000 records in 371 ms
   2026-03-23 17:47:47,648 [ForkJoinPool-1-worker-1] INFO  
persistence.TestUnhealthyContainersDerbyPerformance 
(TestUnhealthyContainersDerbyPerformance.java:testFullDatasetReadThroughputAllStates(532))
 - Full dataset read: 1000000 total records in 1865 ms  (536193 rec/sec)
   2026-03-23 17:47:47,650 [ForkJoinPool-1-worker-1] INFO  
persistence.TestUnhealthyContainersDerbyPerformance 
(TestUnhealthyContainersDerbyPerformance.java:testAtomicReplaceDeleteAndInsertInSingleTransaction(562))
 - --- Test 7: Atomic replace — 200000 IDs × 5 states = 1000000 rows in one tx 
---
   2026-03-23 17:49:06,774 [ForkJoinPool-1-worker-1] INFO  
persistence.TestUnhealthyContainersDerbyPerformance 
(TestUnhealthyContainersDerbyPerformance.java:testAtomicReplaceDeleteAndInsertInSingleTransaction(575))
 - Atomic replace completed in 79103 ms
   2026-03-23 17:49:07,003 [ForkJoinPool-1-worker-1] INFO  
persistence.TestUnhealthyContainersDerbyPerformance 
(TestUnhealthyContainersDerbyPerformance.java:testBatchDeletePerformanceOneMillionRecords(626))
 - --- Test 8: Batch DELETE — 200000 IDs × 5 states = 1000000 rows  (200 
internal SQL statements of 1000 IDs) ---
   2026-03-23 17:50:11,884 [ForkJoinPool-1-worker-1] INFO  
persistence.TestUnhealthyContainersDerbyPerformance 
(TestUnhealthyContainersDerbyPerformance.java:testBatchDeletePerformanceOneMillionRecords(644))
 - DELETE complete: 200000 IDs (1000000 rows) in 64881 ms via 200 SQL 
statements  (15413 rows/sec)
   2026-03-23 17:50:11,917 [ForkJoinPool-1-worker-1] INFO  
persistence.TestUnhealthyContainersDerbyPerformance 
(TestUnhealthyContainersDerbyPerformance.java:testBatchDeletePerformanceOneMillionRecords(649))
 - Rows remaining after delete: 0 (expected 0)
   2026-03-23 17:50:11,918 [ForkJoinPool-1-worker-1] INFO  
persistence.TestUnhealthyContainersDerbyPerformance 
(TestUnhealthyContainersDerbyPerformance.java:testCountByStateAfterFullDelete(672))
 - --- Test 9: COUNT by state after full delete (expected 0 each) ---
   2026-03-23 17:50:11,934 [ForkJoinPool-1-worker-1] INFO  
persistence.TestUnhealthyContainersDerbyPerformance 
(TestUnhealthyContainersDerbyPerformance.java:testCountByStateAfterFullDelete(686))
 -   COUNT(UNDER_REPLICATED) = 0 rows in 15 ms
   2026-03-23 17:50:11,951 [ForkJoinPool-1-worker-1] INFO  
persistence.TestUnhealthyContainersDerbyPerformance 
(TestUnhealthyContainersDerbyPerformance.java:testCountByStateAfterFullDelete(686))
 -   COUNT(MISSING) = 0 rows in 16 ms
   2026-03-23 17:50:11,966 [ForkJoinPool-1-worker-1] INFO  
persistence.TestUnhealthyContainersDerbyPerformance 
(TestUnhealthyContainersDerbyPerformance.java:testCountByStateAfterFullDelete(686))
 -   COUNT(OVER_REPLICATED) = 0 rows in 15 ms
   2026-03-23 17:50:11,968 [ForkJoinPool-1-worker-1] INFO  
persistence.TestUnhealthyContainersDerbyPerformance 
(TestUnhealthyContainersDerbyPerformance.java:testCountByStateAfterFullDelete(686))
 -   COUNT(MIS_REPLICATED) = 0 rows in 1 ms
   2026-03-23 17:50:11,970 [ForkJoinPool-1-worker-1] INFO  
persistence.TestUnhealthyContainersDerbyPerformance 
(TestUnhealthyContainersDerbyPerformance.java:testCountByStateAfterFullDelete(686))
 -   COUNT(EMPTY_MISSING) = 0 rows in 1 ms
   ```



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to