This is an automated email from the ASF dual-hosted git repository.

kfaraz pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/druid.git


The following commit(s) were added to refs/heads/master by this push:
     new b9f33453f1a Add unit tests and benchmarks for SqlSegmentsMetadataQuery.retrieveUsedSegments (#18108)
b9f33453f1a is described below

commit b9f33453f1a22b9c5ee237eac5ffb7c7004ddfb0
Author: Uddeshya Singh <[email protected]>
AuthorDate: Thu Jun 12 11:44:08 2025 +0530

    Add unit tests and benchmarks for SqlSegmentsMetadataQuery.retrieveUsedSegments (#18108)
    
    Changes:
    - Add utility methods to `TestDerbyConnector`
    - Add `SqlSegmentsMetadataQueryBenchmark`
    - Add new tests in `SqlSegmentsMetadataQueryTest`
---
 benchmarks/pom.xml                                 |   5 +
 .../SqlSegmentsMetadataQueryBenchmark.java         | 157 +++++++++++++++++++++
 .../indexing/common/actions/TaskActionTestKit.java |   4 +-
 ...dexerSqlMetadataStorageCoordinatorTestBase.java |  88 +++++++++---
 .../metadata/SqlSegmentsMetadataQueryTest.java     |  51 +++++++
 .../apache/druid/metadata/TestDerbyConnector.java  |  95 ++++++++++---
 6 files changed, 358 insertions(+), 42 deletions(-)

diff --git a/benchmarks/pom.xml b/benchmarks/pom.xml
index fd190aadfbf..5b6fc06fda9 100644
--- a/benchmarks/pom.xml
+++ b/benchmarks/pom.xml
@@ -31,6 +31,11 @@
   </parent>
 
   <dependencies>
+    <dependency>
+      <groupId>org.jdbi</groupId>
+      <artifactId>jdbi</artifactId>
+      <scope>provided</scope>
+    </dependency>
     <dependency>
       <groupId>org.openjdk.jmh</groupId>
       <artifactId>jmh-core</artifactId>
diff --git 
a/benchmarks/src/test/java/org/apache/druid/benchmark/indexing/SqlSegmentsMetadataQueryBenchmark.java
 
b/benchmarks/src/test/java/org/apache/druid/benchmark/indexing/SqlSegmentsMetadataQueryBenchmark.java
new file mode 100644
index 00000000000..aa49884cd0b
--- /dev/null
+++ 
b/benchmarks/src/test/java/org/apache/druid/benchmark/indexing/SqlSegmentsMetadataQueryBenchmark.java
@@ -0,0 +1,157 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.benchmark.indexing;
+
+import com.google.common.collect.ImmutableSet;
+import org.apache.druid.java.util.common.DateTimes;
+import org.apache.druid.java.util.common.granularity.Granularities;
+import org.apache.druid.java.util.common.parsers.CloseableIterator;
+import org.apache.druid.metadata.IndexerSqlMetadataStorageCoordinatorTestBase;
+import org.apache.druid.metadata.MetadataStorageTablesConfig;
+import org.apache.druid.metadata.SqlSegmentsMetadataQuery;
+import org.apache.druid.metadata.TestDerbyConnector;
+import org.apache.druid.segment.TestDataSource;
+import org.apache.druid.segment.TestHelper;
+import org.apache.druid.server.coordinator.CreateDataSegments;
+import org.apache.druid.timeline.DataSegment;
+import org.joda.time.DateTime;
+import org.joda.time.Interval;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.Measurement;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.TearDown;
+import org.openjdk.jmh.annotations.Warmup;
+import org.openjdk.jmh.infra.Blackhole;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Function;
+
+@State(Scope.Benchmark)
+@Fork(value = 1)
+@Warmup(iterations = 1, time = 1)
+@Measurement(iterations = 20, time = 2)
+@BenchmarkMode({Mode.AverageTime})
+@OutputTimeUnit(TimeUnit.MILLISECONDS)
+public class SqlSegmentsMetadataQueryBenchmark
+{
+
+  private static final DateTime JAN_1 = DateTimes.of("2025-01-01");
+  private static final String V1 = JAN_1.toString();
+  private static final List<DataSegment> WIKI_SEGMENTS_1000X100D
+      = CreateDataSegments.ofDatasource(TestDataSource.WIKI)
+                          .forIntervals(100, Granularities.DAY)
+                          .withNumPartitions(1000)
+                          .startingAt(JAN_1)
+                          .withVersion(V1)
+                          .eachOfSizeInMb(500);
+
+  private TestDerbyConnector derbyConnector;
+
+  @Setup(Level.Trial)
+  public void setup() throws Exception
+  {
+    this.derbyConnector = new TestDerbyConnector();
+    derbyConnector.createDatabase();
+    derbyConnector.createSegmentTable();
+    insertSegments(WIKI_SEGMENTS_1000X100D.toArray(new DataSegment[0]));
+  }
+
+  @TearDown(Level.Trial)
+  public void tearDown() throws Exception
+  {
+    derbyConnector.tearDown();
+  }
+
+  @Benchmark
+  public void benchmarkRetrieveUsedSegments_returnAllSegments(Blackhole 
blackhole)
+  {
+    final Interval queryInterval = new Interval(JAN_1, JAN_1.plusDays(3));
+    blackhole.consume(readAsSet(q -> 
q.retrieveUsedSegments(TestDataSource.WIKI, List.of(queryInterval))));
+  }
+
+  @Benchmark
+  public void benchmarkRetrieveUsedSegments_returnEmpty(Blackhole blackhole)
+  {
+    final Interval queryInterval = new Interval(JAN_1.minusDays(2), 
JAN_1.minusDays(1));
+    blackhole.consume(readAsSet(q -> 
q.retrieveUsedSegments(TestDataSource.WIKI, List.of(queryInterval))));
+  }
+
+  @Benchmark
+  public void benchmarkRetrieveUsedSegments_returnFirstInterval(Blackhole 
blackhole)
+  {
+    final Interval queryInterval = new Interval(JAN_1, JAN_1.plusDays(1));
+    blackhole.consume(readAsSet(q -> 
q.retrieveUsedSegments(TestDataSource.WIKI, List.of(queryInterval))));
+  }
+
+  @Benchmark
+  public void benchmarkRetrieveUsedSegments_returnLastInterval(Blackhole 
blackhole)
+  {
+    final Interval queryInterval = new Interval(JAN_1.plusDays(99), 
JAN_1.plusDays(100));
+    blackhole.consume(readAsSet(q -> 
q.retrieveUsedSegments(TestDataSource.WIKI, List.of(queryInterval))));
+  }
+
+
+  @Benchmark
+  public void 
benchmarkRetrieveUsedSegments_multipleIntervalsWithOverlaps(Blackhole blackhole)
+  {
+    List<Interval> intervals = List.of(
+        new Interval(JAN_1, JAN_1.plusDays(3)),
+        new Interval(JAN_1.plusDays(2), JAN_1.plusDays(17)),
+        new Interval(JAN_1.plusDays(31), JAN_1.plusDays(36)),
+        new Interval(JAN_1.plusDays(35), JAN_1.plusDays(54)),
+        new Interval(JAN_1.plusDays(68), JAN_1.plusDays(98))
+    );
+    blackhole.consume(readAsSet(q -> 
q.retrieveUsedSegments(TestDataSource.WIKI, intervals)));
+  }
+
+  private <T> Set<T> readAsSet(Function<SqlSegmentsMetadataQuery, 
CloseableIterator<T>> iterableReader)
+  {
+    final MetadataStorageTablesConfig tablesConfig = 
derbyConnector.getMetadataTablesConfig();
+
+    return derbyConnector.inReadOnlyTransaction((handle, status) -> {
+      final SqlSegmentsMetadataQuery query =
+          SqlSegmentsMetadataQuery.forHandle(handle, derbyConnector, 
tablesConfig, TestHelper.JSON_MAPPER);
+
+      try (CloseableIterator<T> iterator = iterableReader.apply(query)) {
+        return ImmutableSet.copyOf(iterator);
+      }
+    });
+  }
+
+  private void insertSegments(DataSegment... segments)
+  {
+    IndexerSqlMetadataStorageCoordinatorTestBase.insertUsedSegments(
+        Set.of(segments),
+        Map.of(),
+        derbyConnector,
+        TestHelper.JSON_MAPPER
+    );
+  }
+}
diff --git 
a/indexing-service/src/test/java/org/apache/druid/indexing/common/actions/TaskActionTestKit.java
 
b/indexing-service/src/test/java/org/apache/druid/indexing/common/actions/TaskActionTestKit.java
index 76e207521a7..b7c47a60a7d 100644
--- 
a/indexing-service/src/test/java/org/apache/druid/indexing/common/actions/TaskActionTestKit.java
+++ 
b/indexing-service/src/test/java/org/apache/druid/indexing/common/actions/TaskActionTestKit.java
@@ -117,8 +117,8 @@ public class TaskActionTestKit extends ExternalResource
     emitter = new StubServiceEmitter();
     taskStorage = new HeapMemoryTaskStorage(new TaskStorageConfig(new 
Period("PT24H")));
     testDerbyConnector = new TestDerbyConnector(
-        Suppliers.ofInstance(new MetadataStorageConnectorConfig()),
-        Suppliers.ofInstance(metadataStorageTablesConfig)
+        new MetadataStorageConnectorConfig(),
+        metadataStorageTablesConfig
     );
     final ObjectMapper objectMapper = new TestUtils().getTestObjectMapper();
     final SegmentSchemaManager segmentSchemaManager = new SegmentSchemaManager(
diff --git 
a/server/src/test/java/org/apache/druid/metadata/IndexerSqlMetadataStorageCoordinatorTestBase.java
 
b/server/src/test/java/org/apache/druid/metadata/IndexerSqlMetadataStorageCoordinatorTestBase.java
index 17a02f19a73..36dd38a85c0 100644
--- 
a/server/src/test/java/org/apache/druid/metadata/IndexerSqlMetadataStorageCoordinatorTestBase.java
+++ 
b/server/src/test/java/org/apache/druid/metadata/IndexerSqlMetadataStorageCoordinatorTestBase.java
@@ -31,10 +31,8 @@ import org.apache.druid.java.util.common.Pair;
 import org.apache.druid.java.util.common.StringUtils;
 import org.apache.druid.java.util.common.jackson.JacksonUtils;
 import org.apache.druid.java.util.common.parsers.CloseableIterator;
-import org.apache.druid.segment.SegmentSchemaMapping;
 import org.apache.druid.segment.TestDataSource;
 import org.apache.druid.segment.TestHelper;
-import org.apache.druid.segment.metadata.CentralizedDatasourceSchemaConfig;
 import org.apache.druid.segment.metadata.FingerprintGenerator;
 import org.apache.druid.segment.metadata.SegmentSchemaManager;
 import org.apache.druid.segment.metadata.SegmentSchemaTestUtils;
@@ -332,14 +330,14 @@ public class IndexerSqlMetadataStorageCoordinatorTestBase
 
     for (int year = startYear; year < endYear; year++) {
       segments.add(createSegment(
-          Intervals.of("%d/%d", year, year + 1),
-          "version",
-          new LinearShardSpec(0))
+                       Intervals.of("%d/%d", year, year + 1),
+                       "version",
+                       new LinearShardSpec(0)
+                   )
       );
     }
     final Set<DataSegment> segmentsSet = new HashSet<>(segments);
-    final Set<DataSegment> committedSegments = 
coordinator.commitSegments(segmentsSet, new SegmentSchemaMapping(
-        CentralizedDatasourceSchemaConfig.SCHEMA_VERSION));
+    final Set<DataSegment> committedSegments = 
coordinator.commitSegments(segmentsSet, null);
     Assert.assertTrue(committedSegments.containsAll(segmentsSet));
 
     return segments;
@@ -363,7 +361,15 @@ public class IndexerSqlMetadataStorageCoordinatorTestBase
                                                tablesConfig,
                                                mapper
                                            )
-                                           
.retrieveUnusedSegments(TestDataSource.WIKI, intervals, null, limit, 
lastSegmentId, sortOrder, maxUsedStatusLastUpdatedTime)) {
+                                           .retrieveUnusedSegments(
+                                               TestDataSource.WIKI,
+                                               intervals,
+                                               null,
+                                               limit,
+                                               lastSegmentId,
+                                               sortOrder,
+                                               maxUsedStatusLastUpdatedTime
+                                           )) {
             return ImmutableList.copyOf(iterator);
           }
         }
@@ -383,7 +389,15 @@ public class IndexerSqlMetadataStorageCoordinatorTestBase
         (handle, status) -> {
           try (final CloseableIterator<DataSegmentPlus> iterator =
                    SqlSegmentsMetadataQuery.forHandle(handle, derbyConnector, 
tablesConfig, mapper)
-                                           
.retrieveUnusedSegmentsPlus(TestDataSource.WIKI, intervals, null, limit, 
lastSegmentId, sortOrder, maxUsedStatusLastUpdatedTime)) {
+                                           .retrieveUnusedSegmentsPlus(
+                                               TestDataSource.WIKI,
+                                               intervals,
+                                               null,
+                                               limit,
+                                               lastSegmentId,
+                                               sortOrder,
+                                               maxUsedStatusLastUpdatedTime
+                                           )) {
             return ImmutableList.copyOf(iterator);
           }
         }
@@ -393,11 +407,20 @@ public class IndexerSqlMetadataStorageCoordinatorTestBase
   protected void verifyContainsAllSegmentsPlus(
       List<DataSegment> expectedSegments,
       List<DataSegmentPlus> actualUnusedSegmentsPlus,
-      DateTime usedStatusLastUpdatedTime)
+      DateTime usedStatusLastUpdatedTime
+  )
   {
-    Map<SegmentId, DataSegment> expectedIdToSegment = 
expectedSegments.stream().collect(Collectors.toMap(DataSegment::getId, 
Function.identity()));
+    Map<SegmentId, DataSegment> expectedIdToSegment = expectedSegments.stream()
+                                                                      
.collect(Collectors.toMap(
+                                                                          
DataSegment::getId,
+                                                                          
Function.identity()
+                                                                      ));
     Map<SegmentId, DataSegmentPlus> actualIdToSegmentPlus = 
actualUnusedSegmentsPlus.stream()
-                                                                               
     .collect(Collectors.toMap(d -> d.getDataSegment().getId(), 
Function.identity()));
+                                                                               
     .collect(Collectors.toMap(
+                                                                               
         d -> d.getDataSegment()
+                                                                               
               .getId(),
+                                                                               
         Function.identity()
+                                                                               
     ));
     Assert.assertTrue(expectedIdToSegment.entrySet().stream().allMatch(e -> {
       DataSegmentPlus segmentPlus = actualIdToSegmentPlus.get(e.getKey());
       return segmentPlus != null
@@ -482,7 +505,11 @@ public class IndexerSqlMetadataStorageCoordinatorTestBase
     final String table = tablesConfig.getSegmentsTable();
     return derbyConnector.retryWithHandle(
         handle -> handle.createQuery("SELECT payload FROM " + table + " WHERE 
used = true ORDER BY id")
-                        .map((index, result, context) -> 
JacksonUtils.readValue(mapper, result.getBytes(1), DataSegment.class))
+                        .map((index, result, context) -> 
JacksonUtils.readValue(
+                            mapper,
+                            result.getBytes(1),
+                            DataSegment.class
+                        ))
                         .list()
     );
   }
@@ -497,7 +524,10 @@ public class IndexerSqlMetadataStorageCoordinatorTestBase
     );
   }
 
-  protected Map<String, String> getSegmentsCommittedDuringReplaceTask(String 
taskId, MetadataStorageTablesConfig tablesConfig)
+  protected Map<String, String> getSegmentsCommittedDuringReplaceTask(
+      String taskId,
+      MetadataStorageTablesConfig tablesConfig
+  )
   {
     final String table = tablesConfig.getUpgradeSegmentsTable();
     return derbyConnector.retryWithHandle(handle -> {
@@ -523,7 +553,10 @@ public class IndexerSqlMetadataStorageCoordinatorTestBase
     });
   }
 
-  protected void insertIntoUpgradeSegmentsTable(Map<DataSegment, 
ReplaceTaskLock> segmentToTaskLockMap, MetadataStorageTablesConfig tablesConfig)
+  protected void insertIntoUpgradeSegmentsTable(
+      Map<DataSegment, ReplaceTaskLock> segmentToTaskLockMap,
+      MetadataStorageTablesConfig tablesConfig
+  )
   {
     final String table = tablesConfig.getUpgradeSegmentsTable();
     derbyConnector.retryWithHandle(
@@ -562,6 +595,16 @@ public class IndexerSqlMetadataStorageCoordinatorTestBase
       TestDerbyConnector.DerbyConnectorRule derbyConnectorRule,
       ObjectMapper jsonMapper
   )
+  {
+    insertUsedSegments(dataSegments, upgradedFromSegmentIdMap, 
derbyConnectorRule.getConnector(), jsonMapper);
+  }
+
+  public static void insertUsedSegments(
+      Set<DataSegment> dataSegments,
+      Map<String, String> upgradedFromSegmentIdMap,
+      TestDerbyConnector connector,
+      ObjectMapper jsonMapper
+  )
   {
     final Set<DataSegmentPlus> usedSegments = new HashSet<>();
     for (DataSegment segment : dataSegments) {
@@ -579,7 +622,7 @@ public class IndexerSqlMetadataStorageCoordinatorTestBase
       );
     }
 
-    insertSegments(usedSegments, false, derbyConnectorRule, jsonMapper);
+    insertSegments(usedSegments, false, connector, jsonMapper);
   }
 
   public static void insertSegments(
@@ -589,8 +632,17 @@ public class IndexerSqlMetadataStorageCoordinatorTestBase
       ObjectMapper jsonMapper
   )
   {
-    final TestDerbyConnector connector = derbyConnectorRule.getConnector();
-    final String table = 
derbyConnectorRule.metadataTablesConfigSupplier().get().getSegmentsTable();
+    insertSegments(dataSegments, includeSchema, 
derbyConnectorRule.getConnector(), jsonMapper);
+  }
+
+  public static void insertSegments(
+      Set<DataSegmentPlus> dataSegments,
+      boolean includeSchema,
+      TestDerbyConnector connector,
+      ObjectMapper jsonMapper
+  )
+  {
+    final String table = 
connector.getMetadataTablesConfig().getSegmentsTable();
 
     final String sql = getSegmentInsertSql(includeSchema, table, connector);
     connector.retryWithHandle(
diff --git 
a/server/src/test/java/org/apache/druid/metadata/SqlSegmentsMetadataQueryTest.java
 
b/server/src/test/java/org/apache/druid/metadata/SqlSegmentsMetadataQueryTest.java
index 9dd1b916c4e..1084c888e01 100644
--- 
a/server/src/test/java/org/apache/druid/metadata/SqlSegmentsMetadataQueryTest.java
+++ 
b/server/src/test/java/org/apache/druid/metadata/SqlSegmentsMetadataQueryTest.java
@@ -238,6 +238,57 @@ public class SqlSegmentsMetadataQueryTest
     );
   }
 
+  @Test
+  public void test_retrieveUsedSegments_withOverlapsCondition()
+  {
+    Interval queryInterval = new Interval(JAN_1.plusDays(2), 
JAN_1.plusDays(4));
+
+    Set<DataSegment> result = readAsSet(q -> 
q.retrieveUsedSegments(TestDataSource.WIKI, List.of(queryInterval)));
+
+    Assert.assertEquals(4, result.size());
+    assertSegmentsOverlapInterval(result, queryInterval);
+  }
+
+  @Test
+  public void 
test_retrieveUsedSegments_withOverlapsCondition_andUnusedSegments()
+  {
+    final Set<DataSegment> segmentsToUpdate = 
Set.of(WIKI_SEGMENTS_2X5D.get(2));
+    int numUpdatedSegments = update(
+        sql -> sql.markSegmentsAsUnused(getIds(segmentsToUpdate), 
DateTimes.nowUtc())
+    );
+    Assert.assertEquals(1, numUpdatedSegments);
+
+    final Interval queryInterval = new Interval(JAN_1, JAN_1.plusDays(2));
+
+    Set<DataSegment> result = readAsSet(q -> 
q.retrieveUsedSegments(TestDataSource.WIKI, List.of(queryInterval)));
+
+    Assert.assertEquals(3, result.size());
+    assertSegmentsOverlapInterval(result, queryInterval);
+  }
+
+  @Test
+  public void test_retrieveUsedSegments_withOverlapsCondition_nearEndDate()
+  {
+    Interval queryInterval = new Interval(JAN_1.plusDays(4), 
JAN_1.plusDays(5));
+
+    Set<DataSegment> result = readAsSet(q -> 
q.retrieveUsedSegments(TestDataSource.WIKI, List.of(queryInterval)));
+    Assert.assertEquals(2, result.size());
+    assertSegmentsOverlapInterval(result, queryInterval);
+  }
+
+  private void assertSegmentsOverlapInterval(
+      Set<DataSegment> segments,
+      Interval interval
+  )
+  {
+    for (DataSegment segment : segments) {
+      Assert.assertTrue(
+          "Segment " + segment.getId() + " should be in interval " + interval,
+          segment.getInterval().overlaps(interval)
+      );
+    }
+  }
+
   /**
    * Reads segments from the metadata store using a
    * {@link SqlSegmentsMetadataQuery} object.
diff --git 
a/server/src/test/java/org/apache/druid/metadata/TestDerbyConnector.java 
b/server/src/test/java/org/apache/druid/metadata/TestDerbyConnector.java
index 6496a513548..800ee8f9e34 100644
--- a/server/src/test/java/org/apache/druid/metadata/TestDerbyConnector.java
+++ b/server/src/test/java/org/apache/druid/metadata/TestDerbyConnector.java
@@ -49,33 +49,65 @@ import java.util.UUID;
 public class TestDerbyConnector extends DerbyConnector
 {
   private final String jdbcUri;
+  private final MetadataStorageTablesConfig dbTables;
 
   public TestDerbyConnector(
-      Supplier<MetadataStorageConnectorConfig> config,
-      Supplier<MetadataStorageTablesConfig> dbTables,
+      MetadataStorageConnectorConfig connectorConfig,
+      MetadataStorageTablesConfig tablesConfig,
       CentralizedDatasourceSchemaConfig centralizedDatasourceSchemaConfig
   )
   {
-    this(config, dbTables, "jdbc:derby:memory:druidTest" + dbSafeUUID(), 
centralizedDatasourceSchemaConfig);
+    this(
+        connectorConfig,
+        tablesConfig,
+        "jdbc:derby:memory:druidTest" + dbSafeUUID(),
+        centralizedDatasourceSchemaConfig
+    );
   }
 
   public TestDerbyConnector(
-      Supplier<MetadataStorageConnectorConfig> config,
-      Supplier<MetadataStorageTablesConfig> dbTables
+      MetadataStorageConnectorConfig connectorConfig,
+      MetadataStorageTablesConfig tablesConfig
   )
   {
-    this(config, dbTables, "jdbc:derby:memory:druidTest" + dbSafeUUID(), 
CentralizedDatasourceSchemaConfig.create());
+    this(
+        connectorConfig,
+        tablesConfig,
+        "jdbc:derby:memory:druidTest" + dbSafeUUID(),
+        CentralizedDatasourceSchemaConfig.create()
+    );
   }
 
   protected TestDerbyConnector(
-      Supplier<MetadataStorageConnectorConfig> config,
-      Supplier<MetadataStorageTablesConfig> dbTables,
+      MetadataStorageConnectorConfig connectorConfig,
+      MetadataStorageTablesConfig tablesConfig,
       String jdbcUri,
       CentralizedDatasourceSchemaConfig centralizedDatasourceSchemaConfig
   )
   {
-    super(new NoopMetadataStorageProvider().get(), config, dbTables, new 
DBI(jdbcUri + ";create=true"), centralizedDatasourceSchemaConfig);
+    super(
+        new NoopMetadataStorageProvider().get(),
+        Suppliers.ofInstance(connectorConfig),
+        Suppliers.ofInstance(tablesConfig),
+        new DBI(jdbcUri + ";create=true"),
+        centralizedDatasourceSchemaConfig
+    );
     this.jdbcUri = jdbcUri;
+    this.dbTables = tablesConfig;
+  }
+
+  public TestDerbyConnector()
+  {
+    this(
+        new MetadataStorageConnectorConfig(),
+        MetadataStorageTablesConfig.fromBase("druidTest" + dbSafeUUID()),
+        CentralizedDatasourceSchemaConfig.create()
+    );
+  }
+
+  public MetadataStorageTablesConfig getMetadataTablesConfig()
+  {
+    return this.dbTables;
   }
 
   public void tearDown()
@@ -86,7 +118,11 @@ public class TestDerbyConnector extends DerbyConnector
     catch (UnableToObtainConnectionException e) {
       SQLException cause = (SQLException) e.getCause();
       // error code "08006" indicates proper shutdown
-      Assert.assertEquals(StringUtils.format("Derby not shutdown: [%s]", 
cause.toString()), "08006", cause.getSQLState());
+      Assert.assertEquals(
+          StringUtils.format("Derby not shutdown: [%s]", cause.toString()),
+          "08006",
+          cause.getSQLState()
+      );
     }
   }
 
@@ -100,10 +136,15 @@ public class TestDerbyConnector extends DerbyConnector
     return jdbcUri;
   }
 
+  public void createDatabase()
+  {
+    this.getDBI().open().close();
+  }
+
   public static class DerbyConnectorRule extends ExternalResource
   {
     private TestDerbyConnector connector;
-    private final Supplier<MetadataStorageTablesConfig> dbTables;
+    private final MetadataStorageTablesConfig tablesConfig;
     private final MetadataStorageConnectorConfig connectorConfig;
     private final CentralizedDatasourceSchemaConfig 
centralizedDatasourceSchemaConfig;
 
@@ -114,22 +155,28 @@ public class TestDerbyConnector extends DerbyConnector
 
     public DerbyConnectorRule(CentralizedDatasourceSchemaConfig 
centralizedDatasourceSchemaConfig)
     {
-      
this(Suppliers.ofInstance(MetadataStorageTablesConfig.fromBase("druidTest" + 
dbSafeUUID())), centralizedDatasourceSchemaConfig);
+      this(
+          MetadataStorageTablesConfig.fromBase("druidTest" + dbSafeUUID()),
+          centralizedDatasourceSchemaConfig
+      );
     }
 
     private DerbyConnectorRule(
         final String defaultBase
     )
     {
-      
this(Suppliers.ofInstance(MetadataStorageTablesConfig.fromBase(defaultBase)), 
CentralizedDatasourceSchemaConfig.create());
+      this(
+          MetadataStorageTablesConfig.fromBase(defaultBase),
+          CentralizedDatasourceSchemaConfig.create()
+      );
     }
 
     public DerbyConnectorRule(
-        Supplier<MetadataStorageTablesConfig> dbTables,
+        MetadataStorageTablesConfig tablesConfig,
         CentralizedDatasourceSchemaConfig centralizedDatasourceSchemaConfig
     )
     {
-      this.dbTables = dbTables;
+      this.tablesConfig = tablesConfig;
       this.connectorConfig = new MetadataStorageConnectorConfig()
       {
         @Override
@@ -144,8 +191,12 @@ public class TestDerbyConnector extends DerbyConnector
     @Override
     protected void before()
     {
-      connector = new 
TestDerbyConnector(Suppliers.ofInstance(connectorConfig), dbTables, 
centralizedDatasourceSchemaConfig);
-      connector.getDBI().open().close(); // create db
+      connector = new TestDerbyConnector(
+          connectorConfig,
+          tablesConfig,
+          centralizedDatasourceSchemaConfig
+      );
+      connector.createDatabase(); // create db
     }
 
     @Override
@@ -166,7 +217,7 @@ public class TestDerbyConnector extends DerbyConnector
 
     public Supplier<MetadataStorageTablesConfig> metadataTablesConfigSupplier()
     {
-      return dbTables;
+      return Suppliers.ofInstance(tablesConfig);
     }
 
     public SegmentsTable segments()
@@ -259,7 +310,7 @@ public class TestDerbyConnector extends DerbyConnector
      * Updates the segments table with the supplied SQL query format and 
arguments.
      *
      * @param sqlFormat the SQL query format with %s placeholder for the table 
name and ? for each query {@code args}
-     * @param args the arguments to be substituted into the SQL query
+     * @param args      the arguments to be substituted into the SQL query
      * @return the number of rows affected by the update operation
      */
     public int update(String sqlFormat, Object... args)
@@ -284,9 +335,9 @@ public class TestDerbyConnector extends DerbyConnector
     public String getTableName()
     {
       return this.rule.metadataTablesConfigSupplier()
-                 .get()
-                 .getSegmentsTable()
-                 .toUpperCase(Locale.ENGLISH);
+                      .get()
+                      .getSegmentsTable()
+                      .toUpperCase(Locale.ENGLISH);
     }
   }
 


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to