kfaraz commented on code in PR #18108:
URL: https://github.com/apache/druid/pull/18108#discussion_r2137987295


##########
server/src/test/java/org/apache/druid/metadata/SqlSegmentsMetadataQueryTest.java:
##########
@@ -238,6 +238,56 @@ public void 
test_retrieveSegmentForId_returnsNull_forUnknownId()
     );
   }
 
+  @Test
+  public void test_retrieveUsedSegments_withOverlapsCondition()
+  {
+    Interval queryInterval = new 
Interval("2025-01-03T00:00:00.000Z/2025-01-05T00:00:00.000Z");

Review Comment:
  Nit: to make the intervals more readable
   
   ```suggestion
       Interval queryInterval = new Interval("2025-01-03/P2D");
   ```



##########
server/src/test/java/org/apache/druid/metadata/SqlSegmentsMetadataQueryTest.java:
##########
@@ -238,6 +238,56 @@ public void 
test_retrieveSegmentForId_returnsNull_forUnknownId()
     );
   }
 
+  @Test
+  public void test_retrieveUsedSegments_withOverlapsCondition()
+  {
+    Interval queryInterval = new 
Interval("2025-01-03T00:00:00.000Z/2025-01-05T00:00:00.000Z");
+
+    Set<DataSegment> result = readAsSet(q -> 
q.retrieveUsedSegments(TestDataSource.WIKI, List.of(queryInterval)));
+
+    Assert.assertEquals(4, result.size());
+    assertSegmentsInInterval(result, queryInterval);
+  }
+
+  @Test
+  public void 
test_retrieveUsedSegments_withOverlapsConditionAndUnusedSegments()
+  {
+    final Set<DataSegment> segmentsToUpdate = 
Set.of(WIKI_SEGMENTS_2X5D.get(2));
+    int numUpdatedSegments = update(
+        sql -> sql.markSegmentsAsUnused(getIds(segmentsToUpdate), 
DateTimes.nowUtc()));
+    Assert.assertEquals(1, numUpdatedSegments);
+
+    Interval queryInterval = new 
Interval("2025-01-01T00:00:00.000Z/2025-01-03T00:00:00.000Z");
+
+    Set<DataSegment> result = readAsSet(q -> 
q.retrieveUsedSegments(TestDataSource.WIKI, List.of(queryInterval)));
+
+    Assert.assertEquals(3, result.size());
+    assertSegmentsInInterval(result, queryInterval);
+  }
+
+  @Test
+  public void test_retrieveUsedSegments_withOverlapsCondition_near_end_date()
+  {
+    Interval queryInterval = new 
Interval("2025-01-05T00:00:00.000Z/2025-01-06T00:00:00.000Z");
+
+    Set<DataSegment> result = readAsSet(q -> 
q.retrieveUsedSegments(TestDataSource.WIKI, List.of(queryInterval)));
+    Assert.assertEquals(2, result.size());
+    assertSegmentsInInterval(result, queryInterval);
+  }
+
+  private void assertSegmentsInInterval(

Review Comment:
   ```suggestion
     private void assertSegmentsOverlapInterval(
   ```



##########
server/src/test/java/org/apache/druid/metadata/SqlSegmentsMetadataQueryTest.java:
##########
@@ -238,6 +238,56 @@ public void 
test_retrieveSegmentForId_returnsNull_forUnknownId()
     );
   }
 
+  @Test
+  public void test_retrieveUsedSegments_withOverlapsCondition()
+  {
+    Interval queryInterval = new 
Interval("2025-01-03T00:00:00.000Z/2025-01-05T00:00:00.000Z");
+
+    Set<DataSegment> result = readAsSet(q -> 
q.retrieveUsedSegments(TestDataSource.WIKI, List.of(queryInterval)));
+
+    Assert.assertEquals(4, result.size());
+    assertSegmentsInInterval(result, queryInterval);
+  }
+
+  @Test
+  public void 
test_retrieveUsedSegments_withOverlapsConditionAndUnusedSegments()
+  {
+    final Set<DataSegment> segmentsToUpdate = 
Set.of(WIKI_SEGMENTS_2X5D.get(2));
+    int numUpdatedSegments = update(
+        sql -> sql.markSegmentsAsUnused(getIds(segmentsToUpdate), 
DateTimes.nowUtc()));

Review Comment:
   style:
   ```suggestion
           sql -> sql.markSegmentsAsUnused(getIds(segmentsToUpdate), 
DateTimes.nowUtc())
       );
   ```



##########
benchmarks/src/test/java/org/apache/druid/benchmark/indexing/SqlSegmentsMetadataQueryBenchmark.java:
##########
@@ -0,0 +1,138 @@
+package org.apache.druid.benchmark.indexing;
+
+import com.google.common.collect.ImmutableSet;
+import org.apache.druid.java.util.common.DateTimes;
+import org.apache.druid.java.util.common.granularity.Granularities;
+import org.apache.druid.java.util.common.parsers.CloseableIterator;
+import org.apache.druid.metadata.IndexerSqlMetadataStorageCoordinatorTestBase;
+import org.apache.druid.metadata.MetadataStorageTablesConfig;
+import org.apache.druid.metadata.SqlSegmentsMetadataQuery;
+import org.apache.druid.metadata.TestDerbyConnector;
+import org.apache.druid.segment.TestDataSource;
+import org.apache.druid.segment.TestHelper;
+import org.apache.druid.server.coordinator.CreateDataSegments;
+import org.apache.druid.timeline.DataSegment;
+import org.joda.time.DateTime;
+import org.joda.time.Interval;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.Measurement;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.TearDown;
+import org.openjdk.jmh.annotations.Warmup;
+import org.openjdk.jmh.infra.Blackhole;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Function;
+
+@State(Scope.Benchmark)
+@Fork(value = 1)
+@Warmup(iterations = 1, time = 1)
+@Measurement(iterations = 20, time = 2)
+@BenchmarkMode({Mode.AverageTime})
+@OutputTimeUnit(TimeUnit.NANOSECONDS)
+public class SqlSegmentsMetadataQueryBenchmark
+{
+
+  private static final DateTime JAN_1 = DateTimes.of("2025-01-01");
+  private static final String V1 = JAN_1.toString();
+  private static final List<DataSegment> WIKI_SEGMENTS_1000X100D
+      = CreateDataSegments.ofDatasource(TestDataSource.WIKI)
+                          .forIntervals(100, Granularities.DAY)
+                          .withNumPartitions(1000)
+                          .startingAt(JAN_1)
+                          .withVersion(V1)
+                          .eachOfSizeInMb(500);
+
+  private TestDerbyConnector derbyConnector;
+
+  @Setup(Level.Trial)
+  public void setup() throws Exception
+  {
+    this.derbyConnector = new TestDerbyConnector("druidBench");
+    derbyConnector.getDBI().open().close();
+    derbyConnector.createSegmentTable();
+    insertSegments(WIKI_SEGMENTS_1000X100D.toArray(new DataSegment[0]));
+  }
+
+  @TearDown(Level.Trial)
+  public void tearDown() throws Exception
+  {
+    derbyConnector.tearDown();
+  }
+
+  @Benchmark
+  public void benchmarkRetrieveUsedSegments_returnAllSegments(Blackhole 
blackhole)
+  {
+    final Interval queryInterval = new 
Interval("2025-01-01T00:00:00.000Z/2025-04-11T00:00:00.000Z");
+    blackhole.consume(readAsSet(q -> 
q.retrieveUsedSegments(TestDataSource.WIKI, List.of(queryInterval))));
+  }
+
+  @Benchmark
+  public void benchmarkRetrieveUsedSegments_returnEmpty(Blackhole blackhole)
+  {
+    final Interval queryInterval = new 
Interval("2025-12-30T00:00:00.000Z/2025-12-31T00:00:00.000Z");
+    blackhole.consume(readAsSet(q -> 
q.retrieveUsedSegments(TestDataSource.WIKI, List.of(queryInterval))));
+  }
+
+  @Benchmark
+  public void benchmarkRetrieveUsedSegments_returnFirstInterval(Blackhole 
blackhole)
+  {
+    final Interval queryInterval = new 
Interval("2025-01-01T00:00:00.000Z/2025-01-02T00:00:00.000Z");
+    blackhole.consume(readAsSet(q -> 
q.retrieveUsedSegments(TestDataSource.WIKI, List.of(queryInterval))));
+  }
+
+  @Benchmark
+  public void benchmarkRetrieveUsedSegments_returnLastInterval(Blackhole 
blackhole)
+  {
+    final Interval queryInterval = new 
Interval("2025-04-10T00:00:00.000Z/2025-04-11T00:00:00.000Z");
+    blackhole.consume(readAsSet(q -> 
q.retrieveUsedSegments(TestDataSource.WIKI, List.of(queryInterval))));
+  }
+
+
+  @Benchmark
+  public void 
benchmarkRetrieveUsedSegments_multipleIntervalsWithOverlaps(Blackhole blackhole)
+  {
+    List<Interval> intervals = List.of(
+        new Interval("2025-01-01T00:00:00.000Z/2025-01-04T00:00:00.000Z"),

Review Comment:
   Similar comments as on the unit test apply here: avoid hardcoding the intervals 
and make them more readable.



##########
benchmarks/src/test/java/org/apache/druid/benchmark/indexing/SqlSegmentsMetadataQueryBenchmark.java:
##########
@@ -0,0 +1,138 @@
+package org.apache.druid.benchmark.indexing;
+
+import com.google.common.collect.ImmutableSet;
+import org.apache.druid.java.util.common.DateTimes;
+import org.apache.druid.java.util.common.granularity.Granularities;
+import org.apache.druid.java.util.common.parsers.CloseableIterator;
+import org.apache.druid.metadata.IndexerSqlMetadataStorageCoordinatorTestBase;
+import org.apache.druid.metadata.MetadataStorageTablesConfig;
+import org.apache.druid.metadata.SqlSegmentsMetadataQuery;
+import org.apache.druid.metadata.TestDerbyConnector;
+import org.apache.druid.segment.TestDataSource;
+import org.apache.druid.segment.TestHelper;
+import org.apache.druid.server.coordinator.CreateDataSegments;
+import org.apache.druid.timeline.DataSegment;
+import org.joda.time.DateTime;
+import org.joda.time.Interval;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.Measurement;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.TearDown;
+import org.openjdk.jmh.annotations.Warmup;
+import org.openjdk.jmh.infra.Blackhole;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Function;
+
+@State(Scope.Benchmark)
+@Fork(value = 1)
+@Warmup(iterations = 1, time = 1)
+@Measurement(iterations = 20, time = 2)
+@BenchmarkMode({Mode.AverageTime})
+@OutputTimeUnit(TimeUnit.NANOSECONDS)
+public class SqlSegmentsMetadataQueryBenchmark
+{
+
+  private static final DateTime JAN_1 = DateTimes.of("2025-01-01");
+  private static final String V1 = JAN_1.toString();
+  private static final List<DataSegment> WIKI_SEGMENTS_1000X100D
+      = CreateDataSegments.ofDatasource(TestDataSource.WIKI)
+                          .forIntervals(100, Granularities.DAY)
+                          .withNumPartitions(1000)
+                          .startingAt(JAN_1)
+                          .withVersion(V1)
+                          .eachOfSizeInMb(500);
+
+  private TestDerbyConnector derbyConnector;
+
+  @Setup(Level.Trial)
+  public void setup() throws Exception
+  {
+    this.derbyConnector = new TestDerbyConnector("druidBench");
+    derbyConnector.getDBI().open().close();
+    derbyConnector.createSegmentTable();
+    insertSegments(WIKI_SEGMENTS_1000X100D.toArray(new DataSegment[0]));
+  }
+
+  @TearDown(Level.Trial)
+  public void tearDown() throws Exception
+  {
+    derbyConnector.tearDown();
+  }
+
+  @Benchmark
+  public void benchmarkRetrieveUsedSegments_returnAllSegments(Blackhole 
blackhole)
+  {
+    final Interval queryInterval = new 
Interval("2025-01-01T00:00:00.000Z/2025-04-11T00:00:00.000Z");
+    blackhole.consume(readAsSet(q -> 
q.retrieveUsedSegments(TestDataSource.WIKI, List.of(queryInterval))));
+  }
+
+  @Benchmark
+  public void benchmarkRetrieveUsedSegments_returnEmpty(Blackhole blackhole)
+  {
+    final Interval queryInterval = new 
Interval("2025-12-30T00:00:00.000Z/2025-12-31T00:00:00.000Z");
+    blackhole.consume(readAsSet(q -> 
q.retrieveUsedSegments(TestDataSource.WIKI, List.of(queryInterval))));
+  }
+
+  @Benchmark
+  public void benchmarkRetrieveUsedSegments_returnFirstInterval(Blackhole 
blackhole)
+  {
+    final Interval queryInterval = new 
Interval("2025-01-01T00:00:00.000Z/2025-01-02T00:00:00.000Z");
+    blackhole.consume(readAsSet(q -> 
q.retrieveUsedSegments(TestDataSource.WIKI, List.of(queryInterval))));
+  }
+
+  @Benchmark
+  public void benchmarkRetrieveUsedSegments_returnLastInterval(Blackhole 
blackhole)
+  {
+    final Interval queryInterval = new 
Interval("2025-04-10T00:00:00.000Z/2025-04-11T00:00:00.000Z");
+    blackhole.consume(readAsSet(q -> 
q.retrieveUsedSegments(TestDataSource.WIKI, List.of(queryInterval))));
+  }
+
+
+  @Benchmark
+  public void 
benchmarkRetrieveUsedSegments_multipleIntervalsWithOverlaps(Blackhole blackhole)
+  {
+    List<Interval> intervals = List.of(
+        new Interval("2025-01-01T00:00:00.000Z/2025-01-04T00:00:00.000Z"),

Review Comment:
   Also, use `Intervals.of()` as I think this constructor is listed as a 
forbidden API.



##########
server/src/test/java/org/apache/druid/metadata/TestDerbyConnector.java:
##########
@@ -49,6 +49,7 @@
 public class TestDerbyConnector extends DerbyConnector
 {
   private final String jdbcUri;
+  private final Supplier<MetadataStorageTablesConfig> dbTables;

Review Comment:
   It is weird for this to be a supplier since the config will not change 
during the lifecycle of a single instance of `TestDerbyConnector`.
   
   In fact, this class should stop accepting suppliers of configs. I guess it 
was done to align with the superclass `DerbyConnector`, but the supplier is needed 
there only so that it plays well with Guice. I don't think we need suppliers for this 
test class.
   
   We can probably fix this up in this PR.



##########
server/src/test/java/org/apache/druid/metadata/IndexerSqlMetadataStorageCoordinatorTestBase.java:
##########
@@ -332,14 +333,17 @@ protected List<DataSegment> 
createAndGetUsedYearSegments(final int startYear, fi
 
     for (int year = startYear; year < endYear; year++) {
       segments.add(createSegment(
-          Intervals.of("%d/%d", year, year + 1),
-          "version",
-          new LinearShardSpec(0))
+                       Intervals.of("%d/%d", year, year + 1),
+                       "version",
+                       new LinearShardSpec(0)
+                   )
       );
     }
     final Set<DataSegment> segmentsSet = new HashSet<>(segments);
-    final Set<DataSegment> committedSegments = 
coordinator.commitSegments(segmentsSet, new SegmentSchemaMapping(
-        CentralizedDatasourceSchemaConfig.SCHEMA_VERSION));
+    final Set<DataSegment> committedSegments = coordinator.commitSegments(
+        segmentsSet, new SegmentSchemaMapping(
+            CentralizedDatasourceSchemaConfig.SCHEMA_VERSION)
+    );

Review Comment:
   I think this can be simplified by just passing null since it is passing an 
empty mapping anyway.
   
   ```suggestion
       final Set<DataSegment> committedSegments = 
coordinator.commitSegments(segmentsSet, null);
   ```



##########
server/src/test/java/org/apache/druid/metadata/SqlSegmentsMetadataQueryTest.java:
##########
@@ -238,6 +238,56 @@ public void 
test_retrieveSegmentForId_returnsNull_forUnknownId()
     );
   }
 
+  @Test
+  public void test_retrieveUsedSegments_withOverlapsCondition()
+  {
+    Interval queryInterval = new 
Interval("2025-01-03T00:00:00.000Z/2025-01-05T00:00:00.000Z");
+
+    Set<DataSegment> result = readAsSet(q -> 
q.retrieveUsedSegments(TestDataSource.WIKI, List.of(queryInterval)));
+
+    Assert.assertEquals(4, result.size());
+    assertSegmentsInInterval(result, queryInterval);
+  }
+
+  @Test
+  public void 
test_retrieveUsedSegments_withOverlapsConditionAndUnusedSegments()
+  {
+    final Set<DataSegment> segmentsToUpdate = 
Set.of(WIKI_SEGMENTS_2X5D.get(2));
+    int numUpdatedSegments = update(
+        sql -> sql.markSegmentsAsUnused(getIds(segmentsToUpdate), 
DateTimes.nowUtc()));
+    Assert.assertEquals(1, numUpdatedSegments);
+
+    Interval queryInterval = new 
Interval("2025-01-01T00:00:00.000Z/2025-01-03T00:00:00.000Z");
+
+    Set<DataSegment> result = readAsSet(q -> 
q.retrieveUsedSegments(TestDataSource.WIKI, List.of(queryInterval)));
+
+    Assert.assertEquals(3, result.size());
+    assertSegmentsInInterval(result, queryInterval);
+  }
+
+  @Test
+  public void test_retrieveUsedSegments_withOverlapsCondition_near_end_date()

Review Comment:
   ```suggestion
     public void test_retrieveUsedSegments_withOverlapsCondition_nearEndDate()
   ```



##########
server/src/test/java/org/apache/druid/metadata/SqlSegmentsMetadataQueryTest.java:
##########
@@ -238,6 +238,56 @@ public void 
test_retrieveSegmentForId_returnsNull_forUnknownId()
     );
   }
 
+  @Test
+  public void test_retrieveUsedSegments_withOverlapsCondition()
+  {
+    Interval queryInterval = new 
Interval("2025-01-03T00:00:00.000Z/2025-01-05T00:00:00.000Z");
+
+    Set<DataSegment> result = readAsSet(q -> 
q.retrieveUsedSegments(TestDataSource.WIKI, List.of(queryInterval)));
+
+    Assert.assertEquals(4, result.size());
+    assertSegmentsInInterval(result, queryInterval);
+  }
+
+  @Test
+  public void 
test_retrieveUsedSegments_withOverlapsConditionAndUnusedSegments()
+  {
+    final Set<DataSegment> segmentsToUpdate = 
Set.of(WIKI_SEGMENTS_2X5D.get(2));
+    int numUpdatedSegments = update(
+        sql -> sql.markSegmentsAsUnused(getIds(segmentsToUpdate), 
DateTimes.nowUtc()));
+    Assert.assertEquals(1, numUpdatedSegments);
+
+    Interval queryInterval = new 
Interval("2025-01-01T00:00:00.000Z/2025-01-03T00:00:00.000Z");
+
+    Set<DataSegment> result = readAsSet(q -> 
q.retrieveUsedSegments(TestDataSource.WIKI, List.of(queryInterval)));
+
+    Assert.assertEquals(3, result.size());
+    assertSegmentsInInterval(result, queryInterval);
+  }
+
+  @Test
+  public void test_retrieveUsedSegments_withOverlapsCondition_near_end_date()
+  {
+    Interval queryInterval = new 
Interval("2025-01-05T00:00:00.000Z/2025-01-06T00:00:00.000Z");

Review Comment:
   Don't use hardcoded dates. Try to form the Interval using the constant 
`JAN_1`.



##########
benchmarks/src/test/java/org/apache/druid/benchmark/indexing/SqlSegmentsMetadataQueryBenchmark.java:
##########
@@ -0,0 +1,138 @@
+package org.apache.druid.benchmark.indexing;
+
+import com.google.common.collect.ImmutableSet;
+import org.apache.druid.java.util.common.DateTimes;
+import org.apache.druid.java.util.common.granularity.Granularities;
+import org.apache.druid.java.util.common.parsers.CloseableIterator;
+import org.apache.druid.metadata.IndexerSqlMetadataStorageCoordinatorTestBase;
+import org.apache.druid.metadata.MetadataStorageTablesConfig;
+import org.apache.druid.metadata.SqlSegmentsMetadataQuery;
+import org.apache.druid.metadata.TestDerbyConnector;
+import org.apache.druid.segment.TestDataSource;
+import org.apache.druid.segment.TestHelper;
+import org.apache.druid.server.coordinator.CreateDataSegments;
+import org.apache.druid.timeline.DataSegment;
+import org.joda.time.DateTime;
+import org.joda.time.Interval;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.Measurement;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.TearDown;
+import org.openjdk.jmh.annotations.Warmup;
+import org.openjdk.jmh.infra.Blackhole;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Function;
+
+@State(Scope.Benchmark)
+@Fork(value = 1)
+@Warmup(iterations = 1, time = 1)
+@Measurement(iterations = 20, time = 2)
+@BenchmarkMode({Mode.AverageTime})
+@OutputTimeUnit(TimeUnit.NANOSECONDS)
+public class SqlSegmentsMetadataQueryBenchmark
+{
+
+  private static final DateTime JAN_1 = DateTimes.of("2025-01-01");
+  private static final String V1 = JAN_1.toString();
+  private static final List<DataSegment> WIKI_SEGMENTS_1000X100D
+      = CreateDataSegments.ofDatasource(TestDataSource.WIKI)
+                          .forIntervals(100, Granularities.DAY)
+                          .withNumPartitions(1000)
+                          .startingAt(JAN_1)
+                          .withVersion(V1)
+                          .eachOfSizeInMb(500);
+
+  private TestDerbyConnector derbyConnector;
+
+  @Setup(Level.Trial)
+  public void setup() throws Exception
+  {
+    this.derbyConnector = new TestDerbyConnector("druidBench");
+    derbyConnector.getDBI().open().close();

Review Comment:
   Add a method `createDatabase()` to `TestDerbyConnector` which internally 
does `getDBI().open().close()`.



##########
server/src/test/java/org/apache/druid/metadata/SqlSegmentsMetadataQueryTest.java:
##########
@@ -238,6 +238,56 @@ public void 
test_retrieveSegmentForId_returnsNull_forUnknownId()
     );
   }
 
+  @Test
+  public void test_retrieveUsedSegments_withOverlapsCondition()
+  {
+    Interval queryInterval = new 
Interval("2025-01-03T00:00:00.000Z/2025-01-05T00:00:00.000Z");
+
+    Set<DataSegment> result = readAsSet(q -> 
q.retrieveUsedSegments(TestDataSource.WIKI, List.of(queryInterval)));
+
+    Assert.assertEquals(4, result.size());
+    assertSegmentsInInterval(result, queryInterval);
+  }
+
+  @Test
+  public void 
test_retrieveUsedSegments_withOverlapsConditionAndUnusedSegments()

Review Comment:
   ```suggestion
     public void 
test_retrieveUsedSegments_withOverlapsCondition_andUnusedSegments()
   ```



##########
server/src/test/java/org/apache/druid/metadata/TestDerbyConnector.java:
##########
@@ -74,8 +75,29 @@ protected TestDerbyConnector(
       CentralizedDatasourceSchemaConfig centralizedDatasourceSchemaConfig
   )
   {
-    super(new NoopMetadataStorageProvider().get(), config, dbTables, new 
DBI(jdbcUri + ";create=true"), centralizedDatasourceSchemaConfig);
+    super(
+        new NoopMetadataStorageProvider().get(),
+        config,
+        dbTables,
+        new DBI(jdbcUri + ";create=true"),
+        centralizedDatasourceSchemaConfig
+    );
     this.jdbcUri = jdbcUri;
+    this.dbTables = dbTables;
+  }
+
+  public TestDerbyConnector(String baseName)

Review Comment:
   We shouldn't need to specify a baseName here. Just use `druidTest` by 
default.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to