github-advanced-security[bot] commented on code in PR #15817:
URL: https://github.com/apache/druid/pull/15817#discussion_r1570082012


##########
server/src/main/java/org/apache/druid/segment/realtime/appenderator/TaskSegmentSchemaUtil.java:
##########
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.segment.realtime.appenderator;
+
+import org.apache.druid.java.util.common.Stopwatch;
+import org.apache.druid.query.aggregation.AggregatorFactory;
+import org.apache.druid.segment.IndexIO;
+import org.apache.druid.segment.QueryableIndex;
+import org.apache.druid.segment.QueryableIndexStorageAdapter;
+import org.apache.druid.segment.SchemaPayload;
+import org.apache.druid.segment.SchemaPayloadPlus;
+import org.apache.druid.segment.StorageAdapter;
+import org.apache.druid.segment.column.RowSignature;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+public class TaskSegmentSchemaUtil
+{
+  /**
+   * Generates segment schema from the segment file.
+   */
+  public static SchemaPayloadPlus getSegmentSchema(File segmentFile, IndexIO 
indexIO) throws IOException
+  {
+    Stopwatch stopwatch = Stopwatch.createStarted();

Review Comment:
   ## Unread local variable
   
   Variable 'Stopwatch stopwatch' is never read.
   
   [Show more 
details](https://github.com/apache/druid/security/code-scanning/7289)



##########
processing/src/main/java/org/apache/druid/segment/DataSegmentWithSchemas.java:
##########
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.segment;
+
+import com.fasterxml.jackson.annotation.JsonCreator;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import org.apache.druid.timeline.DataSegment;
+
+import javax.annotation.Nullable;
+import java.util.HashSet;
+import java.util.Set;
+
+/**
+ * Encapsulates segment metadata and corresponding schema.
+ */
+public class DataSegmentWithSchemas
+{
+  private final Set<DataSegment> segments;
+
+  @Nullable
+  private final SegmentSchemaMapping segmentSchemaMapping;
+
+  public DataSegmentWithSchemas(int schemaVersion)
+  {
+    this.segments = new HashSet<>();
+    this.segmentSchemaMapping = new SegmentSchemaMapping(schemaVersion);
+  }
+
+  @JsonCreator
+  public DataSegmentWithSchemas(
+      @JsonProperty("segments") Set<DataSegment> segments,
+      @JsonProperty("segmentSchemaMapping") @Nullable SegmentSchemaMapping 
segmentSchemaMapping
+  )
+  {
+    this.segments = segments;
+    this.segmentSchemaMapping = segmentSchemaMapping;
+  }
+
+  @JsonProperty
+  public Set<DataSegment> getSegments()

Review Comment:
   ## Exposing internal representation
   
   getSegments exposes the internal representation stored in field segments. 
The returned set may be modified by callers after this call to getSegments.
   
   [Show more 
details](https://github.com/apache/druid/security/code-scanning/7290)



##########
server/src/test/java/org/apache/druid/metadata/IndexerSqlMetadataStorageCoordinatorTestBase.java:
##########
@@ -0,0 +1,563 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.metadata;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import org.apache.druid.java.util.common.DateTimes;
+import org.apache.druid.java.util.common.ISE;
+import org.apache.druid.java.util.common.Intervals;
+import org.apache.druid.java.util.common.Pair;
+import org.apache.druid.java.util.common.StringUtils;
+import org.apache.druid.java.util.common.jackson.JacksonUtils;
+import org.apache.druid.java.util.common.parsers.CloseableIterator;
+import org.apache.druid.segment.SegmentSchemaMapping;
+import org.apache.druid.segment.TestHelper;
+import org.apache.druid.segment.metadata.CentralizedDatasourceSchemaConfig;
+import org.apache.druid.segment.metadata.FingerprintGenerator;
+import org.apache.druid.segment.metadata.SegmentSchemaManager;
+import org.apache.druid.segment.metadata.SegmentSchemaTestUtils;
+import org.apache.druid.server.http.DataSegmentPlus;
+import org.apache.druid.timeline.DataSegment;
+import org.apache.druid.timeline.SegmentId;
+import org.apache.druid.timeline.partition.LinearShardSpec;
+import org.apache.druid.timeline.partition.NoneShardSpec;
+import org.apache.druid.timeline.partition.NumberedShardSpec;
+import org.apache.druid.timeline.partition.ShardSpec;
+import org.apache.druid.timeline.partition.TombstoneShardSpec;
+import org.joda.time.DateTime;
+import org.joda.time.Interval;
+import org.junit.Assert;
+import org.skife.jdbi.v2.PreparedBatch;
+import org.skife.jdbi.v2.ResultIterator;
+import org.skife.jdbi.v2.util.StringMapper;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+
+public class IndexerSqlMetadataStorageCoordinatorTestBase
+{
+  protected static final int MAX_SQL_MEATADATA_RETRY_FOR_TEST = 2;
+
+  protected final ObjectMapper mapper = TestHelper.makeJsonMapper();
+
+  protected final DataSegment defaultSegment = new DataSegment(
+      "fooDataSource",
+      Intervals.of("2015-01-01T00Z/2015-01-02T00Z"),
+      "version",
+      ImmutableMap.of(),
+      ImmutableList.of("dim1"),
+      ImmutableList.of("m1"),
+      new LinearShardSpec(0),
+      9,
+      100
+  );
+
+  protected final DataSegment eternitySegment = new DataSegment(
+      "fooDataSource",
+      Intervals.ETERNITY,
+      "version",
+      ImmutableMap.of(),
+      ImmutableList.of("dim1"),
+      ImmutableList.of("m1"),
+      new LinearShardSpec(0),
+      9,
+      100
+  );
+
+
+  protected final DataSegment firstHalfEternityRangeSegment = new DataSegment(
+      "fooDataSource",
+      new Interval(DateTimes.MIN, DateTimes.of("3000")),
+      "version",
+      ImmutableMap.of(),
+      ImmutableList.of("dim1"),
+      ImmutableList.of("m1"),
+      new LinearShardSpec(0),
+      9,
+      100
+  );
+
+  protected final DataSegment secondHalfEternityRangeSegment = new DataSegment(
+      "fooDataSource",
+      new Interval(DateTimes.of("1970"), DateTimes.MAX),
+      "version",
+      ImmutableMap.of(),
+      ImmutableList.of("dim1"),
+      ImmutableList.of("m1"),
+      new LinearShardSpec(0),
+      9,
+      100
+  );
+  protected final DataSegment defaultSegment2 = new DataSegment(
+      "fooDataSource",
+      Intervals.of("2015-01-01T00Z/2015-01-02T00Z"),
+      "version",
+      ImmutableMap.of(),
+      ImmutableList.of("dim1"),
+      ImmutableList.of("m1"),
+      new LinearShardSpec(1),
+      9,
+      100
+  );
+
+  protected final DataSegment defaultSegment2WithBiggerSize = new DataSegment(
+      "fooDataSource",
+      Intervals.of("2015-01-01T00Z/2015-01-02T00Z"),
+      "version",
+      ImmutableMap.of(),
+      ImmutableList.of("dim1"),
+      ImmutableList.of("m1"),
+      new LinearShardSpec(1),
+      9,
+      200
+  );
+
+  protected final DataSegment defaultSegment3 = new DataSegment(
+      "fooDataSource",
+      Intervals.of("2015-01-03T00Z/2015-01-04T00Z"),
+      "version",
+      ImmutableMap.of(),
+      ImmutableList.of("dim1"),
+      ImmutableList.of("m1"),
+      NoneShardSpec.instance(),
+      9,
+      100
+  );
+
+  // Overshadows defaultSegment, defaultSegment2
+  protected final DataSegment defaultSegment4 = new DataSegment(
+      "fooDataSource",
+      Intervals.of("2015-01-01T00Z/2015-01-02T00Z"),
+      "zversion",
+      ImmutableMap.of(),
+      ImmutableList.of("dim1"),
+      ImmutableList.of("m1"),
+      new LinearShardSpec(0),
+      9,
+      100
+  );
+
+  protected final DataSegment numberedSegment0of0 = new DataSegment(
+      "fooDataSource",
+      Intervals.of("2015-01-01T00Z/2015-01-02T00Z"),
+      "zversion",
+      ImmutableMap.of(),
+      ImmutableList.of("dim1"),
+      ImmutableList.of("m1"),
+      new NumberedShardSpec(0, 0),
+      9,
+      100
+  );
+
+  protected final DataSegment numberedSegment1of0 = new DataSegment(
+      "fooDataSource",
+      Intervals.of("2015-01-01T00Z/2015-01-02T00Z"),
+      "zversion",
+      ImmutableMap.of(),
+      ImmutableList.of("dim1"),
+      ImmutableList.of("m1"),
+      new NumberedShardSpec(1, 0),
+      9,
+      100
+  );
+
+  protected final DataSegment numberedSegment2of0 = new DataSegment(
+      "fooDataSource",
+      Intervals.of("2015-01-01T00Z/2015-01-02T00Z"),
+      "zversion",
+      ImmutableMap.of(),
+      ImmutableList.of("dim1"),
+      ImmutableList.of("m1"),
+      new NumberedShardSpec(2, 0),
+      9,
+      100
+  );
+
+  protected final DataSegment numberedSegment2of1 = new DataSegment(
+      "fooDataSource",
+      Intervals.of("2015-01-01T00Z/2015-01-02T00Z"),
+      "zversion",
+      ImmutableMap.of(),
+      ImmutableList.of("dim1"),
+      ImmutableList.of("m1"),
+      new NumberedShardSpec(2, 1),
+      9,
+      100
+  );
+
+  protected final DataSegment numberedSegment3of1 = new DataSegment(
+      "fooDataSource",
+      Intervals.of("2015-01-01T00Z/2015-01-02T00Z"),
+      "zversion",
+      ImmutableMap.of(),
+      ImmutableList.of("dim1"),
+      ImmutableList.of("m1"),
+      new NumberedShardSpec(3, 1),
+      9,
+      100
+  );
+
+  protected final DataSegment existingSegment1 = new DataSegment(
+      "fooDataSource",
+      Intervals.of("1994-01-01T00Z/1994-01-02T00Z"),
+      "zversion",
+      ImmutableMap.of(),
+      ImmutableList.of("dim1"),
+      ImmutableList.of("m1"),
+      new NumberedShardSpec(1, 1),
+      9,
+      100
+  );
+
+  protected final DataSegment existingSegment2 = new DataSegment(
+      "fooDataSource",
+      Intervals.of("1994-01-02T00Z/1994-01-03T00Z"),
+      "zversion",
+      ImmutableMap.of(),
+      ImmutableList.of("dim1"),
+      ImmutableList.of("m1"),
+      new NumberedShardSpec(1, 1),
+      9,
+      100
+  );
+
+  protected final DataSegment hugeTimeRangeSegment1 = new DataSegment(
+      "hugeTimeRangeDataSource",
+      Intervals.of("-9994-01-02T00Z/1994-01-03T00Z"),
+      "zversion",
+      ImmutableMap.of(),
+      ImmutableList.of("dim1"),
+      ImmutableList.of("m1"),
+      new NumberedShardSpec(0, 1),
+      9,
+      100
+  );
+
+  protected final DataSegment hugeTimeRangeSegment2 = new DataSegment(
+      "hugeTimeRangeDataSource",
+      Intervals.of("2994-01-02T00Z/2994-01-03T00Z"),
+      "zversion",
+      ImmutableMap.of(),
+      ImmutableList.of("dim1"),
+      ImmutableList.of("m1"),
+      new NumberedShardSpec(0, 1),
+      9,
+      100
+  );
+
+  protected final DataSegment hugeTimeRangeSegment3 = new DataSegment(
+      "hugeTimeRangeDataSource",
+      Intervals.of("29940-01-02T00Z/29940-01-03T00Z"),
+      "zversion",
+      ImmutableMap.of(),
+      ImmutableList.of("dim1"),
+      ImmutableList.of("m1"),
+      new NumberedShardSpec(0, 1),
+      9,
+      100
+  );
+
+  protected final DataSegment hugeTimeRangeSegment4 = new DataSegment(
+      "hugeTimeRangeDataSource",
+      Intervals.of("1990-01-01T00Z/19940-01-01T00Z"),
+      "zversion",
+      ImmutableMap.of(),
+      ImmutableList.of("dim1"),
+      ImmutableList.of("m1"),
+      new NumberedShardSpec(0, 1),
+      9,
+      100
+  );
+
+  protected final Set<DataSegment> SEGMENTS = ImmutableSet.of(defaultSegment, 
defaultSegment2);
+  protected final AtomicLong metadataUpdateCounter = new AtomicLong();
+  protected final AtomicLong segmentTableDropUpdateCounter = new AtomicLong();
+
+  protected IndexerSQLMetadataStorageCoordinator coordinator;
+  protected TestDerbyConnector derbyConnector;
+  protected TestDerbyConnector.SegmentsTable segmentsTable;
+  protected SegmentSchemaManager segmentSchemaManager;
+  protected FingerprintGenerator fingerprintGenerator;
+  protected SegmentSchemaTestUtils segmentSchemaTestUtils;
+
+  protected static class DS
+  {
+    static final String WIKI = "wiki";
+  }
+
+  protected DataSegment createSegment(Interval interval, String version, 
ShardSpec shardSpec)
+  {
+    return DataSegment.builder()
+                      .dataSource(DS.WIKI)
+                      .interval(interval)
+                      .version(version)
+                      .shardSpec(shardSpec)
+                      .size(100)
+                      .build();
+  }
+
+  protected List<DataSegment> createAndGetUsedYearSegments(final int 
startYear, final int endYear) throws IOException
+  {
+    final List<DataSegment> segments = new ArrayList<>();
+
+    for (int year = startYear; year < endYear; year++) {
+      segments.add(createSegment(
+          Intervals.of("%d/%d", year, year + 1),
+          "version",
+          new LinearShardSpec(0))
+      );
+    }
+    final Set<DataSegment> segmentsSet = new HashSet<>(segments);
+    final Set<DataSegment> committedSegments = 
coordinator.commitSegments(segmentsSet, new SegmentSchemaMapping(
+        CentralizedDatasourceSchemaConfig.SCHEMA_VERSION));
+    Assert.assertTrue(committedSegments.containsAll(segmentsSet));
+
+    return segments;
+  }
+
+  protected ImmutableList<DataSegment> retrieveUnusedSegments(
+      final List<Interval> intervals,
+      final Integer limit,
+      final String lastSegmentId,
+      final SortOrder sortOrder,
+      final DateTime maxUsedStatusLastUpdatedTime,
+      final MetadataStorageTablesConfig tablesConfig
+  )
+  {
+    return derbyConnector.inReadOnlyTransaction(
+        (handle, status) -> {
+          try (final CloseableIterator<DataSegment> iterator =
+                   SqlSegmentsMetadataQuery.forHandle(
+                                               handle,
+                                               derbyConnector,
+                                               tablesConfig,
+                                               mapper
+                                           )
+                                           .retrieveUnusedSegments(DS.WIKI, 
intervals, null, limit, lastSegmentId, sortOrder, 
maxUsedStatusLastUpdatedTime)) {
+            return ImmutableList.copyOf(iterator);
+          }
+        }
+    );
+  }
+
+  protected ImmutableList<DataSegmentPlus> retrieveUnusedSegmentsPlus(
+      final List<Interval> intervals,
+      final Integer limit,
+      final String lastSegmentId,
+      final SortOrder sortOrder,
+      final DateTime maxUsedStatusLastUpdatedTime,
+      MetadataStorageTablesConfig tablesConfig
+  )
+  {
+    return derbyConnector.inReadOnlyTransaction(
+        (handle, status) -> {
+          try (final CloseableIterator<DataSegmentPlus> iterator =
+                   SqlSegmentsMetadataQuery.forHandle(
+                                               handle,
+                                               derbyConnector,
+                                               tablesConfig,
+                                               mapper
+                                           )
+                                           
.retrieveUnusedSegmentsPlus(DS.WIKI, intervals, null, limit, lastSegmentId, 
sortOrder, maxUsedStatusLastUpdatedTime)) {
+            return ImmutableList.copyOf(iterator);
+          }
+        }
+    );
+  }
+
+  protected void verifyContainsAllSegmentsPlus(
+      List<DataSegment> expectedSegments,
+      List<DataSegmentPlus> actualUnusedSegmentsPlus,
+      DateTime usedStatusLastUpdatedTime)
+  {
+    Map<SegmentId, DataSegment> expectedIdToSegment = 
expectedSegments.stream().collect(Collectors.toMap(DataSegment::getId, 
Function.identity()));
+    Map<SegmentId, DataSegmentPlus> actualIdToSegmentPlus = 
actualUnusedSegmentsPlus.stream()
+                                                                               
     .collect(Collectors.toMap(d -> d.getDataSegment().getId(), 
Function.identity()));
+    Assert.assertTrue(expectedIdToSegment.entrySet().stream().allMatch(e -> {
+      DataSegmentPlus segmentPlus = actualIdToSegmentPlus.get(e.getKey());
+      return segmentPlus != null
+             && 
!segmentPlus.getCreatedDate().isAfter(usedStatusLastUpdatedTime)
+             && segmentPlus.getUsedStatusLastUpdatedDate() != null
+             && 
segmentPlus.getUsedStatusLastUpdatedDate().equals(usedStatusLastUpdatedTime);
+    }));
+  }
+
+  protected void verifyEqualsAllSegmentsPlus(
+      List<DataSegment> expectedSegments,
+      List<DataSegmentPlus> actualUnusedSegmentsPlus,
+      DateTime usedStatusLastUpdatedTime
+  )
+  {
+    Assert.assertEquals(expectedSegments.size(), 
actualUnusedSegmentsPlus.size());
+    for (int i = 0; i < expectedSegments.size(); i++) {
+      DataSegment expectedSegment = expectedSegments.get(i);
+      DataSegmentPlus actualSegmentPlus = actualUnusedSegmentsPlus.get(i);
+      Assert.assertEquals(expectedSegment.getId(), 
actualSegmentPlus.getDataSegment().getId());
+      
Assert.assertTrue(!actualSegmentPlus.getCreatedDate().isAfter(usedStatusLastUpdatedTime)
+                        && actualSegmentPlus.getUsedStatusLastUpdatedDate() != 
null
+                        && 
actualSegmentPlus.getUsedStatusLastUpdatedDate().equals(usedStatusLastUpdatedTime));
+    }
+  }
+
+  /**
+   * This test-only shard type is to test the behavior of "old generation" 
tombstones with 1 core partition.
+   */
+  protected static class TombstoneShardSpecWith1CorePartition extends 
TombstoneShardSpec
+  {
+    @Override
+    @JsonProperty("partitions")
+    public int getNumCorePartitions()
+    {
+      return 1;
+    }
+  }
+
+
+  protected void markAllSegmentsUnused(MetadataStorageTablesConfig 
tablesConfig)
+  {
+    markAllSegmentsUnused(SEGMENTS, DateTimes.nowUtc(), tablesConfig);
+  }
+
+  protected void markAllSegmentsUnused(Set<DataSegment> segments, DateTime 
usedStatusLastUpdatedTime, MetadataStorageTablesConfig tablesConfig)

Review Comment:
   ## Useless parameter
   
   The parameter 'tablesConfig' is never used.
   
   [Show more 
details](https://github.com/apache/druid/security/code-scanning/7288)



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to