kfaraz commented on code in PR #15817: URL: https://github.com/apache/druid/pull/15817#discussion_r1569878108
########## processing/src/main/java/org/apache/druid/segment/SegmentMetadata.java: ########## @@ -0,0 +1,84 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.segment; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; + +import java.util.Objects; + +/** + * Encapsulates segment level information like numRows, schema fingerprint. + */ +public class SegmentMetadata +{ + final Long numRows; + final String schemaFingerprint; Review Comment: Fields need to be private. 
########## server/src/main/java/org/apache/druid/metadata/IndexerSQLMetadataStorageCoordinator.java: ########## @@ -1801,16 +1837,89 @@ public int deletePendingSegments(String dataSource) ); } + private boolean isSchemaPresent(SegmentSchemaMapping segmentSchemaMapping) + { + return publishSchema() + && segmentSchemaMapping != null + && segmentSchemaMapping.isNonEmpty(); + } + + private Map<String, Long> persistSchema( + final Handle handle, + final Set<DataSegment> segments, + final SegmentSchemaMapping segmentSchemaMapping + ) throws JsonProcessingException + { + if (segmentSchemaMapping.getSchemaVersion() != CentralizedDatasourceSchemaConfig.SCHEMA_VERSION) { + log.error( + "Schema version [%d] doesn't match the current version [%d], dropping the schema [%s].", Review Comment: Are we "dropping" the schema or just not persisting it? Please rephrase accordingly. ########## server/src/main/java/org/apache/druid/metadata/IndexerSQLMetadataStorageCoordinator.java: ########## @@ -473,11 +500,13 @@ public SegmentPublishResult commitReplaceSegments( return connector.retryTransaction( (handle, transactionStatus) -> { final Set<DataSegment> segmentsToInsert = new HashSet<>(replaceSegments); - segmentsToInsert.addAll( - createNewIdsOfAppendSegmentsAfterReplace(handle, replaceSegments, locksHeldByReplaceTask) - ); + + Set<DataSegmentWithSchemaInformation> appendAfterReplaceSegmentMetadata = + createNewIdsOfAppendSegmentsAfterReplace(handle, replaceSegments, locksHeldByReplaceTask); + SegmentPublishResult result = SegmentPublishResult.ok( - insertSegments(handle, segmentsToInsert) + insertSegments(handle, segmentsToInsert, + segmentSchemaMapping, appendAfterReplaceSegmentMetadata, Collections.emptyMap()) Review Comment: Please fix formatting. 
########## server/src/main/java/org/apache/druid/metadata/IndexerSQLMetadataStorageCoordinator.java: ########## @@ -2694,4 +2909,71 @@ public String toString() } } + private static class DataSegmentWithSchemaInformation Review Comment: Why can't we just use `DataSegmentPlus`? It already has fields `dataSegment`, `schemaId`, `numRows`. The other fields, `createdDate`, `usedStatusLastUpdated` are all nullable. Please reuse that class. ########## server/src/main/java/org/apache/druid/metadata/IndexerSQLMetadataStorageCoordinator.java: ########## @@ -2147,17 +2291,53 @@ private Set<DataSegment> insertSegments(Handle handle, Set<DataSegment> segments final PreparedBatch batch = handle.prepareBatch(buildSqlToInsertSegments()); for (List<DataSegment> partition : partitionedSegments) { for (DataSegment segment : partition) { - batch.add() - .bind("id", segment.getId().toString()) - .bind("dataSource", segment.getDataSource()) - .bind("created_date", now) - .bind("start", segment.getInterval().getStart().toString()) - .bind("end", segment.getInterval().getEnd().toString()) - .bind("partitioned", (segment.getShardSpec() instanceof NoneShardSpec) ? 
false : true) - .bind("version", segment.getVersion()) - .bind("used", true) - .bind("payload", jsonMapper.writeValueAsBytes(segment)) - .bind("used_status_last_updated", now); + String segmentId = segment.getId().toString(); + Long schemaId = null; + Long numRows = null; + + if (schemaPresent && + (minimalSegmentSchemas.getSegmentIdToMetadataMap().containsKey(segmentId) || + (newVersionForAppendToParent.containsKey(segment.getId()) + && minimalSegmentSchemas.getSegmentIdToMetadataMap() + .containsKey(newVersionForAppendToParent.get(segment.getId()).toString())))) { + String segmentIdToUse; + if (minimalSegmentSchemas.getSegmentIdToMetadataMap().containsKey(segmentId)) { + segmentIdToUse = segmentId; + } else { + segmentIdToUse = newVersionForAppendToParent.get(segment.getId()).toString(); Review Comment: +1, this entire method needs to be simplified. ########## server/src/main/java/org/apache/druid/segment/metadata/KillUnreferencedSegmentSchemas.java: ########## @@ -0,0 +1,93 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.segment.metadata; + +import com.google.inject.Inject; +import org.apache.druid.guice.LazySingleton; +import org.apache.druid.java.util.emitter.EmittingLogger; +import org.apache.druid.metadata.SegmentsMetadataManager; + +import java.util.List; + +/** + * This class deals with cleaning schema which is not referenced by any used segment. + * <p> + * <ol> + * <li>If a schema is not referenced, UPDATE schemas SET used = false, used_status_last_updated = now</li> + * <li>DELETE FROM schemas WHERE used = false AND used_status_last_updated < 6 hours ago</li> + * <li>When creating a new segment, try to find schema for the fingerprint of the segment.</li> + * <ol type="a"> + * <li> If no record found, create a new one.</li> + * <li> If record found which has used = true, reuse this schema_id.</li> + * <li> If record found which has used = false, UPDATE SET used = true, used_status_last_updated = now</li> + * </ol> + * </ol> + * </p> + * <p> + * Possible race conditions: + * <ol type="a"> + * <li> Between ops 1 and 3b: In other words, we might end up with a segment that points to a schema that has just been marked as unused. This can be repaired by the coordinator duty. </li> + * <li> Between 2 and 3c: This can be handled. Either 2 will fail to update any rows (good case) or 3c will fail to update any rows and thus return 0 (bad case). In the bad case, we need to recreate the schema, same as step 3a. 
</li> + * </ol> + * </p> + */ +@LazySingleton +public class KillUnreferencedSegmentSchemas +{ + private static final EmittingLogger log = new EmittingLogger(KillUnreferencedSegmentSchemas.class); + private final SegmentSchemaManager segmentSchemaManager; + private final SegmentsMetadataManager metadataManager; + + @Inject + public KillUnreferencedSegmentSchemas( + SegmentSchemaManager segmentSchemaManager, + SegmentsMetadataManager metadataManager + ) + { + this.segmentSchemaManager = segmentSchemaManager; + this.metadataManager = metadataManager; + } + + public int cleanup(long timestamp) + { + // 1: Identify unreferenced schema and mark them as unused. These will get deleted after a fixed period. + int unused = segmentSchemaManager.identifyAndMarkSchemaUnused(); + log.info("Identified [%s] unreferenced schema. Marking them as unused.", unused); Review Comment: At this point, you have already marked the schema as unused. It is also possible that the method above returned 0. So we should log only if the numUpdated > 0. 
########## server/src/main/java/org/apache/druid/metadata/IndexerSQLMetadataStorageCoordinator.java: ########## @@ -444,7 +468,9 @@ public SegmentPublishResult commitSegmentsAndMetadata( } } - final Set<DataSegment> inserted = announceHistoricalSegmentBatch(handle, segments, usedSegments); + final Set<DataSegment> inserted = announceHistoricalSegmentBatch(handle, segments, usedSegments, + segmentSchemaMapping + ); Review Comment: Nit: formatting ```suggestion final Set<DataSegment> inserted = announceHistoricalSegmentBatch(handle, segments, usedSegments, segmentSchemaMapping); ``` ########## server/src/main/java/org/apache/druid/metadata/IndexerSQLMetadataStorageCoordinator.java: ########## @@ -1801,16 +1837,89 @@ public int deletePendingSegments(String dataSource) ); } + private boolean isSchemaPresent(SegmentSchemaMapping segmentSchemaMapping) + { + return publishSchema() + && segmentSchemaMapping != null + && segmentSchemaMapping.isNonEmpty(); + } + + private Map<String, Long> persistSchema( + final Handle handle, + final Set<DataSegment> segments, + final SegmentSchemaMapping segmentSchemaMapping + ) throws JsonProcessingException + { + if (segmentSchemaMapping.getSchemaVersion() != CentralizedDatasourceSchemaConfig.SCHEMA_VERSION) { + log.error( + "Schema version [%d] doesn't match the current version [%d], dropping the schema [%s].", + segmentSchemaMapping.getSchemaVersion(), + CentralizedDatasourceSchemaConfig.SCHEMA_VERSION, + segmentSchemaMapping + ); + return null; + } + String dataSource = segments.stream().iterator().next().getDataSource(); + + log.info("Persisting segment schema: [%s].", segmentSchemaMapping); + + segmentSchemaManager.persistSegmentSchema( + handle, + dataSource, + segmentSchemaMapping.getSchemaFingerprintToPayloadMap(), + segmentSchemaMapping.getSchemaVersion() + ); + + // fetch schemaId + Map<String, Long> fingerprintSchemaIdMap = + segmentSchemaManager.schemaIdFetchBatch( + handle, + dataSource, + 
segmentSchemaMapping.getSchemaVersion(), + segmentSchemaMapping.getSchemaFingerprintToPayloadMap().keySet() + ); + + log.info("Fingerprint schema map is [%s]", fingerprintSchemaIdMap); + + return fingerprintSchemaIdMap; + } + + private Pair<Long, Long> fetchSegmentSchemaInformation( Review Comment: Fetch implies getting something from the underlying metadata store. You can call this method `get` or `extract`. ########## server/src/main/java/org/apache/druid/metadata/IndexerSQLMetadataStorageCoordinator.java: ########## @@ -2585,6 +2794,12 @@ public int deleteUpgradeSegmentsForTask(final String taskId) ); } + private boolean publishSchema() + { + return centralizedDatasourceSchemaConfig.isEnabled() + && !centralizedDatasourceSchemaConfig.isTaskSchemaPublishDisabled(); Review Comment: Just assign the value of this to a boolean member field `shouldPublishSchema`. No need to check the config every time a commit happens. ########## server/src/main/java/org/apache/druid/metadata/IndexerSQLMetadataStorageCoordinator.java: ########## @@ -1829,8 +1938,18 @@ private Set<DataSegment> announceHistoricalSegmentBatch( PreparedBatch preparedBatch = handle.prepareBatch(buildSqlToInsertSegments()); for (List<DataSegment> partition : partitionedSegments) { for (DataSegment segment : partition) { - preparedBatch.add() - .bind("id", segment.getId().toString()) + String segmentId = segment.getId().toString(); + Long schemaId = null; + Long numRows = null; + + if (schemaPresent && segmentSchemaMapping.getSegmentIdToMetadataMap().containsKey(segmentId)) { + Pair<Long, Long> schemaInfoPair = fetchSegmentSchemaInformation(segmentSchemaMapping, segmentId, fingerprintSchemaIdMap); Review Comment: Line exceeds hard limit of 120 chars. 
```suggestion Pair<Long, Long> schemaInfoPair = fetchSegmentSchemaInformation(segmentSchemaMapping, segmentId, fingerprintSchemaIdMap); ``` ########## server/src/main/java/org/apache/druid/metadata/IndexerSQLMetadataStorageCoordinator.java: ########## @@ -1801,16 +1837,89 @@ public int deletePendingSegments(String dataSource) ); } + private boolean isSchemaPresent(SegmentSchemaMapping segmentSchemaMapping) + { + return publishSchema() + && segmentSchemaMapping != null + && segmentSchemaMapping.isNonEmpty(); + } + + private Map<String, Long> persistSchema( + final Handle handle, + final Set<DataSegment> segments, + final SegmentSchemaMapping segmentSchemaMapping + ) throws JsonProcessingException + { + if (segmentSchemaMapping.getSchemaVersion() != CentralizedDatasourceSchemaConfig.SCHEMA_VERSION) { + log.error( Review Comment: I suppose this can only happen during a rolling upgrade, when a task wants a new version of schema that overlord is not aware of. The log line should mention that schema backfill will take care of schema not being persisted right now. ########## server/src/main/java/org/apache/druid/segment/metadata/KillUnreferencedSegmentSchemas.java: ########## @@ -0,0 +1,93 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.segment.metadata; + +import com.google.inject.Inject; +import org.apache.druid.guice.LazySingleton; +import org.apache.druid.java.util.emitter.EmittingLogger; +import org.apache.druid.metadata.SegmentsMetadataManager; + +import java.util.List; + +/** + * This class deals with cleaning schema which is not referenced by any used segment. + * <p> + * <ol> + * <li>If a schema is not referenced, UPDATE schemas SET used = false, used_status_last_updated = now</li> + * <li>DELETE FROM schemas WHERE used = false AND used_status_last_updated < 6 hours ago</li> + * <li>When creating a new segment, try to find schema for the fingerprint of the segment.</li> + * <ol type="a"> + * <li> If no record found, create a new one.</li> + * <li> If record found which has used = true, reuse this schema_id.</li> + * <li> If record found which has used = false, UPDATE SET used = true, used_status_last_updated = now</li> + * </ol> + * </ol> + * </p> + * <p> + * Possible race conditions: + * <ol type="a"> + * <li> Between ops 1 and 3b: In other words, we might end up with a segment that points to a schema that has just been marked as unused. This can be repaired by the coordinator duty. </li> + * <li> Between 2 and 3c: This can be handled. Either 2 will fail to update any rows (good case) or 3c will fail to update any rows and thus return 0 (bad case). In the bad case, we need to recreate the schema, same as step 3a. 
</li> + * </ol> + * </p> + */ +@LazySingleton +public class KillUnreferencedSegmentSchemas +{ + private static final EmittingLogger log = new EmittingLogger(KillUnreferencedSegmentSchemas.class); + private final SegmentSchemaManager segmentSchemaManager; + private final SegmentsMetadataManager metadataManager; + + @Inject + public KillUnreferencedSegmentSchemas( + SegmentSchemaManager segmentSchemaManager, + SegmentsMetadataManager metadataManager + ) + { + this.segmentSchemaManager = segmentSchemaManager; + this.metadataManager = metadataManager; + } + + public int cleanup(long timestamp) + { + // 1: Identify unreferenced schema and mark them as unused. These will get deleted after a fixed period. + int unused = segmentSchemaManager.identifyAndMarkSchemaUnused(); + log.info("Identified [%s] unreferenced schema. Marking them as unused.", unused); Review Comment: ```suggestion int numSchemasMarkedAsUnused = segmentSchemaManager.identifyAndMarkSchemaUnused(); if (numSchemasMarkedAsUnused > 0) log.info("Marked [%d] unreferenced schemas as unused.", numSchemasMarkedAsUnused); ``` ########## server/src/main/java/org/apache/druid/metadata/IndexerSQLMetadataStorageCoordinator.java: ########## @@ -1801,16 +1837,89 @@ public int deletePendingSegments(String dataSource) ); } + private boolean isSchemaPresent(SegmentSchemaMapping segmentSchemaMapping) + { + return publishSchema() + && segmentSchemaMapping != null + && segmentSchemaMapping.isNonEmpty(); + } + + private Map<String, Long> persistSchema( + final Handle handle, + final Set<DataSegment> segments, + final SegmentSchemaMapping segmentSchemaMapping + ) throws JsonProcessingException + { + if (segmentSchemaMapping.getSchemaVersion() != CentralizedDatasourceSchemaConfig.SCHEMA_VERSION) { + log.error( + "Schema version [%d] doesn't match the current version [%d], dropping the schema [%s].", + segmentSchemaMapping.getSchemaVersion(), + CentralizedDatasourceSchemaConfig.SCHEMA_VERSION, + segmentSchemaMapping + ); + return 
null; + } + String dataSource = segments.stream().iterator().next().getDataSource(); + + log.info("Persisting segment schema: [%s].", segmentSchemaMapping); + + segmentSchemaManager.persistSegmentSchema( + handle, + dataSource, + segmentSchemaMapping.getSchemaFingerprintToPayloadMap(), + segmentSchemaMapping.getSchemaVersion() + ); + + // fetch schemaId + Map<String, Long> fingerprintSchemaIdMap = + segmentSchemaManager.schemaIdFetchBatch( + handle, + dataSource, + segmentSchemaMapping.getSchemaVersion(), + segmentSchemaMapping.getSchemaFingerprintToPayloadMap().keySet() + ); + + log.info("Fingerprint schema map is [%s]", fingerprintSchemaIdMap); + + return fingerprintSchemaIdMap; + } + + private Pair<Long, Long> fetchSegmentSchemaInformation( + SegmentSchemaMapping segmentSchemaMapping, + String segmentId, + Map<String, Long> fingerprintSchemaIdMap + ) + { + Long schemaId = null, numRows = null; Review Comment: Nit: Please break up the declaration into separate lines. ########## server/src/main/java/org/apache/druid/metadata/IndexerSQLMetadataStorageCoordinator.java: ########## @@ -1801,16 +1837,89 @@ public int deletePendingSegments(String dataSource) ); } + private boolean isSchemaPresent(SegmentSchemaMapping segmentSchemaMapping) + { + return publishSchema() + && segmentSchemaMapping != null + && segmentSchemaMapping.isNonEmpty(); + } + + private Map<String, Long> persistSchema( + final Handle handle, + final Set<DataSegment> segments, + final SegmentSchemaMapping segmentSchemaMapping + ) throws JsonProcessingException + { + if (segmentSchemaMapping.getSchemaVersion() != CentralizedDatasourceSchemaConfig.SCHEMA_VERSION) { + log.error( + "Schema version [%d] doesn't match the current version [%d], dropping the schema [%s].", + segmentSchemaMapping.getSchemaVersion(), + CentralizedDatasourceSchemaConfig.SCHEMA_VERSION, + segmentSchemaMapping + ); + return null; + } + String dataSource = segments.stream().iterator().next().getDataSource(); + + log.info("Persisting 
segment schema: [%s].", segmentSchemaMapping); + + segmentSchemaManager.persistSegmentSchema( + handle, + dataSource, + segmentSchemaMapping.getSchemaFingerprintToPayloadMap(), + segmentSchemaMapping.getSchemaVersion() + ); + + // fetch schemaId + Map<String, Long> fingerprintSchemaIdMap = + segmentSchemaManager.schemaIdFetchBatch( + handle, + dataSource, + segmentSchemaMapping.getSchemaVersion(), + segmentSchemaMapping.getSchemaFingerprintToPayloadMap().keySet() + ); + + log.info("Fingerprint schema map is [%s]", fingerprintSchemaIdMap); Review Comment: Why does this need to be logged? ########## server/src/main/java/org/apache/druid/metadata/IndexerSQLMetadataStorageCoordinator.java: ########## @@ -1801,16 +1837,89 @@ public int deletePendingSegments(String dataSource) ); } + private boolean isSchemaPresent(SegmentSchemaMapping segmentSchemaMapping) + { + return publishSchema() + && segmentSchemaMapping != null + && segmentSchemaMapping.isNonEmpty(); + } + + private Map<String, Long> persistSchema( + final Handle handle, + final Set<DataSegment> segments, + final SegmentSchemaMapping segmentSchemaMapping + ) throws JsonProcessingException + { + if (segmentSchemaMapping.getSchemaVersion() != CentralizedDatasourceSchemaConfig.SCHEMA_VERSION) { + log.error( + "Schema version [%d] doesn't match the current version [%d], dropping the schema [%s].", + segmentSchemaMapping.getSchemaVersion(), + CentralizedDatasourceSchemaConfig.SCHEMA_VERSION, + segmentSchemaMapping + ); + return null; + } + String dataSource = segments.stream().iterator().next().getDataSource(); + + log.info("Persisting segment schema: [%s].", segmentSchemaMapping); + + segmentSchemaManager.persistSegmentSchema( + handle, + dataSource, + segmentSchemaMapping.getSchemaFingerprintToPayloadMap(), + segmentSchemaMapping.getSchemaVersion() + ); + + // fetch schemaId + Map<String, Long> fingerprintSchemaIdMap = + segmentSchemaManager.schemaIdFetchBatch( + handle, + dataSource, + 
segmentSchemaMapping.getSchemaVersion(), + segmentSchemaMapping.getSchemaFingerprintToPayloadMap().keySet() + ); + + log.info("Fingerprint schema map is [%s]", fingerprintSchemaIdMap); + + return fingerprintSchemaIdMap; + } + + private Pair<Long, Long> fetchSegmentSchemaInformation( + SegmentSchemaMapping segmentSchemaMapping, + String segmentId, + Map<String, Long> fingerprintSchemaIdMap + ) + { + Long schemaId = null, numRows = null; + SegmentMetadata segmentMetadata = segmentSchemaMapping.getSegmentIdToMetadataMap().get(segmentId); + if (segmentMetadata != null) { + numRows = segmentMetadata.getNumRows(); + String fingerprint = segmentMetadata.getSchemaFingerprint(); + if (fingerprintSchemaIdMap != null && fingerprintSchemaIdMap.containsKey(fingerprint)) { + schemaId = fingerprintSchemaIdMap.get(fingerprint); + } + } + return Pair.of(schemaId, numRows); + } + private Set<DataSegment> announceHistoricalSegmentBatch( final Handle handle, final Set<DataSegment> segments, - final Set<DataSegment> usedSegments + final Set<DataSegment> usedSegments, + @Nullable final SegmentSchemaMapping segmentSchemaMapping ) throws IOException { final Set<DataSegment> toInsertSegments = new HashSet<>(); + Map<String, Long> fingerprintSchemaIdMap = null; + boolean schemaPresent = isSchemaPresent(segmentSchemaMapping); Review Comment: Rename this to `shouldPersistSchema`. `schemaPresent` is very vague. 
########## server/src/main/java/org/apache/druid/metadata/IndexerSQLMetadataStorageCoordinator.java: ########## @@ -1801,16 +1837,89 @@ public int deletePendingSegments(String dataSource) ); } + private boolean isSchemaPresent(SegmentSchemaMapping segmentSchemaMapping) + { + return publishSchema() + && segmentSchemaMapping != null + && segmentSchemaMapping.isNonEmpty(); + } + + private Map<String, Long> persistSchema( + final Handle handle, + final Set<DataSegment> segments, + final SegmentSchemaMapping segmentSchemaMapping + ) throws JsonProcessingException + { + if (segmentSchemaMapping.getSchemaVersion() != CentralizedDatasourceSchemaConfig.SCHEMA_VERSION) { + log.error( + "Schema version [%d] doesn't match the current version [%d], dropping the schema [%s].", + segmentSchemaMapping.getSchemaVersion(), + CentralizedDatasourceSchemaConfig.SCHEMA_VERSION, + segmentSchemaMapping + ); + return null; + } + String dataSource = segments.stream().iterator().next().getDataSource(); + + log.info("Persisting segment schema: [%s].", segmentSchemaMapping); Review Comment: Please log only the information that will be helpful for debugging issues in the future. The schema mapping could be a large payload in case of a large number of segments being committed by a batch task. It would be difficult to sift through this information for anything useful. ########## server/src/main/java/org/apache/druid/metadata/IndexerSQLMetadataStorageCoordinator.java: ########## @@ -1829,8 +1938,18 @@ private Set<DataSegment> announceHistoricalSegmentBatch( PreparedBatch preparedBatch = handle.prepareBatch(buildSqlToInsertSegments()); for (List<DataSegment> partition : partitionedSegments) { for (DataSegment segment : partition) { - preparedBatch.add() - .bind("id", segment.getId().toString()) + String segmentId = segment.getId().toString(); Review Comment: Prefer using a concrete `SegmentId` object instead of `String` wherever possible. 
########## services/src/main/java/org/apache/druid/cli/ServerRunnable.java: ########## @@ -49,6 +53,8 @@ */ public abstract class ServerRunnable extends GuiceRunnable { + public static final String CENTRALIZED_DATASOURCE_SCHEMA_ENABLED = CentralizedDatasourceSchemaConfig.PROPERTY_PREFIX + ".enabled"; Review Comment: Line exceeds limit of 120 chars: ```suggestion public static final String CENTRALIZED_DATASOURCE_SCHEMA_ENABLED = CentralizedDatasourceSchemaConfig.PROPERTY_PREFIX + ".enabled"; ``` ########## server/src/main/java/org/apache/druid/server/coordinator/duty/KillUnreferencedSegmentSchemaDuty.java: ########## @@ -0,0 +1,106 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.server.coordinator.duty; + +import org.apache.druid.java.util.common.DateTimes; +import org.apache.druid.java.util.common.logger.Logger; +import org.apache.druid.metadata.SegmentsMetadataManager; +import org.apache.druid.segment.metadata.SegmentSchemaManager; +import org.apache.druid.server.coordinator.DruidCoordinatorConfig; +import org.apache.druid.server.coordinator.stats.Stats; +import org.joda.time.DateTime; + +import java.util.List; + +/** + * Coordinator duty to clean up segment schema which are not referenced by any used segment. + * <p> + * <ol> + * <li>If a schema is not referenced, UPDATE schemas SET used = false, used_status_last_updated = now</li> + * <li>DELETE FROM schemas WHERE used = false AND used_status_last_updated < 6 hours ago</li> + * <li>When creating a new segment, try to find schema for the fingerprint of the segment.</li> + * <ol type="a"> + * <li> If no record found, create a new one.</li> + * <li> If record found which has used = true, reuse this schema_id.</li> + * <li> If record found which has used = false, UPDATE SET used = true, used_status_last_updated = now</li> + * </ol> + * </ol> + * </p> + * <p> + * Possible race conditions: + * <ol type="a"> + * <li> Between ops 1 and 3b: In other words, we might end up with a segment that points to a schema that has just been marked as unused. This can be repaired by the coordinator duty. </li> + * <li> Between 2 and 3c: This can be handled. Either 2 will fail to update any rows (good case) or 3c will fail to update any rows and thus return 0 (bad case). In the bad case, we need to recreate the schema, same as step 3a. 
</li> + * </ol> + * </p> + */ +public class KillUnreferencedSegmentSchemaDuty extends MetadataCleanupDuty +{ + private static final Logger log = new Logger(KillUnreferencedSegmentSchemaDuty.class); + private final SegmentSchemaManager segmentSchemaManager; + private final SegmentsMetadataManager segmentsMetadataManager; + + public KillUnreferencedSegmentSchemaDuty( + DruidCoordinatorConfig config, + SegmentSchemaManager segmentSchemaManager, + SegmentsMetadataManager segmentsMetadataManager + ) + { + super( + "segmentSchema", + "druid.coordinator.kill.segmentSchema", + config.isSegmentSchemaKillEnabled(), + config.getSegmentSchemaKillPeriod(), + config.getSegmentSchemaKillDurationToRetain(), + Stats.Kill.RULES, + config + ); + this.segmentSchemaManager = segmentSchemaManager; + this.segmentsMetadataManager = segmentsMetadataManager; + } + + @Override + protected int cleanupEntriesCreatedBefore(DateTime minCreatedTime) + { + log.info("MinCreatedTime is [%s], currentTime is [%s]", minCreatedTime, DateTimes.nowUtc().toString()); + // 1: Identify unreferenced schema and mark them as unused. These will get deleted after a fixed period. 
+ int unused = segmentSchemaManager.markUnreferencedSchemasAsUnused(); + log.info("Identified [%s] unreferenced schema and marking them as unused.", unused); Review Comment: ```suggestion int numMarkedAsUnused = segmentSchemaManager.markUnreferencedSchemasAsUnused(); if (numMarkedAsUnused > 0) { log.info("Marked [%s] unreferenced schemas as unused.", numMarkedAsUnused); } ``` ########## server/src/main/java/org/apache/druid/metadata/IndexerSQLMetadataStorageCoordinator.java: ########## @@ -1801,16 +1837,89 @@ public int deletePendingSegments(String dataSource) ); } + private boolean isSchemaPresent(SegmentSchemaMapping segmentSchemaMapping) + { + return publishSchema() + && segmentSchemaMapping != null + && segmentSchemaMapping.isNonEmpty(); + } + + private Map<String, Long> persistSchema( + final Handle handle, + final Set<DataSegment> segments, + final SegmentSchemaMapping segmentSchemaMapping + ) throws JsonProcessingException + { + if (segmentSchemaMapping.getSchemaVersion() != CentralizedDatasourceSchemaConfig.SCHEMA_VERSION) { + log.error( + "Schema version [%d] doesn't match the current version [%d], dropping the schema [%s].", + segmentSchemaMapping.getSchemaVersion(), + CentralizedDatasourceSchemaConfig.SCHEMA_VERSION, + segmentSchemaMapping + ); + return null; + } + String dataSource = segments.stream().iterator().next().getDataSource(); + + log.info("Persisting segment schema: [%s].", segmentSchemaMapping); + + segmentSchemaManager.persistSegmentSchema( + handle, + dataSource, + segmentSchemaMapping.getSchemaFingerprintToPayloadMap(), + segmentSchemaMapping.getSchemaVersion() + ); + + // fetch schemaId + Map<String, Long> fingerprintSchemaIdMap = + segmentSchemaManager.schemaIdFetchBatch( + handle, + dataSource, + segmentSchemaMapping.getSchemaVersion(), + segmentSchemaMapping.getSchemaFingerprintToPayloadMap().keySet() + ); + + log.info("Fingerprint schema map is [%s]", fingerprintSchemaIdMap); + + return fingerprintSchemaIdMap; + } + + private
Pair<Long, Long> fetchSegmentSchemaInformation( + SegmentSchemaMapping segmentSchemaMapping, + String segmentId, + Map<String, Long> fingerprintSchemaIdMap + ) + { + Long schemaId = null, numRows = null; + SegmentMetadata segmentMetadata = segmentSchemaMapping.getSegmentIdToMetadataMap().get(segmentId); + if (segmentMetadata != null) { + numRows = segmentMetadata.getNumRows(); + String fingerprint = segmentMetadata.getSchemaFingerprint(); + if (fingerprintSchemaIdMap != null && fingerprintSchemaIdMap.containsKey(fingerprint)) { + schemaId = fingerprintSchemaIdMap.get(fingerprint); + } + } + return Pair.of(schemaId, numRows); + } + private Set<DataSegment> announceHistoricalSegmentBatch( final Handle handle, final Set<DataSegment> segments, - final Set<DataSegment> usedSegments + final Set<DataSegment> usedSegments, + @Nullable final SegmentSchemaMapping segmentSchemaMapping ) throws IOException { final Set<DataSegment> toInsertSegments = new HashSet<>(); + Map<String, Long> fingerprintSchemaIdMap = null; + boolean schemaPresent = isSchemaPresent(segmentSchemaMapping); Review Comment: Why are these variables declared outside the `try` block? 
########## server/src/main/java/org/apache/druid/metadata/IndexerSQLMetadataStorageCoordinator.java: ########## @@ -1801,16 +1837,89 @@ public int deletePendingSegments(String dataSource) ); } + private boolean isSchemaPresent(SegmentSchemaMapping segmentSchemaMapping) + { + return publishSchema() + && segmentSchemaMapping != null + && segmentSchemaMapping.isNonEmpty(); + } + + private Map<String, Long> persistSchema( + final Handle handle, + final Set<DataSegment> segments, + final SegmentSchemaMapping segmentSchemaMapping + ) throws JsonProcessingException + { + if (segmentSchemaMapping.getSchemaVersion() != CentralizedDatasourceSchemaConfig.SCHEMA_VERSION) { + log.error( + "Schema version [%d] doesn't match the current version [%d], dropping the schema [%s].", + segmentSchemaMapping.getSchemaVersion(), + CentralizedDatasourceSchemaConfig.SCHEMA_VERSION, + segmentSchemaMapping + ); + return null; + } + String dataSource = segments.stream().iterator().next().getDataSource(); + + log.info("Persisting segment schema: [%s].", segmentSchemaMapping); + + segmentSchemaManager.persistSegmentSchema( + handle, + dataSource, + segmentSchemaMapping.getSchemaFingerprintToPayloadMap(), + segmentSchemaMapping.getSchemaVersion() + ); + + // fetch schemaId + Map<String, Long> fingerprintSchemaIdMap = + segmentSchemaManager.schemaIdFetchBatch( + handle, + dataSource, + segmentSchemaMapping.getSchemaVersion(), + segmentSchemaMapping.getSchemaFingerprintToPayloadMap().keySet() + ); + + log.info("Fingerprint schema map is [%s]", fingerprintSchemaIdMap); + + return fingerprintSchemaIdMap; + } + + private Pair<Long, Long> fetchSegmentSchemaInformation( Review Comment: Once we start persisting the `fingerprint` itself in the `segments` table, we won't need to return the numeric schema ID here. In that case, this method should just return a `SegmentMetadata` object. 
########## server/src/main/java/org/apache/druid/metadata/IndexerSQLMetadataStorageCoordinator.java: ########## @@ -473,11 +500,13 @@ public SegmentPublishResult commitReplaceSegments( return connector.retryTransaction( (handle, transactionStatus) -> { final Set<DataSegment> segmentsToInsert = new HashSet<>(replaceSegments); - segmentsToInsert.addAll( - createNewIdsOfAppendSegmentsAfterReplace(handle, replaceSegments, locksHeldByReplaceTask) - ); + + Set<DataSegmentWithSchemaInformation> appendAfterReplaceSegmentMetadata = Review Comment: ```suggestion Set<DataSegmentWithSchemaInformation> upgradedSegments = ``` ########## server/src/main/java/org/apache/druid/server/coordinator/duty/KillUnreferencedSegmentSchemaDuty.java: ########## @@ -0,0 +1,106 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.server.coordinator.duty; + +import org.apache.druid.java.util.common.DateTimes; +import org.apache.druid.java.util.common.logger.Logger; +import org.apache.druid.metadata.SegmentsMetadataManager; +import org.apache.druid.segment.metadata.SegmentSchemaManager; +import org.apache.druid.server.coordinator.DruidCoordinatorConfig; +import org.apache.druid.server.coordinator.stats.Stats; +import org.joda.time.DateTime; + +import java.util.List; + +/** + * Coordinator duty to clean up segment schema which are not referenced by any used segment. + * <p> + * <ol> + * <li>If a schema is not referenced, UPDATE schemas SET used = false, used_status_last_updated = now</li> + * <li>DELETE FROM schemas WHERE used = false AND used_status_last_updated < 6 hours ago</li> + * <li>When creating a new segment, try to find schema for the fingerprint of the segment.</li> + * <ol type="a"> + * <li> If no record found, create a new one.</li> + * <li> If record found which has used = true, reuse this schema_id.</li> + * <li> If record found which has used = false, UPDATE SET used = true, used_status_last_updated = now</li> + * </ol> + * </ol> + * </p> + * <p> + * Possible race conditions: + * <ol type="a"> + * <li> Between ops 1 and 3b: In other words, we might end up with a segment that points to a schema that has just been marked as unused. This can be repaired by the coordinator duty. </li> + * <li> Between 2 and 3c: This can be handled. Either 2 will fail to update any rows (good case) or 3c will fail to update any rows and thus return 0 (bad case). In the bad case, we need to recreate the schema, same as step 3a. 
</li> + * </ol> + * </p> + */ +public class KillUnreferencedSegmentSchemaDuty extends MetadataCleanupDuty +{ + private static final Logger log = new Logger(KillUnreferencedSegmentSchemaDuty.class); + private final SegmentSchemaManager segmentSchemaManager; + private final SegmentsMetadataManager segmentsMetadataManager; + + public KillUnreferencedSegmentSchemaDuty( + DruidCoordinatorConfig config, + SegmentSchemaManager segmentSchemaManager, + SegmentsMetadataManager segmentsMetadataManager + ) + { + super( + "segmentSchema", + "druid.coordinator.kill.segmentSchema", + config.isSegmentSchemaKillEnabled(), + config.getSegmentSchemaKillPeriod(), + config.getSegmentSchemaKillDurationToRetain(), + Stats.Kill.RULES, + config + ); + this.segmentSchemaManager = segmentSchemaManager; + this.segmentsMetadataManager = segmentsMetadataManager; + } + + @Override + protected int cleanupEntriesCreatedBefore(DateTime minCreatedTime) + { + log.info("MinCreatedTime is [%s], currentTime is [%s]", minCreatedTime, DateTimes.nowUtc().toString()); Review Comment: This log line is not needed. 
########## server/src/main/java/org/apache/druid/metadata/IndexerSQLMetadataStorageCoordinator.java: ########## @@ -2232,29 +2412,60 @@ private void insertIntoUpgradeSegmentsTable( } } - private List<DataSegment> retrieveSegmentsById(Handle handle, String datasource, Set<String> segmentIds) + private List<DataSegmentWithSchemaInformation> retrieveSegmentsById(Handle handle, String datasource, Set<String> segmentIds) { if (segmentIds.isEmpty()) { return Collections.emptyList(); } - return SqlSegmentsMetadataQuery.forHandle(handle, connector, dbTables, jsonMapper) - .retrieveSegmentsById(datasource, segmentIds) - .stream() - .map(DataSegmentPlus::getDataSegment) - .collect(Collectors.toList()); + if (publishSchema()) { + return SqlSegmentsMetadataQuery.forHandle(handle, connector, dbTables, jsonMapper) + .retrieveSegmentsWithSchemaById(datasource, segmentIds) + .stream() + .map(plus -> + new DataSegmentWithSchemaInformation( + plus.getDataSegment(), + plus.getSchemaId(), + plus.getNumRows() + ) + ) + .collect(Collectors.toList()); + } else { + return SqlSegmentsMetadataQuery.forHandle(handle, connector, dbTables, jsonMapper) + .retrieveSegmentsById(datasource, segmentIds) + .stream() + .map(plus -> + new DataSegmentWithSchemaInformation( + plus.getDataSegment(), + plus.getSchemaId(), + plus.getNumRows() + ) + ) + .collect(Collectors.toList()); + } } private String buildSqlToInsertSegments() { - return StringUtils.format( - "INSERT INTO %1$s (id, dataSource, created_date, start, %2$send%2$s," - + " partitioned, version, used, payload, used_status_last_updated) " - + "VALUES (:id, :dataSource, :created_date, :start, :end," - + " :partitioned, :version, :used, :payload, :used_status_last_updated)", - dbTables.getSegmentsTable(), - connector.getQuoteString() - ); + if (publishSchema()) { Review Comment: In fact, I would suggest we not use this `if` at all. 
Rather than using two different SQLs, better to explicitly bind the new columns to null when we don't have a schema to bind. It makes reading the code much simpler and we know exactly when the columns are being written as null. ########## server/src/main/java/org/apache/druid/metadata/IndexerSQLMetadataStorageCoordinator.java: ########## @@ -1901,18 +2026,21 @@ private Set<DataSegment> createNewIdsOfAppendSegmentsAfterReplace( .findFirst().orElse(null); final Map<String, String> upgradeSegmentToLockVersion = getAppendSegmentsCommittedDuringTask(handle, taskId); - final List<DataSegment> segmentsToUpgrade + + final List<DataSegmentWithSchemaInformation> segmentsToUpgrade = retrieveSegmentsById(handle, datasource, upgradeSegmentToLockVersion.keySet()); + log.info("createNewIdsOfAppendSegmentsAfterReplace SegmentsToUpgrade are [%s]", segmentsToUpgrade); Review Comment: Not needed. Whenever adding logs, please make sure that they are really adding meaningful information that can be used for debugging scenarios. ########## server/src/main/java/org/apache/druid/metadata/IndexerSQLMetadataStorageCoordinator.java: ########## @@ -1829,8 +1938,18 @@ private Set<DataSegment> announceHistoricalSegmentBatch( PreparedBatch preparedBatch = handle.prepareBatch(buildSqlToInsertSegments()); for (List<DataSegment> partition : partitionedSegments) { for (DataSegment segment : partition) { - preparedBatch.add() - .bind("id", segment.getId().toString()) + String segmentId = segment.getId().toString(); + Long schemaId = null; + Long numRows = null; + + if (schemaPresent && segmentSchemaMapping.getSegmentIdToMetadataMap().containsKey(segmentId)) { + Pair<Long, Long> schemaInfoPair = fetchSegmentSchemaInformation(segmentSchemaMapping, segmentId, fingerprintSchemaIdMap); + schemaId = schemaInfoPair.lhs; + numRows = schemaInfoPair.rhs; + } + + PreparedBatchPart preparedBatchPart = preparedBatch.add() Review Comment: This part need not be changed from the original code. 
We should always bind `numRows` and `schemaId`, null or not. This way, we don't need two different SQLs. -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: [email protected] For queries about this service, please contact Infrastructure at: [email protected] --------------------------------------------------------------------- To unsubscribe, e-mail: [email protected] For additional commands, e-mail: [email protected]
