vinothchandar commented on code in PR #11923: URL: https://github.com/apache/hudi/pull/11923#discussion_r1831689641
########## hudi-common/src/main/java/org/apache/hudi/common/table/timeline/versioning/v1/ArchivedTimelineLoaderV1.java: ########## @@ -0,0 +1,170 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hudi.common.table.timeline.versioning.v1; + +import org.apache.hudi.avro.model.HoodieArchivedMetaEntry; +import org.apache.hudi.avro.model.HoodieMergeArchiveFilePlan; +import org.apache.hudi.common.model.HoodieLogFile; +import org.apache.hudi.common.model.HoodiePartitionMetadata; +import org.apache.hudi.common.model.HoodieRecord; +import org.apache.hudi.common.table.HoodieTableMetaClient; +import org.apache.hudi.common.table.log.HoodieLogFormat; +import org.apache.hudi.common.table.log.block.HoodieAvroDataBlock; +import org.apache.hudi.common.table.log.block.HoodieLogBlock; +import org.apache.hudi.common.table.timeline.ArchivedTimelineLoader; +import org.apache.hudi.common.table.timeline.HoodieArchivedTimeline; +import org.apache.hudi.common.table.timeline.TimelineMetadataUtils; +import org.apache.hudi.common.util.FileIOUtils; +import org.apache.hudi.common.util.StringUtils; +import org.apache.hudi.common.util.collection.ClosableIterator; +import org.apache.hudi.exception.HoodieIOException; 
+import org.apache.hudi.storage.HoodieStorage; +import org.apache.hudi.storage.StoragePath; +import org.apache.hudi.storage.StoragePathInfo; + +import org.apache.avro.generic.GenericRecord; +import org.apache.avro.generic.IndexedRecord; +import org.jetbrains.annotations.Nullable; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.io.Serializable; +import java.util.Comparator; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.Spliterator; +import java.util.Spliterators; +import java.util.function.BiConsumer; +import java.util.function.Function; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.StreamSupport; + +public class ArchivedTimelineLoaderV1 implements ArchivedTimelineLoader { + private static final String MERGE_ARCHIVE_PLAN_NAME = "mergeArchivePlan"; + private static final Pattern ARCHIVE_FILE_PATTERN = + Pattern.compile("^\\.commits_\\.archive\\.([0-9]+).*"); + private static final String STATE_TRANSITION_TIME = "stateTransitionTime"; + private static final String ACTION_TYPE_KEY = "actionType"; + private static final Logger LOG = LoggerFactory.getLogger(ArchivedTimelineLoaderV1.class); + + @Override + public void loadInstants(HoodieTableMetaClient metaClient, + @Nullable HoodieArchivedTimeline.TimeRangeFilter filter, + HoodieArchivedTimeline.LoadMode loadMode, + Function<GenericRecord, Boolean> commitsFilter, + BiConsumer<String, GenericRecord> recordConsumer) { + Set<String> instantsInRange = new HashSet<>(); + try { + // List all files + List<StoragePathInfo> entryList = metaClient.getStorage().globEntries( + new StoragePath(metaClient.getArchivePath() + "/.commits_.archive*")); + + // Sort files by version suffix in reverse (implies reverse chronological order) + entryList.sort(new ArchiveFileVersionComparator()); + + for (StoragePathInfo fs : entryList) { + // Read the archived file + try (HoodieLogFormat.Reader reader 
= HoodieLogFormat.newReader(metaClient.getStorage(), + new HoodieLogFile(fs.getPath()), HoodieArchivedMetaEntry.getClassSchema())) { + int instantsInPreviousFile = instantsInRange.size(); + // Read the avro blocks + while (reader.hasNext()) { + HoodieLogBlock block = reader.next(); + if (block instanceof HoodieAvroDataBlock) { + HoodieAvroDataBlock avroBlock = (HoodieAvroDataBlock) block; + // TODO If we can store additional metadata in datablock, we can skip parsing records + // (such as startTime, endTime of records in the block) + try (ClosableIterator<HoodieRecord<IndexedRecord>> itr = avroBlock.getRecordIterator(HoodieRecord.HoodieRecordType.AVRO)) { + StreamSupport.stream(Spliterators.spliteratorUnknownSize(itr, Spliterator.IMMUTABLE), true) + // Filter blocks in desired time window + .map(r -> (GenericRecord) r.getData()) + .filter(commitsFilter::apply) + .forEach(r -> { + String instantTime = r.get(HoodiePartitionMetadata.COMMIT_TIME_KEY).toString(); Review Comment: do we have to depend on partition metadata? I'd like not to if possible ########## hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/timeline/versioning/v1/TimelineArchiverV1.java: ########## @@ -0,0 +1,444 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hudi.client.timeline.versioning.v1; + +import org.apache.hudi.avro.model.HoodieArchivedMetaEntry; +import org.apache.hudi.client.timeline.HoodieTimelineArchiver; +import org.apache.hudi.client.transaction.TransactionManager; +import org.apache.hudi.common.engine.HoodieEngineContext; +import org.apache.hudi.common.model.HoodieArchivedLogFile; +import org.apache.hudi.common.model.HoodieAvroIndexedRecord; +import org.apache.hudi.common.model.HoodieAvroPayload; +import org.apache.hudi.common.model.HoodieRecord; +import org.apache.hudi.common.model.HoodieTableType; +import org.apache.hudi.common.table.HoodieTableMetaClient; +import org.apache.hudi.common.table.log.HoodieLogFormat; +import org.apache.hudi.common.table.log.HoodieLogFormat.Writer; +import org.apache.hudi.common.table.log.block.HoodieAvroDataBlock; +import org.apache.hudi.common.table.log.block.HoodieLogBlock; +import org.apache.hudi.common.table.log.block.HoodieLogBlock.HeaderMetadataType; +import org.apache.hudi.common.table.timeline.HoodieActiveTimeline; +import org.apache.hudi.common.table.timeline.HoodieInstant; +import org.apache.hudi.common.table.timeline.HoodieTimeline; +import org.apache.hudi.common.table.timeline.InstantFileNameFactory; +import org.apache.hudi.common.table.timeline.MetadataConversionUtils; +import org.apache.hudi.common.table.timeline.TimelineUtils; +import org.apache.hudi.common.table.timeline.versioning.v1.ActiveTimelineV1; +import org.apache.hudi.common.table.timeline.versioning.v1.ArchivedTimelineV1; +import org.apache.hudi.common.table.timeline.versioning.v1.InstantComparatorV1; +import org.apache.hudi.common.table.timeline.versioning.v1.InstantFileNameFactoryV1; +import org.apache.hudi.common.util.ClusteringUtils; +import org.apache.hudi.common.util.CollectionUtils; +import org.apache.hudi.common.util.CompactionUtils; +import 
org.apache.hudi.common.util.Option; +import org.apache.hudi.common.util.collection.Pair; +import org.apache.hudi.config.HoodieWriteConfig; +import org.apache.hudi.exception.HoodieCommitException; +import org.apache.hudi.exception.HoodieException; +import org.apache.hudi.metadata.HoodieTableMetadata; +import org.apache.hudi.storage.StoragePath; +import org.apache.hudi.table.HoodieTable; +import org.apache.hudi.table.action.compact.CompactionTriggerStrategy; +import org.apache.hudi.table.marker.WriteMarkers; +import org.apache.hudi.table.marker.WriteMarkersFactory; + +import org.apache.avro.Schema; +import org.apache.avro.generic.IndexedRecord; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.apache.hudi.client.utils.ArchivalUtils.getMinAndMaxInstantsToKeep; +import static org.apache.hudi.common.table.timeline.InstantComparatorUtils.GREATER_THAN; +import static org.apache.hudi.common.table.timeline.InstantComparatorUtils.LESSER_THAN; +import static org.apache.hudi.common.table.timeline.InstantComparatorUtils.LESSER_THAN_OR_EQUALS; +import static org.apache.hudi.common.table.timeline.InstantComparatorUtils.compareTimestamps; + +/** + * Archiver to bound the growth of files under .hoodie meta path. 
+ */ +public class TimelineArchiverV1<T extends HoodieAvroPayload, I, K, O> implements HoodieTimelineArchiver<T, I, K, O> { + + private static final Logger LOG = LoggerFactory.getLogger(TimelineArchiverV1.class); + + private final StoragePath archiveFilePath; + private final HoodieWriteConfig config; + private Writer writer; + private final int maxInstantsToKeep; + private final int minInstantsToKeep; + private final HoodieTable<T, I, K, O> table; + private final HoodieTableMetaClient metaClient; + private final TransactionManager txnManager; + + public TimelineArchiverV1(HoodieWriteConfig config, HoodieTable<T, I, K, O> table) { + this.config = config; + this.table = table; + this.metaClient = table.getMetaClient(); + this.archiveFilePath = ArchivedTimelineV1.getArchiveLogPath(metaClient.getArchivePath()); + this.txnManager = new TransactionManager(config, table.getMetaClient().getStorage()); + Pair<Integer, Integer> minAndMaxInstants = getMinAndMaxInstantsToKeep(table, metaClient); + this.minInstantsToKeep = minAndMaxInstants.getLeft(); + this.maxInstantsToKeep = minAndMaxInstants.getRight(); + } + + private Writer openWriter() { + try { + if (this.writer == null) { + return HoodieLogFormat.newWriterBuilder().onParentPath(archiveFilePath.getParent()) + .withFileId(archiveFilePath.getName()).withFileExtension(HoodieArchivedLogFile.ARCHIVE_EXTENSION) Review Comment: is there a cost to not passing in the base commit.. previously we just used `"""` ########## hudi-common/src/main/java/org/apache/hudi/common/table/timeline/ActiveTimelineUtils.java: ########## @@ -0,0 +1,165 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hudi.common.table.timeline; + +import org.apache.hudi.common.model.HoodieTableType; +import org.apache.hudi.common.table.HoodieTableMetaClient; +import org.apache.hudi.common.util.Option; +import org.apache.hudi.exception.HoodieIOException; +import org.apache.hudi.storage.HoodieStorage; +import org.apache.hudi.storage.StoragePath; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.text.ParseException; +import java.util.Date; +import java.util.HashSet; +import java.util.Set; + +import static org.apache.hudi.common.table.timeline.HoodieTimeline.COMMIT_ACTION; +import static org.apache.hudi.common.table.timeline.HoodieTimeline.COMPACTION_ACTION; +import static org.apache.hudi.common.table.timeline.HoodieTimeline.DELTA_COMMIT_ACTION; +import static org.apache.hudi.common.table.timeline.HoodieTimeline.LOG_COMPACTION_ACTION; + +public class ActiveTimelineUtils { + private static final Logger LOG = LoggerFactory.getLogger(ActiveTimelineUtils.class); + + public static final Set<String> NOT_PARSABLE_TIMESTAMPS = new HashSet<String>(3) { + { + add(HoodieTimeline.INIT_INSTANT_TS); + add(HoodieTimeline.METADATA_BOOTSTRAP_INSTANT_TS); + add(HoodieTimeline.FULL_BOOTSTRAP_INSTANT_TS); + } + }; + + /** + * Parse the timestamp of an Instant and return a {@code Date}. + * Throw ParseException if timestamp is not valid format as + * {@link org.apache.hudi.common.table.timeline.HoodieInstantTimeGenerator#SECS_INSTANT_TIMESTAMP_FORMAT}. 
+ * + * @param timestamp a timestamp String which follow pattern as + * {@link org.apache.hudi.common.table.timeline.HoodieInstantTimeGenerator#SECS_INSTANT_TIMESTAMP_FORMAT}. + * @return Date of instant timestamp + */ + public static Date parseDateFromInstantTime(String timestamp) throws ParseException { Review Comment: are all these methods moved over from the existing `HoodieActiveTimeline`? ########## hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/timeline/versioning/v1/TimelineArchiverV1.java: ########## @@ -0,0 +1,444 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hudi.client.timeline.versioning.v1; + +import org.apache.hudi.avro.model.HoodieArchivedMetaEntry; +import org.apache.hudi.client.timeline.HoodieTimelineArchiver; +import org.apache.hudi.client.transaction.TransactionManager; +import org.apache.hudi.common.engine.HoodieEngineContext; +import org.apache.hudi.common.model.HoodieArchivedLogFile; +import org.apache.hudi.common.model.HoodieAvroIndexedRecord; +import org.apache.hudi.common.model.HoodieAvroPayload; +import org.apache.hudi.common.model.HoodieRecord; +import org.apache.hudi.common.model.HoodieTableType; +import org.apache.hudi.common.table.HoodieTableMetaClient; +import org.apache.hudi.common.table.log.HoodieLogFormat; +import org.apache.hudi.common.table.log.HoodieLogFormat.Writer; +import org.apache.hudi.common.table.log.block.HoodieAvroDataBlock; +import org.apache.hudi.common.table.log.block.HoodieLogBlock; +import org.apache.hudi.common.table.log.block.HoodieLogBlock.HeaderMetadataType; +import org.apache.hudi.common.table.timeline.HoodieActiveTimeline; +import org.apache.hudi.common.table.timeline.HoodieInstant; +import org.apache.hudi.common.table.timeline.HoodieTimeline; +import org.apache.hudi.common.table.timeline.InstantFileNameFactory; +import org.apache.hudi.common.table.timeline.MetadataConversionUtils; +import org.apache.hudi.common.table.timeline.TimelineUtils; +import org.apache.hudi.common.table.timeline.versioning.v1.ActiveTimelineV1; +import org.apache.hudi.common.table.timeline.versioning.v1.ArchivedTimelineV1; +import org.apache.hudi.common.table.timeline.versioning.v1.InstantComparatorV1; +import org.apache.hudi.common.table.timeline.versioning.v1.InstantFileNameFactoryV1; +import org.apache.hudi.common.util.ClusteringUtils; +import org.apache.hudi.common.util.CollectionUtils; +import org.apache.hudi.common.util.CompactionUtils; +import org.apache.hudi.common.util.Option; +import org.apache.hudi.common.util.collection.Pair; +import 
org.apache.hudi.config.HoodieWriteConfig; +import org.apache.hudi.exception.HoodieCommitException; +import org.apache.hudi.exception.HoodieException; +import org.apache.hudi.metadata.HoodieTableMetadata; +import org.apache.hudi.storage.StoragePath; +import org.apache.hudi.table.HoodieTable; +import org.apache.hudi.table.action.compact.CompactionTriggerStrategy; +import org.apache.hudi.table.marker.WriteMarkers; +import org.apache.hudi.table.marker.WriteMarkersFactory; + +import org.apache.avro.Schema; +import org.apache.avro.generic.IndexedRecord; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.apache.hudi.client.utils.ArchivalUtils.getMinAndMaxInstantsToKeep; +import static org.apache.hudi.common.table.timeline.InstantComparatorUtils.GREATER_THAN; +import static org.apache.hudi.common.table.timeline.InstantComparatorUtils.LESSER_THAN; +import static org.apache.hudi.common.table.timeline.InstantComparatorUtils.LESSER_THAN_OR_EQUALS; +import static org.apache.hudi.common.table.timeline.InstantComparatorUtils.compareTimestamps; + +/** + * Archiver to bound the growth of files under .hoodie meta path. 
+ */ +public class TimelineArchiverV1<T extends HoodieAvroPayload, I, K, O> implements HoodieTimelineArchiver<T, I, K, O> { + + private static final Logger LOG = LoggerFactory.getLogger(TimelineArchiverV1.class); + + private final StoragePath archiveFilePath; + private final HoodieWriteConfig config; + private Writer writer; + private final int maxInstantsToKeep; + private final int minInstantsToKeep; + private final HoodieTable<T, I, K, O> table; + private final HoodieTableMetaClient metaClient; + private final TransactionManager txnManager; + + public TimelineArchiverV1(HoodieWriteConfig config, HoodieTable<T, I, K, O> table) { + this.config = config; + this.table = table; + this.metaClient = table.getMetaClient(); + this.archiveFilePath = ArchivedTimelineV1.getArchiveLogPath(metaClient.getArchivePath()); + this.txnManager = new TransactionManager(config, table.getMetaClient().getStorage()); + Pair<Integer, Integer> minAndMaxInstants = getMinAndMaxInstantsToKeep(table, metaClient); + this.minInstantsToKeep = minAndMaxInstants.getLeft(); + this.maxInstantsToKeep = minAndMaxInstants.getRight(); + } + + private Writer openWriter() { + try { + if (this.writer == null) { + return HoodieLogFormat.newWriterBuilder().onParentPath(archiveFilePath.getParent()) + .withFileId(archiveFilePath.getName()).withFileExtension(HoodieArchivedLogFile.ARCHIVE_EXTENSION) + .withStorage(metaClient.getStorage()).build(); + } else { + return this.writer; + } + } catch (IOException e) { + throw new HoodieException("Unable to initialize HoodieLogFormat writer", e); + } + } + + public Writer reOpenWriter() { + try { + if (this.writer != null) { + this.writer.close(); + this.writer = null; + } + this.writer = openWriter(); + return writer; + } catch (IOException e) { + throw new HoodieException("Unable to initialize HoodieLogFormat writer", e); + } + } + + private void close() { + try { + if (this.writer != null) { + this.writer.close(); + } + } catch (IOException e) { + throw new 
HoodieException("Unable to close HoodieLogFormat writer", e); + } + } + + @Override + public int archiveIfRequired(HoodieEngineContext context, boolean acquireLock) throws IOException { + //NOTE: We permanently disable merging archive files. This is different from 0.15 behavior. + //TODO: NEED TO BE IN RELEASE DOC for behavior change. Review Comment: Made a note on HUDI-8158. We can remove the TODO ########## hudi-common/src/main/java/org/apache/hudi/common/table/timeline/versioning/v2/CommitMetadataSerDeV2.java: ########## @@ -0,0 +1,87 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hudi.common.table.timeline.versioning.v2; + +import org.apache.hudi.avro.model.HoodieCommitMetadata; +import org.apache.hudi.common.table.timeline.CommitMetadataSerDe; +import org.apache.hudi.common.table.timeline.HoodieInstant; +import org.apache.hudi.common.table.timeline.MetadataConversionUtils; +import org.apache.hudi.common.util.JsonUtils; +import org.apache.hudi.common.util.Option; + +import org.apache.avro.file.DataFileWriter; +import org.apache.avro.io.DatumWriter; +import org.apache.avro.specific.SpecificDatumWriter; +import org.apache.avro.specific.SpecificRecordBase; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; + +import static org.apache.hudi.common.table.timeline.MetadataConversionUtils.convertCommitMetadataToJsonBytes; +import static org.apache.hudi.common.table.timeline.TimelineMetadataUtils.deserializeCommitMetadata; +import static org.apache.hudi.common.util.StringUtils.fromUTF8Bytes; + +public class CommitMetadataSerDeV2 implements CommitMetadataSerDe { + + @Override + public <T> T deserialize(HoodieInstant instant, byte[] bytes, Class<T> clazz) throws IOException { Review Comment: lets unit test these well. ########## hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/timeline/versioning/v2/TimelineArchiverV2.java: ########## @@ -0,0 +1,369 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.hudi.client.timeline.versioning.v2; + +import org.apache.hudi.client.timeline.HoodieTimelineArchiver; +import org.apache.hudi.client.transaction.TransactionManager; +import org.apache.hudi.common.engine.HoodieEngineContext; +import org.apache.hudi.common.model.HoodieAvroPayload; +import org.apache.hudi.common.model.HoodieTableType; +import org.apache.hudi.common.table.HoodieTableMetaClient; +import org.apache.hudi.common.table.timeline.ActiveAction; +import org.apache.hudi.common.table.timeline.HoodieActiveTimeline; +import org.apache.hudi.common.table.timeline.HoodieInstant; +import org.apache.hudi.common.table.timeline.HoodieTimeline; +import org.apache.hudi.common.table.timeline.TimelineUtils; +import org.apache.hudi.common.table.timeline.versioning.v2.ActiveTimelineV2; +import org.apache.hudi.common.table.timeline.versioning.v2.InstantComparatorV2; +import org.apache.hudi.common.util.ClusteringUtils; +import org.apache.hudi.common.util.CollectionUtils; +import org.apache.hudi.common.util.CompactionUtils; +import org.apache.hudi.common.util.Option; +import org.apache.hudi.common.util.collection.Pair; +import org.apache.hudi.config.HoodieWriteConfig; +import org.apache.hudi.exception.HoodieException; +import org.apache.hudi.exception.HoodieLockException; +import org.apache.hudi.metadata.HoodieTableMetadata; +import org.apache.hudi.metrics.HoodieMetrics; +import org.apache.hudi.table.HoodieTable; +import org.apache.hudi.table.action.compact.CompactionTriggerStrategy; +import org.apache.hudi.table.marker.WriteMarkers; 
+import org.apache.hudi.table.marker.WriteMarkersFactory; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.function.Consumer; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.apache.hudi.client.utils.ArchivalUtils.getMinAndMaxInstantsToKeep; +import static org.apache.hudi.common.table.timeline.InstantComparatorUtils.LESSER_THAN; +import static org.apache.hudi.common.table.timeline.InstantComparatorUtils.compareTimestamps; + +/** + * Archiver to bound the growth of files under .hoodie meta path. + */ +public class TimelineArchiverV2<T extends HoodieAvroPayload, I, K, O> implements HoodieTimelineArchiver<T, I, K, O> { + + private static final Logger LOG = LoggerFactory.getLogger(TimelineArchiverV2.class); + + private final HoodieWriteConfig config; + private final int maxInstantsToKeep; + private final int minInstantsToKeep; + private final HoodieTable<T, I, K, O> table; + private final HoodieTableMetaClient metaClient; + private final TransactionManager txnManager; + + private final LSMTimelineWriter timelineWriter; + private final HoodieMetrics metrics; Review Comment: unused? ########## hudi-common/src/main/java/org/apache/hudi/common/table/timeline/versioning/v1/InstantComparatorV1.java: ########## @@ -0,0 +1,77 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hudi.common.table.timeline.versioning.v1; + +import org.apache.hudi.common.table.timeline.HoodieInstant; +import org.apache.hudi.common.table.timeline.InstantComparator; +import org.apache.hudi.common.table.timeline.versioning.common.InstantComparatorHelper; + +import java.io.Serializable; +import java.util.Comparator; +import java.util.HashMap; +import java.util.Map; + +import static org.apache.hudi.common.table.timeline.HoodieTimeline.COMMIT_ACTION; +import static org.apache.hudi.common.table.timeline.HoodieTimeline.COMPACTION_ACTION; +import static org.apache.hudi.common.table.timeline.HoodieTimeline.DELTA_COMMIT_ACTION; +import static org.apache.hudi.common.table.timeline.HoodieTimeline.LOG_COMPACTION_ACTION; + +public class InstantComparatorV1 implements Serializable, InstantComparator { + + /** + * A COMPACTION action eventually becomes COMMIT when completed. 
+ So, when grouping instants + * for state transitions, this needs to be taken into account + */ + private static final Map<String, String> COMPARABLE_ACTIONS = createComparableActionsMap(); + + public static final Comparator<HoodieInstant> ACTION_COMPARATOR = + new InstantComparatorHelper.ActionComparator(COMPARABLE_ACTIONS); + + public static final Comparator<HoodieInstant> COMPARATOR = + new InstantComparatorHelper.TimestampBasedComparator(COMPARABLE_ACTIONS); Review Comment: rename: requestedTimeBasedComparator ########## hudi-common/src/main/java/org/apache/hudi/common/table/timeline/versioning/v1/BaseTimelineV1.java: ########## @@ -0,0 +1,153 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hudi.common.table.timeline.versioning.v1; + +import org.apache.hudi.common.table.timeline.TimelineLayout; +import org.apache.hudi.common.table.timeline.HoodieInstant; +import org.apache.hudi.common.table.timeline.versioning.TimelineLayoutVersion; +import org.apache.hudi.common.table.timeline.AbstractHoodieBaseTimeline; +import org.apache.hudi.common.table.timeline.HoodieTimeline; +import org.apache.hudi.common.util.ClusteringUtils; +import org.apache.hudi.common.util.CollectionUtils; +import org.apache.hudi.common.util.Option; + +import java.util.List; +import java.util.Set; +import java.util.function.Function; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +public class BaseTimelineV1 extends AbstractHoodieBaseTimeline { + + public BaseTimelineV1(Stream<HoodieInstant> instants, Function<HoodieInstant, Option<byte[]>> details) { + this(instants, details, TimelineLayout.getLayout(TimelineLayoutVersion.LAYOUT_VERSION_1)); + } + + private BaseTimelineV1(Stream<HoodieInstant> instants, Function<HoodieInstant, Option<byte[]>> details, TimelineLayout layout) { + super(instants, details, layout.getTimelineFactory(), layout.getInstantComparator(), layout.getInstantFactory()); + } + + /** + * For serialization and de-serialization only. + * + * @deprecated + */ + @Deprecated + public BaseTimelineV1() { + super(TimelineLayout.getLayout(TimelineLayoutVersion.LAYOUT_VERSION_1)); + } + + @Override + public HoodieTimeline getWriteTimeline() { Review Comment: confirming this is copied over from 0.x as -is.. 
all these actions considered part of writetimeline ########## hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/upgrade/EightToSevenDowngradeHandler.java: ########## @@ -72,20 +81,43 @@ public Map<ConfigProperty, String> downgrade(HoodieWriteConfig config, HoodieEng HoodieTableMetaClient metaClient = HoodieTableMetaClient.builder().setConf(context.getStorageConf().newInstance()).setBasePath(config.getBasePath()).build(); List<HoodieInstant> instants = metaClient.getActiveTimeline().getInstants(); if (!instants.isEmpty()) { + InstantFileNameFactory instantFileNameFactory = metaClient.getTimelineLayout().getInstantFileNameFactory(); + CommitMetadataSerDeV2 commitMetadataSerDeV2 = new CommitMetadataSerDeV2(); + CommitMetadataSerDeV1 commitMetadataSerDeV1 = new CommitMetadataSerDeV1(); + ActiveTimelineV1 activeTimelineV1 = new ActiveTimelineV1(metaClient); + String tmpFilePrefix = "temp_commit_file_for_eight_to_seven_downgrade_"; context.map(instants, instant -> { - if (instant.getFileName().contains(UNDERSCORE)) { + String fileName = instantFileNameFactory.getFileName(instant); + if (fileName.contains(UNDERSCORE)) { try { // Rename the metadata file name from the ${instant_time}_${completion_time}.action[.state] format in version 1.x to the ${instant_time}.action[.state] format in version 0.x. - StoragePath fromPath = new StoragePath(metaClient.getMetaPath(), instant.getFileName()); - StoragePath toPath = new StoragePath(metaClient.getMetaPath(), instant.getFileName().replaceAll(UNDERSCORE + "\\d+", "")); - boolean success = metaClient.getStorage().rename(fromPath, toPath); + StoragePath fromPath = new StoragePath(metaClient.getMetaPath(), fileName); + StoragePath toPath = new StoragePath(metaClient.getMetaPath(), fileName.replaceAll(UNDERSCORE + "\\d+", "")); + boolean success = true; Review Comment: do the tests cover this? have you tried to do some downgrading? 
Would be good to file JIRAs with gaps ########## hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/timeline/versioning/v1/TimelineArchiverV1.java: ########## @@ -0,0 +1,444 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.hudi.client.timeline.versioning.v1; + +import org.apache.hudi.avro.model.HoodieArchivedMetaEntry; +import org.apache.hudi.client.timeline.HoodieTimelineArchiver; +import org.apache.hudi.client.transaction.TransactionManager; +import org.apache.hudi.common.engine.HoodieEngineContext; +import org.apache.hudi.common.model.HoodieArchivedLogFile; +import org.apache.hudi.common.model.HoodieAvroIndexedRecord; +import org.apache.hudi.common.model.HoodieAvroPayload; +import org.apache.hudi.common.model.HoodieRecord; +import org.apache.hudi.common.model.HoodieTableType; +import org.apache.hudi.common.table.HoodieTableMetaClient; +import org.apache.hudi.common.table.log.HoodieLogFormat; +import org.apache.hudi.common.table.log.HoodieLogFormat.Writer; +import org.apache.hudi.common.table.log.block.HoodieAvroDataBlock; +import org.apache.hudi.common.table.log.block.HoodieLogBlock; +import org.apache.hudi.common.table.log.block.HoodieLogBlock.HeaderMetadataType; +import org.apache.hudi.common.table.timeline.HoodieActiveTimeline; +import org.apache.hudi.common.table.timeline.HoodieInstant; +import org.apache.hudi.common.table.timeline.HoodieTimeline; +import org.apache.hudi.common.table.timeline.InstantFileNameFactory; +import org.apache.hudi.common.table.timeline.MetadataConversionUtils; +import org.apache.hudi.common.table.timeline.TimelineUtils; +import org.apache.hudi.common.table.timeline.versioning.v1.ActiveTimelineV1; +import org.apache.hudi.common.table.timeline.versioning.v1.ArchivedTimelineV1; +import org.apache.hudi.common.table.timeline.versioning.v1.InstantComparatorV1; +import org.apache.hudi.common.table.timeline.versioning.v1.InstantFileNameFactoryV1; +import org.apache.hudi.common.util.ClusteringUtils; +import org.apache.hudi.common.util.CollectionUtils; +import org.apache.hudi.common.util.CompactionUtils; +import org.apache.hudi.common.util.Option; +import org.apache.hudi.common.util.collection.Pair; +import 
org.apache.hudi.config.HoodieWriteConfig; +import org.apache.hudi.exception.HoodieCommitException; +import org.apache.hudi.exception.HoodieException; +import org.apache.hudi.metadata.HoodieTableMetadata; +import org.apache.hudi.storage.StoragePath; +import org.apache.hudi.table.HoodieTable; +import org.apache.hudi.table.action.compact.CompactionTriggerStrategy; +import org.apache.hudi.table.marker.WriteMarkers; +import org.apache.hudi.table.marker.WriteMarkersFactory; + +import org.apache.avro.Schema; +import org.apache.avro.generic.IndexedRecord; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static org.apache.hudi.client.utils.ArchivalUtils.getMinAndMaxInstantsToKeep; +import static org.apache.hudi.common.table.timeline.InstantComparatorUtils.GREATER_THAN; +import static org.apache.hudi.common.table.timeline.InstantComparatorUtils.LESSER_THAN; +import static org.apache.hudi.common.table.timeline.InstantComparatorUtils.LESSER_THAN_OR_EQUALS; +import static org.apache.hudi.common.table.timeline.InstantComparatorUtils.compareTimestamps; + +/** + * Archiver to bound the growth of files under .hoodie meta path. 
+ */ +public class TimelineArchiverV1<T extends HoodieAvroPayload, I, K, O> implements HoodieTimelineArchiver<T, I, K, O> { + + private static final Logger LOG = LoggerFactory.getLogger(TimelineArchiverV1.class); + + private final StoragePath archiveFilePath; + private final HoodieWriteConfig config; + private Writer writer; + private final int maxInstantsToKeep; + private final int minInstantsToKeep; + private final HoodieTable<T, I, K, O> table; + private final HoodieTableMetaClient metaClient; + private final TransactionManager txnManager; + + public TimelineArchiverV1(HoodieWriteConfig config, HoodieTable<T, I, K, O> table) { + this.config = config; + this.table = table; + this.metaClient = table.getMetaClient(); + this.archiveFilePath = ArchivedTimelineV1.getArchiveLogPath(metaClient.getArchivePath()); + this.txnManager = new TransactionManager(config, table.getMetaClient().getStorage()); + Pair<Integer, Integer> minAndMaxInstants = getMinAndMaxInstantsToKeep(table, metaClient); + this.minInstantsToKeep = minAndMaxInstants.getLeft(); + this.maxInstantsToKeep = minAndMaxInstants.getRight(); + } + + private Writer openWriter() { + try { + if (this.writer == null) { + return HoodieLogFormat.newWriterBuilder().onParentPath(archiveFilePath.getParent()) + .withFileId(archiveFilePath.getName()).withFileExtension(HoodieArchivedLogFile.ARCHIVE_EXTENSION) + .withStorage(metaClient.getStorage()).build(); + } else { + return this.writer; + } + } catch (IOException e) { + throw new HoodieException("Unable to initialize HoodieLogFormat writer", e); + } + } + + public Writer reOpenWriter() { + try { + if (this.writer != null) { + this.writer.close(); + this.writer = null; + } + this.writer = openWriter(); + return writer; + } catch (IOException e) { + throw new HoodieException("Unable to initialize HoodieLogFormat writer", e); + } + } + + private void close() { + try { + if (this.writer != null) { + this.writer.close(); + } + } catch (IOException e) { + throw new 
HoodieException("Unable to close HoodieLogFormat writer", e); + } + } + + @Override + public int archiveIfRequired(HoodieEngineContext context, boolean acquireLock) throws IOException { + //NOTE: We permanently disable merging archive files. This is different from 0.15 behavior. Review Comment: again, what was the reason for this? do we have a jira with the context to bring that back later.. I can try after we have the log changes also in. ########## hudi-common/src/main/java/org/apache/hudi/common/table/timeline/versioning/v1/CompletionTimeQueryViewV1.java: ########## @@ -0,0 +1,239 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hudi.common.table.timeline.versioning.v1; + +import org.apache.hudi.common.table.HoodieTableMetaClient; +import org.apache.hudi.common.table.log.InstantRange; +import org.apache.hudi.common.table.timeline.CompletionTimeQueryView; +import org.apache.hudi.common.table.timeline.HoodieInstant; +import org.apache.hudi.common.table.timeline.HoodieInstantTimeGenerator; +import org.apache.hudi.common.table.timeline.HoodieTimeline; +import org.apache.hudi.common.table.timeline.InstantComparatorUtils; +import org.apache.hudi.common.util.Option; +import org.apache.hudi.common.util.VisibleForTesting; + +import java.io.Serializable; +import java.time.Instant; +import java.util.Date; +import java.util.List; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.function.Function; + +import static org.apache.hudi.common.table.timeline.InstantComparatorUtils.GREATER_THAN_OR_EQUALS; +import static org.apache.hudi.common.table.timeline.InstantComparatorUtils.LESSER_THAN; + +public class CompletionTimeQueryViewV1 implements CompletionTimeQueryView, Serializable { + private static final long serialVersionUID = 1L; + + private static final long MILLI_SECONDS_IN_THREE_DAYS = 3 * 24 * 3600 * 1000; + + private static final long MILLI_SECONDS_IN_ONE_DAY = 24 * 3600 * 1000; + + private final HoodieTableMetaClient metaClient; + + /** + * Mapping from instant start time -> completion time. + * Should be thread-safe data structure. + */ + private final ConcurrentMap<String, String> beginToCompletionInstantTimeMap; + + /** + * The cursor instant time to eagerly load from, by default load last N days of completed instants. + * It can grow dynamically with lazy loading. e.g. assuming an initial cursor instant as t10, + * a completion query for t5 would trigger lazy loading with this cursor instant updated to t5. + * This sliding window model amortizes redundant loading from different queries. 
+ */ + private volatile String cursorInstant; + + /** + * The first write instant on the active timeline, used for query optimization. + */ + private final String firstNonSavepointCommit; + + /** + * The constructor. + * + * @param metaClient The table meta client. + */ + public CompletionTimeQueryViewV1(HoodieTableMetaClient metaClient) { + this(metaClient, HoodieInstantTimeGenerator.formatDate(new Date(Instant.now().minusMillis(MILLI_SECONDS_IN_THREE_DAYS).toEpochMilli()))); + } + + /** + * The constructor. + * + * @param metaClient The table meta client. + * @param eagerLoadInstant The earliest instant time to eagerly load from, by default load last N days of completed instants. + */ + public CompletionTimeQueryViewV1(HoodieTableMetaClient metaClient, String eagerLoadInstant) { + this.metaClient = metaClient; + this.beginToCompletionInstantTimeMap = new ConcurrentHashMap<>(); + this.cursorInstant = InstantComparatorUtils.minInstant(eagerLoadInstant, metaClient.getActiveTimeline().firstInstant().map(HoodieInstant::getRequestTime).orElse("")); + // Note: use getWriteTimeline() to keep sync with the fs view visibleCommitsAndCompactionTimeline, see AbstractTableFileSystemView.refreshTimeline. + this.firstNonSavepointCommit = metaClient.getActiveTimeline().getWriteTimeline().getFirstNonSavepointCommit().map(HoodieInstant::getRequestTime).orElse(""); + load(); + } + + @Override + public boolean isCompleted(String beginInstantTime) { + // archival does not proceed beyond the first savepoint, so any instant before that is completed. 
+ return this.beginToCompletionInstantTimeMap.containsKey(beginInstantTime) || isArchived(beginInstantTime); + } + + @Override + public boolean isArchived(String instantTime) { + return InstantComparatorUtils.compareTimestamps(instantTime, LESSER_THAN, this.firstNonSavepointCommit); + } + + @Override + public boolean isCompletedBefore(String baseInstant, String instantTime) { + Option<String> completionTimeOpt = getCompletionTime(baseInstant, instantTime); + if (completionTimeOpt.isPresent()) { + return InstantComparatorUtils.compareTimestamps(completionTimeOpt.get(), LESSER_THAN, baseInstant); + } + return false; + } + + @Override + public boolean isSlicedAfterOrOn(String baseInstant, String instantTime) { + Option<String> completionTimeOpt = getCompletionTime(baseInstant, instantTime); + if (completionTimeOpt.isPresent()) { + return InstantComparatorUtils.compareTimestamps(completionTimeOpt.get(), GREATER_THAN_OR_EQUALS, baseInstant); + } + return true; + } + + @Override + public Option<String> getCompletionTime(String baseInstant, String instantTime) { + Option<String> completionTimeOpt = getCompletionTime(instantTime); + if (completionTimeOpt.isPresent()) { + String completionTime = completionTimeOpt.get(); + if (completionTime.length() != baseInstant.length()) { + // ============================================================== + // LEGACY CODE + // ============================================================== + // Fixes the completion time to reflect the completion sequence correctly + // if the file slice base instant time is not in datetime format. + // For example, many test cases just use integer string as the instant time. + // CAUTION: this fix only works for OCC(Optimistic Concurrency Control). + // for NB-CC(Non-blocking Concurrency Control), the file slicing may be incorrect. 
+ return Option.of(instantTime); + } + } + return completionTimeOpt; + } + + @Override + public Option<String> getCompletionTime(String beginTime) { + String completionTime = this.beginToCompletionInstantTimeMap.get(beginTime); + if (completionTime != null) { + return Option.of(completionTime); + } + + // ***This is the key change between V1 and V2 completion time query-view*** + if (isArchived(beginTime)) { + // Completion time and begin time are same for archived instants. + return Option.of(beginTime); + } + // the instant is still pending + return Option.empty(); + } + + @Override + public List<String> getInstantTimes( + HoodieTimeline timeline, + Option<String> rangeStart, + Option<String> rangeEnd, + InstantRange.RangeType rangeType) { + // assumes any instant/transaction lasts at most 1 day to optimize the query efficiency. + return getInstantTimes(timeline, rangeStart, rangeEnd, rangeType, s -> HoodieInstantTimeGenerator.instantTimeMinusMillis(s, MILLI_SECONDS_IN_ONE_DAY)); + } + + @Override + @VisibleForTesting + public List<String> getInstantTimes( + String rangeStart, + String rangeEnd, + Function<String, String> earliestInstantTimeFunc) { + return getInstantTimes(metaClient.getCommitsTimeline().filterCompletedInstants(), Option.ofNullable(rangeStart), Option.ofNullable(rangeEnd), + InstantRange.RangeType.CLOSED_CLOSED, earliestInstantTimeFunc); + } + + /** + * Queries the instant start time with given completion time range. + * + * @param timeline The timeline. + * @param rangeStart The query range start completion time. + * @param rangeEnd The query range end completion time. + * @param rangeType The range type. + * @param earliestInstantTimeFunc The function to generate the earliest start time boundary + * with the minimum completion time. + * + * @return The sorted instant time list. 
+ */ + private List<String> getInstantTimes( + HoodieTimeline timeline, + Option<String> rangeStart, + Option<String> rangeEnd, + InstantRange.RangeType rangeType, + Function<String, String> earliestInstantTimeFunc) { + throw new RuntimeException("Not Implemented Yet !"); Review Comment: better error message here? `Incremental query view for timeline version 1 not yet implemented` -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: [email protected] For queries about this service, please contact Infrastructure at: [email protected]
