n3nash commented on a change in pull request #2374:
URL: https://github.com/apache/hudi/pull/2374#discussion_r588917850
##########
File path: hudi-client/hudi-client-common/src/main/java/org/apache/hudi/client/transaction/SimpleConcurrentFileWritesConflictResolutionStrategy.java
##########
@@ -0,0 +1,105 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hudi.client.transaction;
+
+import org.apache.hudi.common.model.HoodieCommitMetadata;
+import org.apache.hudi.common.model.WriteOperationType;
+import org.apache.hudi.common.table.timeline.HoodieActiveTimeline;
+import org.apache.hudi.common.table.timeline.HoodieInstant;
+import org.apache.hudi.common.table.timeline.HoodieTimeline;
+import org.apache.hudi.common.util.CollectionUtils;
+import org.apache.hudi.common.util.Option;
+import org.apache.hudi.exception.HoodieWriteConflictException;
+import org.apache.hudi.table.HoodieTable;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.Logger;
+
+import java.util.ConcurrentModificationException;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.stream.Stream;
+
+import static org.apache.hudi.common.table.timeline.HoodieTimeline.COMPACTION_ACTION;
+import static org.apache.hudi.common.table.timeline.HoodieTimeline.REPLACE_COMMIT_ACTION;
+
+/**
+ * This class is a basic implementation of {@link ConflictResolutionStrategy} for concurrent writes.
+ */
+public class SimpleConcurrentFileWritesConflictResolutionStrategy
+    implements ConflictResolutionStrategy {
+
+  private static final Logger LOG = LogManager.getLogger(SimpleConcurrentFileWritesConflictResolutionStrategy.class);
+
+  @Override
+  public Stream<HoodieInstant> getCandidateInstants(HoodieActiveTimeline activeTimeline, HoodieInstant currentInstant,
+      Option<HoodieInstant> lastSuccessfulInstant) {
+
+    // To find which instants are conflicting, we apply the following logic:
+    // 1. Get the completed instants timeline, only for commits that have happened since the last successful write.
+    // 2. Get any scheduled or completed compaction or clustering operations that have started and/or finished
+    //    after the current instant. We need to check for write conflicts since they may have mutated the same files
+    //    that are being newly created by the current write.
+    // NOTE that commits from table services such as compaction, clustering or cleaning that completed before the
+    // current write started are not considered here, since the overlapping of files is handled using MVCC.
+    Stream<HoodieInstant> completedCommitsInstantStream = activeTimeline
+        .getCommitsTimeline()
+        .filterCompletedInstants()
+        .findInstantsAfter(lastSuccessfulInstant.isPresent() ?
+            lastSuccessfulInstant.get().getTimestamp() : HoodieTimeline.INIT_INSTANT_TS)
+        .getInstants();
+
+    Stream<HoodieInstant> compactionAndClusteringTimeline = activeTimeline
+        .getTimelineOfActions(CollectionUtils.createSet(REPLACE_COMMIT_ACTION, COMPACTION_ACTION))
+        .findInstantsAfter(currentInstant.getTimestamp())
+        .getInstants();
+    return Stream.concat(completedCommitsInstantStream, compactionAndClusteringTimeline);
+  }
+
+  @Override
+  public boolean hasConflict(HoodieCommitOperation thisOperation, HoodieCommitOperation otherOperation) {
+    // TODO : UUIDs can clash even for insert/insert; handle that case.
+    Set<String> fileIdsSetForFirstInstant = thisOperation.getMutatedFileIds();
+    Set<String> fileIdsSetForSecondInstant = otherOperation.getMutatedFileIds();
+    Set<String> intersection = new HashSet<>(fileIdsSetForFirstInstant);
+    intersection.retainAll(fileIdsSetForSecondInstant);
+    if (!intersection.isEmpty()) {
+      LOG.warn("Found conflicting writes between first operation = " + thisOperation
+          + ", second operation = " + otherOperation + ", intersecting file ids " + intersection);
+      return true;
+    }
+    return false;
+  }
+
+  @Override
+  public Option<HoodieCommitMetadata> resolveConflict(HoodieTable table,
+      HoodieCommitOperation thisOperation, HoodieCommitOperation otherOperation) {
+    // Since compaction is eventually written as a commit, we need to ensure
+    // we handle this during conflict resolution and do not treat the commit from a compaction operation
+    // as a regular commit. Regular commits, deltacommits and replace are candidates for conflict.
+    // Replace is used for a) clustering without update support and b) insert_overwrite, both of which are
+    // candidates for conflict. We need to add CLUSTER here once it supports concurrent updates.

Review comment:
   There is already one -> https://issues.apache.org/jira/browse/HUDI-1042


----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]
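
As a side note, here is a minimal, self-contained sketch (not part of the PR diff above) of the set-intersection idea that hasConflict relies on: two concurrent writes conflict when they mutated at least one common file id. The class name, variable names and sample file ids are hypothetical, and only plain java.util collections are used.

import java.util.HashSet;
import java.util.Set;

// Illustrative only: mirrors the intersection check described in hasConflict.
public class FileIdConflictSketch {

  static boolean conflicts(Set<String> firstWriteFileIds, Set<String> secondWriteFileIds) {
    Set<String> intersection = new HashSet<>(firstWriteFileIds);
    intersection.retainAll(secondWriteFileIds);   // keep only file ids touched by both writes
    return !intersection.isEmpty();
  }

  public static void main(String[] args) {
    // Hypothetical file ids mutated by three concurrent writers.
    Set<String> writerA = Set.of("file-1", "file-2");
    Set<String> writerB = Set.of("file-2", "file-3");
    Set<String> writerC = Set.of("file-4");

    System.out.println(conflicts(writerA, writerB)); // true  -> both mutated "file-2"
    System.out.println(conflicts(writerA, writerC)); // false -> disjoint file ids, no conflict
  }
}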
