amaliujia commented on a change in pull request #1780: URL: https://github.com/apache/ozone/pull/1780#discussion_r565713332
########## File path: hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImplV2.java ########## @@ -0,0 +1,398 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * <p> + * http://www.apache.org/licenses/LICENSE-2.0 + * <p> + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.scm.block; + +import java.io.IOException; +import java.util.List; +import java.util.UUID; +import java.util.Set; +import java.util.Map; +import java.util.LinkedHashSet; +import java.util.ArrayList; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; +import java.util.stream.Collectors; + +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto.DeleteBlockTransactionResult; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; +import 
org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler.DeleteBlockStatus; +import org.apache.hadoop.hdds.scm.container.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.ContainerManagerV2; +import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; +import org.apache.hadoop.hdds.scm.container.ContainerReplica; +import org.apache.hadoop.hdds.scm.ha.DBTransactionBuffer; +import org.apache.hadoop.hdds.scm.ha.SCMContext; +import org.apache.hadoop.hdds.scm.ha.SCMRatisServer; +import org.apache.hadoop.hdds.server.events.EventHandler; +import org.apache.hadoop.hdds.server.events.EventPublisher; +import org.apache.hadoop.hdds.utils.UniqueId; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; + +import com.google.common.collect.Lists; +import static java.lang.Math.min; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY_DEFAULT; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * A implement class of {@link DeletedBlockLog}, and it uses + * K/V db to maintain block deletion transactions between scm and datanode. + * This is a very basic implementation, it simply scans the log and + * memorize the position that scanned by last time, and uses this to + * determine where the next scan starts. It has no notion about weight + * of each transaction so as long as transaction is still valid, they get + * equally same chance to be retrieved which only depends on the nature + * order of the transaction ID. 
+ */ +public class DeletedBlockLogImplV2 + implements DeletedBlockLog, EventHandler<DeleteBlockStatus> { + + public static final Logger LOG = + LoggerFactory.getLogger(DeletedBlockLogImpl.class); + + private final int maxRetry; + private final ContainerManagerV2 containerManager; + private final Lock lock; + // Maps txId to set of DNs which are successful in committing the transaction + private Map<Long, Set<UUID>> transactionToDNsCommitMap; + // The access to DeletedBlocksTXTable is protected by + // DeletedBlockLogStateManager. + private final DeletedBlockLogStateManager deletedBlockLogStateManager; + private final SCMContext scmContext; + + public DeletedBlockLogImplV2(ConfigurationSource conf, + ContainerManagerV2 containerManager, + SCMRatisServer ratisServer, + Table<Long, DeletedBlocksTransaction> deletedBlocksTXTable, + DBTransactionBuffer dbTxBuffer, + SCMContext scmContext) { + maxRetry = conf.getInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, + OZONE_SCM_BLOCK_DELETION_MAX_RETRY_DEFAULT); + this.containerManager = containerManager; + this.lock = new ReentrantLock(); + + // transactionToDNsCommitMap is updated only when + // transaction is added to the log and when it is removed. + + // maps transaction to dns which have committed it. + transactionToDNsCommitMap = new ConcurrentHashMap<>(); + this.deletedBlockLogStateManager = DeletedBlockLogStateManagerImpl + .newBuilder() + .setConfiguration(conf) + .setDeletedBlocksTable(deletedBlocksTXTable) + .setRatisServer(ratisServer) + .setSCMDBTransactionBuffer(dbTxBuffer) + .build(); + this.scmContext = scmContext; + } + + + @Override + public List<DeletedBlocksTransaction> getFailedTransactions() + throws IOException { + lock.lock(); Review comment: I see. Thanks for clarification. ---------------------------------------------------------------- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. 
For queries about this service, please contact Infrastructure at: users@infra.apache.org --------------------------------------------------------------------- To unsubscribe, e-mail: issues-unsubscribe@ozone.apache.org For additional commands, e-mail: issues-help@ozone.apache.org