This is an automated email from the ASF dual-hosted git repository.

zanderxu pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git
commit 7764b39e4abf352a3665e7b1d24dec0654b37e6d
Author: ZanderXu <zande...@apache.org>
AuthorDate: Sat Mar 23 21:27:34 2024 +0800

    HDFS-17414. [FGL] RPCs in DatanodeProtocol support fine-grained lock (#6649)
---
 .../hdfs/server/blockmanagement/BlockManager.java  | 20 +++++++--
 .../server/blockmanagement/DatanodeManager.java    |  7 ++---
 .../server/blockmanagement/ProvidedStorageMap.java |  3 ++-
 .../hdfs/server/namenode/FSDirWriteFileOp.java     |  3 ++-
 .../hadoop/hdfs/server/namenode/FSDirectory.java   |  2 +-
 .../hadoop/hdfs/server/namenode/FSNamesystem.java  | 31 +++++++++++-----------
 .../server/blockmanagement/TestBlockManager.java   |  5 ++++
 .../blockmanagement/TestDatanodeManager.java       | 15 ++++++++++-
 8 files changed, 54 insertions(+), 32 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 5fa7c63b261..18049d510b6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -97,6 +97,7 @@
 import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfyManager;
@@ -1900,7 +1901,7 @@ private Block getBlockOnStorage(BlockInfo storedBlock,
    */
   public void findAndMarkBlockAsCorrupt(final ExtendedBlock blk,
       final DatanodeInfo dn, String storageID, String reason) throws IOException {
-    assert namesystem.hasWriteLock();
+    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
     final Block reportedBlock = blk.getLocalBlock();
     final BlockInfo storedBlock = getStoredBlock(reportedBlock);
     if (storedBlock == null) {
@@ -2715,7 +2716,7 @@ void processPendingReconstructions() {
   }
 
   public long requestBlockReportLeaseId(DatanodeRegistration nodeReg) {
-    assert namesystem.hasReadLock();
+    assert namesystem.hasReadLock(FSNamesystemLockMode.BM);
     DatanodeDescriptor node = null;
     try {
       node = datanodeManager.getDatanode(nodeReg);
@@ -2737,7 +2738,6 @@ public long requestBlockReportLeaseId(DatanodeRegistration nodeReg) {
 
   public void registerDatanode(DatanodeRegistration nodeReg)
       throws IOException {
-    assert namesystem.hasWriteLock();
     datanodeManager.registerDatanode(nodeReg);
     bmSafeMode.checkSafeMode();
   }
@@ -3004,7 +3004,7 @@ void removeDNLeaseIfNeeded(DatanodeDescriptor node) {
 
   public void removeBRLeaseIfNeeded(final DatanodeID nodeID,
       final BlockReportContext context) throws IOException {
-    namesystem.writeLock();
+    namesystem.writeLock(FSNamesystemLockMode.BM);
     DatanodeDescriptor node;
     try {
       node = datanodeManager.getDatanode(nodeID);
@@ -3022,7 +3022,7 @@ public void removeBRLeaseIfNeeded(final DatanodeID nodeID,
         }
       }
     } finally {
-      namesystem.writeUnlock("removeBRLeaseIfNeeded");
+      namesystem.writeUnlock(FSNamesystemLockMode.BM, "removeBRLeaseIfNeeded");
     }
   }
@@ -3214,7 +3214,7 @@ public void markBlockReplicasAsCorrupt(Block oldBlock,
       BlockInfo block, long oldGenerationStamp, long oldNumBytes,
       DatanodeStorageInfo[] newStorages) throws IOException {
-    assert namesystem.hasWriteLock();
+    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
     BlockToMarkCorrupt b = null;
     if (block.getGenerationStamp() != oldGenerationStamp) {
       b = new BlockToMarkCorrupt(oldBlock, block, oldGenerationStamp,
@@ -4433,7 +4433,7 @@ private void removeStoredBlock(DatanodeStorageInfo storageInfo, Block block,
    */
   public void removeStoredBlock(BlockInfo storedBlock, DatanodeDescriptor node) {
     blockLog.debug("BLOCK* removeStoredBlock: {} from {}", storedBlock, node);
-    assert (namesystem.hasWriteLock());
+    assert (namesystem.hasWriteLock(FSNamesystemLockMode.BM));
     {
       if (storedBlock == null || !blocksMap.removeNode(storedBlock, node)) {
         blockLog.debug("BLOCK* removeStoredBlock: {} has already been removed from node {}",
@@ -4948,7 +4948,7 @@ public int getTotalBlocks() {
   }
 
   public void removeBlock(BlockInfo block) {
-    assert namesystem.hasWriteLock();
+    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
     // No need to ACK blocks that are being removed entirely
     // from the namespace, since the removal of the associated
     // file already removes them from the block map below.
@@ -4991,7 +4991,7 @@ public void updateLastBlock(BlockInfo lastBlock, ExtendedBlock newBlock) {
   /** updates a block in needed reconstruction queue. */
   private void updateNeededReconstructions(final BlockInfo block,
       final int curReplicasDelta, int expectedReplicasDelta) {
-    namesystem.writeLock();
+    namesystem.writeLock(FSNamesystemLockMode.BM);
     try {
       if (!isPopulatingReplQueues() || !block.isComplete()) {
         return;
@@ -5010,7 +5010,7 @@ private void updateNeededReconstructions(final BlockInfo block,
             repl.outOfServiceReplicas(), oldExpectedReplicas);
       }
     } finally {
-      namesystem.writeUnlock("updateNeededReconstructions");
+      namesystem.writeUnlock(FSNamesystemLockMode.BM, "updateNeededReconstructions");
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 081f8348258..a80d255f29a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -22,6 +22,7 @@
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.classification.VisibleForTesting;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
@@ -863,7 +864,7 @@ private void removeDatanode(DatanodeDescriptor nodeInfo) {
    */
   private void removeDatanode(DatanodeDescriptor nodeInfo,
       boolean removeBlocksFromBlocksMap) {
-    assert namesystem.hasWriteLock();
+    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
     heartbeatManager.removeDatanode(nodeInfo);
     if (removeBlocksFromBlocksMap) {
       blockManager.removeBlocksAssociatedTo(nodeInfo);
@@ -882,7 +883,7 @@ private void removeDatanode(DatanodeDescriptor nodeInfo,
    */
   public void removeDatanode(final DatanodeID node)
       throws UnregisteredNodeException {
-    namesystem.writeLock();
+    namesystem.writeLock(FSNamesystemLockMode.BM);
     try {
       final DatanodeDescriptor descriptor = getDatanode(node);
       if (descriptor != null) {
@@ -892,7 +893,7 @@ public void removeDatanode(final DatanodeID node)
           + node + " does not exist");
       }
     } finally {
-      namesystem.writeUnlock("removeDatanode");
+      namesystem.writeUnlock(FSNamesystemLockMode.BM, "removeDatanode");
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
index 8d3e17a5352..4e4464b3259 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ProvidedStorageMap.java
@@ -45,6 +45,7 @@
 import org.apache.hadoop.hdfs.server.common.blockaliasmap.BlockAliasMap;
 import org.apache.hadoop.hdfs.server.common.blockaliasmap.impl.TextFileRegionAliasMap;
 import org.apache.hadoop.hdfs.server.common.BlockAlias;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
 import org.apache.hadoop.hdfs.util.RwLock;
@@ -173,7 +174,7 @@ public LocatedBlockBuilder newLocatedBlocks(int maxValue) {
 
   public void removeDatanode(DatanodeDescriptor dnToRemove) {
     if (providedEnabled) {
-      assert lock.hasWriteLock() : "Not holding write lock";
+      assert lock.hasWriteLock(FSNamesystemLockMode.BM) : "Not holding write lock";
       providedDescriptor.remove(dnToRemove);
       // if all datanodes fail, set the block report count to 0
       if (providedDescriptor.activeProvidedDatanodes() == 0) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
index 339873efadc..37000421abc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.fs.XAttrSetFlag;
@@ -104,7 +105,7 @@ static boolean unprotectedRemoveBlock(
    */
   static void persistBlocks(
       FSDirectory fsd, String path, INodeFile file, boolean logRetryCache) {
-    assert fsd.getFSNamesystem().hasWriteLock();
+    assert fsd.getFSNamesystem().hasWriteLock(FSNamesystemLockMode.FS);
     Preconditions.checkArgument(file.isUnderConstruction());
     fsd.getEditLog().logUpdateBlocks(path, file, logRetryCache);
     if(NameNode.stateChangeLog.isDebugEnabled()) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 9ab996984cb..8ed904b5179 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -1106,7 +1106,7 @@ static void unprotectedUpdateCount(INodesInPath inodesInPath,
    */
   public void updateSpaceForCompleteBlock(BlockInfo completeBlk,
       INodesInPath inodes) throws IOException {
-    assert namesystem.hasWriteLock();
+    assert namesystem.hasWriteLock(FSNamesystemLockMode.GLOBAL);
     INodesInPath iip = inodes != null ? inodes :
         INodesInPath.fromINode(namesystem.getBlockCollection(completeBlk));
     INodeFile fileINode = iip.getLastINode().asFile();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 724ba2769e6..7e1c90966ae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -1900,11 +1900,11 @@ public void cpUnlock() {
 
   NamespaceInfo getNamespaceInfo() {
-    readLock();
+    readLock(FSNamesystemLockMode.FS);
     try {
       return unprotectedGetNamespaceInfo();
     } finally {
-      readUnlock("getNamespaceInfo");
+      readUnlock(FSNamesystemLockMode.FS, "getNamespaceInfo");
     }
   }
@@ -3919,7 +3919,7 @@ Lease reassignLeaseInternal(Lease lease, String newHolder, INodeFile pendingFile
   void commitOrCompleteLastBlock(
       final INodeFile fileINode, final INodesInPath iip,
       final Block commitBlock) throws IOException {
-    assert hasWriteLock();
+    assert hasWriteLock(FSNamesystemLockMode.GLOBAL);
     Preconditions.checkArgument(fileINode.isUnderConstruction());
     blockManager.commitOrCompleteLastBlock(fileINode, commitBlock, iip);
   }
@@ -3941,7 +3941,7 @@ void addCommittedBlocksToPending(final INodeFile pendingFile) {
   void finalizeINodeFileUnderConstruction(String src, INodeFile pendingFile,
       int latestSnapshot, boolean allowCommittedBlock) throws IOException {
-    assert hasWriteLock();
+    assert hasWriteLock(FSNamesystemLockMode.GLOBAL);
     FileUnderConstructionFeature uc =
         pendingFile.getFileUnderConstructionFeature();
     if (uc == null) {
@@ -4010,7 +4010,8 @@ INodeFile getBlockCollection(BlockInfo b) {
 
   @Override
   public INodeFile getBlockCollection(long id) {
-    assert hasReadLock() : "Accessing INode id = " + id + " without read lock";
+    assert hasReadLock(FSNamesystemLockMode.FS)
+        : "Accessing INode id = " + id + " without read lock";
     INode inode = getFSDirectory().getInode(id);
     return inode == null ? null : inode.asFile();
   }
@@ -4028,7 +4029,7 @@ void commitBlockSynchronization(ExtendedBlock oldBlock,
         + ")");
     checkOperation(OperationCategory.WRITE);
     final String src;
-    writeLock();
+    writeLock(FSNamesystemLockMode.GLOBAL);
     boolean copyTruncate = false;
     BlockInfo truncatedBlock = null;
     try {
@@ -4163,7 +4164,7 @@ void commitBlockSynchronization(ExtendedBlock oldBlock,
       }
       blockManager.successfulBlockRecovery(storedBlock);
     } finally {
-      writeUnlock("commitBlockSynchronization");
+      writeUnlock(FSNamesystemLockMode.GLOBAL, "commitBlockSynchronization");
     }
     getEditLog().logSync();
     if (closeFile) {
@@ -4424,11 +4425,11 @@ BatchedDirectoryListing getBatchedListing(String[] srcs, byte[] startAfter,
    * @see org.apache.hadoop.hdfs.server.datanode.DataNode
    */
   void registerDatanode(DatanodeRegistration nodeReg) throws IOException {
-    writeLock();
+    writeLock(FSNamesystemLockMode.BM);
     try {
       blockManager.registerDatanode(nodeReg);
     } finally {
-      writeUnlock("registerDatanode");
+      writeUnlock(FSNamesystemLockMode.BM, "registerDatanode");
     }
   }
@@ -4461,7 +4462,7 @@ HeartbeatResponse handleHeartbeat(DatanodeRegistration nodeReg,
       @Nonnull SlowPeerReports slowPeers,
       @Nonnull SlowDiskReports slowDisks)
           throws IOException {
-    readLock();
+    readLock(FSNamesystemLockMode.BM);
     try {
       //get datanode commands
       DatanodeCommand[] cmds = blockManager.getDatanodeManager().handleHeartbeat(
@@ -4484,7 +4485,7 @@ nodeReg, reports, getBlockPoolId(), cacheCapacity, cacheUsed,
 
       return new HeartbeatResponse(cmds, haState, rollingUpgradeInfo,
           blockReportLeaseId, isSlownode);
     } finally {
-      readUnlock("handleHeartbeat");
+      readUnlock(FSNamesystemLockMode.BM, "handleHeartbeat");
     }
   }
@@ -4546,7 +4547,7 @@ void checkAvailableResources() {
    * @param file
    */
   private void closeFile(String path, INodeFile file) {
-    assert hasWriteLock();
+    assert hasWriteLock(FSNamesystemLockMode.FS);
     // file is closed
     getEditLog().logCloseFile(path, file);
     NameNode.stateChangeLog.debug("closeFile: {} with {} blocks is persisted to the file system",
@@ -5904,7 +5905,7 @@ private long nextBlockId(BlockType blockType) throws IOException {
   }
 
   boolean isFileDeleted(INodeFile file) {
-    assert hasReadLock();
+    assert hasReadLock(FSNamesystemLockMode.FS);
     // Not in the inodeMap or in the snapshot but marked deleted.
     if (dir.getInode(file.getId()) == null) {
       return true;
     }
@@ -5982,7 +5983,7 @@ private INodeFile checkUCBlock(ExtendedBlock block,
    */
   void reportBadBlocks(LocatedBlock[] blocks) throws IOException {
     checkOperation(OperationCategory.WRITE);
-    writeLock();
+    writeLock(FSNamesystemLockMode.BM);
     try {
       checkOperation(OperationCategory.WRITE);
       for (int i = 0; i < blocks.length; i++) {
@@ -5998,7 +5999,7 @@ void reportBadBlocks(LocatedBlock[] blocks) throws IOException {
         }
       }
     } finally {
-      writeUnlock("reportBadBlocks");
+      writeUnlock(FSNamesystemLockMode.BM, "reportBadBlocks");
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index a456041d1f9..697c4975e2f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -21,6 +21,7 @@
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
 import org.apache.hadoop.thirdparty.com.google.common.collect.LinkedListMultimap;
@@ -166,6 +167,10 @@ public void setupMockCluster() throws IOException {
     fsn = Mockito.mock(FSNamesystem.class);
     Mockito.doReturn(true).when(fsn).hasWriteLock();
     Mockito.doReturn(true).when(fsn).hasReadLock();
+    Mockito.doReturn(true).when(fsn).hasWriteLock(FSNamesystemLockMode.GLOBAL);
+    Mockito.doReturn(true).when(fsn).hasReadLock(FSNamesystemLockMode.GLOBAL);
+    Mockito.doReturn(true).when(fsn).hasWriteLock(FSNamesystemLockMode.BM);
+    Mockito.doReturn(true).when(fsn).hasReadLock(FSNamesystemLockMode.BM);
     Mockito.doReturn(true).when(fsn).isRunning();
     //Make shouldPopulaeReplQueues return true
     HAContext haContext = Mockito.mock(HAContext.class);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
index bcdba957758..de89d4c8e25 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java
@@ -36,6 +36,7 @@
 import java.util.Random;
 import java.util.Set;
 
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -119,6 +120,7 @@ public void testNumVersionsCorrectAfterReregister()
     //Create the DatanodeManager which will be tested
     FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
+    Mockito.when(fsn.hasWriteLock(FSNamesystemLockMode.BM)).thenReturn(true);
     Configuration conf = new Configuration();
     conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 0);
     conf.setLong(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 10);
@@ -154,6 +156,7 @@ public void testHost2NodeMapCorrectAfterReregister()
     //Create the DatanodeManager which will be tested
     FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
+    Mockito.when(fsn.hasWriteLock(FSNamesystemLockMode.BM)).thenReturn(true);
     Configuration conf = new Configuration();
     DatanodeManager dm = mockDatanodeManager(fsn, conf);
@@ -184,6 +187,7 @@ public void testNumVersionsReportedCorrect() throws IOException {
     //Create the DatanodeManager which will be tested
     FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
+    Mockito.when(fsn.hasWriteLock(FSNamesystemLockMode.BM)).thenReturn(true);
     DatanodeManager dm = mockDatanodeManager(fsn, new Configuration());
 
     //Seed the RNG with a known value so test failures are easier to reproduce
@@ -283,7 +287,8 @@ public void testRejectUnresolvedDatanodes() throws IOException {
     //Create the DatanodeManager which will be tested
     FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
-
+    Mockito.when(fsn.hasWriteLock(FSNamesystemLockMode.BM)).thenReturn(true);
+
     Configuration conf = new Configuration();
 
     //Set configuration property for rejecting unresolved topology mapping
@@ -401,6 +406,7 @@ public void HelperFunction(String scriptFileName, int providedStorages)
     Configuration conf = new Configuration();
     FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
+    Mockito.when(fsn.hasWriteLock(FSNamesystemLockMode.BM)).thenReturn(true);
     if (scriptFileName != null && !scriptFileName.isEmpty()) {
       URL shellScript = getClass().getResource(scriptFileName);
       Path resourcePath = Paths.get(shellScript.toURI());
@@ -499,6 +505,7 @@ public void testGetBlockLocations()
     Configuration conf = new Configuration();
     FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
+    Mockito.when(fsn.hasWriteLock(FSNamesystemLockMode.BM)).thenReturn(true);
     URL shellScript = getClass().getResource(
         "/" + Shell.appendScriptExtension("topology-script"));
     Path resourcePath = Paths.get(shellScript.toURI());
@@ -648,6 +655,7 @@ public void testGetBlockLocationConsiderLoadWithNodesOfSameDistance()
         DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
     FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
+    Mockito.when(fsn.hasWriteLock(FSNamesystemLockMode.BM)).thenReturn(true);
     URL shellScript = getClass()
         .getResource("/" + Shell.appendScriptExtension("topology-script"));
     Path resourcePath = Paths.get(shellScript.toURI());
@@ -715,6 +723,7 @@ public void testGetBlockLocationConsiderStorageType()
         DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
     FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
+    Mockito.when(fsn.hasWriteLock(FSNamesystemLockMode.BM)).thenReturn(true);
     URL shellScript = getClass()
         .getResource("/" + Shell.appendScriptExtension("topology-script"));
     Path resourcePath = Paths.get(shellScript.toURI());
@@ -801,6 +810,7 @@ public void testGetBlockLocationConsiderStorageTypeAndLoad()
         DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
     FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
+    Mockito.when(fsn.hasWriteLock(FSNamesystemLockMode.BM)).thenReturn(true);
     URL shellScript = getClass()
         .getResource("/" + Shell.appendScriptExtension("topology-script"));
     Path resourcePath = Paths.get(shellScript.toURI());
@@ -890,6 +900,7 @@ public void testRemoveIncludedNode() throws IOException {
     // Set the write lock so that the DatanodeManager can start
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
+    Mockito.when(fsn.hasWriteLock(FSNamesystemLockMode.BM)).thenReturn(true);
     DatanodeManager dm = mockDatanodeManager(fsn, new Configuration());
 
     HostFileManager hm = new HostFileManager();
@@ -988,6 +999,7 @@ private void verifyPendingRecoveryTasks(
       throws IOException {
     FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
+    Mockito.when(fsn.hasWriteLock(FSNamesystemLockMode.BM)).thenReturn(true);
     Configuration conf = new Configuration();
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, maxTransfers);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY,
@@ -1142,6 +1154,7 @@ public void verifyComputeReconstructedTaskNum(int xmitsInProgress, int numReplic
       throws IOException {
     FSNamesystem fsn = Mockito.mock(FSNamesystem.class);
     Mockito.when(fsn.hasWriteLock()).thenReturn(true);
+    Mockito.when(fsn.hasWriteLock(FSNamesystemLockMode.BM)).thenReturn(true);
     Configuration conf = new Configuration();
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, maxTransfers);
     DatanodeManager dm = Mockito.spy(mockDatanodeManager(fsn, conf));
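The pattern this patch applies across the DatanodeProtocol entry points is uniform: the single FSNamesystem lock is split by mode, with FSNamesystemLockMode.FS guarding namespace state, FSNamesystemLockMode.BM guarding block-manager state, and FSNamesystemLockMode.GLOBAL taking both. Datanode-facing RPCs such as registerDatanode, handleHeartbeat, and reportBadBlocks move to the BM lock, while operations that touch both layers (for example commitBlockSynchronization) stay GLOBAL. A minimal sketch of that idea, assuming a simplified two-lock holder; the FineGrainedLockSketch class and its fields are hypothetical, not the real FSNamesystem implementation:

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    // Lock modes mirroring the patch; everything else in this class is an
    // illustrative simplification, not Hadoop's actual implementation.
    enum FSNamesystemLockMode { GLOBAL, FS, BM }

    class FineGrainedLockSketch {
      private final ReentrantReadWriteLock fsLock = new ReentrantReadWriteLock(); // namespace state
      private final ReentrantReadWriteLock bmLock = new ReentrantReadWriteLock(); // block-manager state

      void writeLock(FSNamesystemLockMode mode) {
        // GLOBAL takes both locks in a fixed order to avoid deadlock;
        // FS and BM each take only their own lock.
        if (mode == FSNamesystemLockMode.GLOBAL || mode == FSNamesystemLockMode.FS) {
          fsLock.writeLock().lock();
        }
        if (mode == FSNamesystemLockMode.GLOBAL || mode == FSNamesystemLockMode.BM) {
          bmLock.writeLock().lock();
        }
      }

      void writeUnlock(FSNamesystemLockMode mode, String opName) {
        // opName mirrors the patch's unlock tagging; release in reverse order.
        if (mode == FSNamesystemLockMode.GLOBAL || mode == FSNamesystemLockMode.BM) {
          bmLock.writeLock().unlock();
        }
        if (mode == FSNamesystemLockMode.GLOBAL || mode == FSNamesystemLockMode.FS) {
          fsLock.writeLock().unlock();
        }
      }

      // Usage following the shape of removeBRLeaseIfNeeded() above: a datanode
      // RPC that only touches block-manager state takes BM instead of GLOBAL.
      void removeBRLeaseIfNeeded() {
        writeLock(FSNamesystemLockMode.BM);
        try {
          // ... block-manager-only work ...
        } finally {
          writeUnlock(FSNamesystemLockMode.BM, "removeBRLeaseIfNeeded");
        }
      }
    }

Under this split, heartbeats and block reports contend only on the BM lock, so they no longer serialize behind namespace RPCs.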
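On the test side the change is mechanical: every mocked FSNamesystem that stubbed hasWriteLock() must now also stub the mode-qualified overload, since the converted assertions call it. A condensed sketch of the stubbing repeated throughout TestDatanodeManager; the helper method and class name are hypothetical, while the stubbed calls come from the patch itself:

    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
    import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;

    public class MockNamesystemSketch {
      // Returns a mock that claims to hold the write lock under both the
      // legacy check and the new fine-grained BM check.
      static FSNamesystem mockNamesystemHoldingWriteLocks() {
        FSNamesystem fsn = mock(FSNamesystem.class);
        when(fsn.hasWriteLock()).thenReturn(true);                         // legacy, coarse-grained
        when(fsn.hasWriteLock(FSNamesystemLockMode.BM)).thenReturn(true);  // fine-grained BM
        return fsn;
      }
    }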
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org