This is an automated email from the ASF dual-hosted git repository.

zanderxu pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit be0267313bdfb1001fa61f31c745c2f7f0db069d
Author: ZanderXu <zande...@apache.org>
AuthorDate: Tue Apr 23 09:42:23 2024 +0800

    HDFS-17457. [FGL] UTs support fine-grained locking (#6741)
---
 .../server/federation/router/TestRouterRpc.java    |  5 +++--
 .../hadoop/hdfs/TestBlocksScheduledCounter.java    | 11 ++++++----
 .../org/apache/hadoop/hdfs/TestFileCorruption.java | 13 +++++------
 .../blockmanagement/BlockManagerTestUtil.java      | 22 ++++++++++---------
 .../server/blockmanagement/TestBlockManager.java   |  8 +++----
 .../TestBlocksWithNotEnoughRacks.java              |  5 +++--
 .../blockmanagement/TestComputeInvalidateWork.java | 25 +++++++++++-----------
 .../blockmanagement/TestHeartbeatHandling.java     |  9 ++++----
 .../TestNameNodePrunesMissingStorages.java         | 10 +++++----
 .../hdfs/server/blockmanagement/TestNodeCount.java |  5 +++--
 .../blockmanagement/TestOverReplicatedBlocks.java  |  9 ++++----
 .../blockmanagement/TestPendingReconstruction.java | 17 ++++++++-------
 ...tReconstructStripedBlocksWithRackAwareness.java | 10 +++++----
 .../TestReplicationPolicyConsiderLoad.java         | 10 +++++----
 .../TestReplicationPolicyExcludeSlowNodes.java     | 11 ++++++----
 ...licationPolicyRatioConsiderLoadWithStorage.java |  6 ++++--
 .../hdfs/server/namenode/NameNodeAdapter.java      | 24 ++++++++++++++-------
 .../hdfs/server/namenode/TestAddBlockRetry.java    | 14 ++++++------
 .../TestAddOverReplicatedStripedBlocks.java        |  6 ++++--
 .../TestBlockPlacementPolicyRackFaultTolerant.java |  6 ++++--
 .../hdfs/server/namenode/TestCacheDirectives.java  | 17 ++++++++-------
 .../hdfs/server/namenode/TestDeleteRace.java       |  5 +++--
 .../server/namenode/TestDiskspaceQuotaUpdate.java  |  5 +++--
 .../hdfs/server/namenode/TestEditLogRace.java      |  6 ++++--
 .../server/namenode/TestFSImageWithSnapshot.java   |  9 ++++----
 .../hdfs/server/namenode/TestFSNamesystem.java     |  5 +++--
 .../server/namenode/TestFSNamesystemMBean.java     |  7 +++---
 .../hdfs/server/namenode/TestFileTruncate.java     |  9 ++++----
 .../hadoop/hdfs/server/namenode/TestFsck.java      |  9 ++++----
 .../server/namenode/TestGetBlockLocations.java     | 14 ++++++------
 .../server/namenode/TestLargeDirectoryDelete.java  |  5 +++--
 .../hdfs/server/namenode/TestListOpenFiles.java    |  5 +++--
 .../namenode/TestNameNodeMetadataConsistency.java  | 11 ++++++----
 .../namenode/TestReconstructStripedBlocks.java     |  5 +++--
 .../server/namenode/TestSecurityTokenEditLog.java  |  5 +++--
 .../hdfs/server/namenode/ha/TestDNFencing.java     |  5 +++--
 .../namenode/metrics/TestNameNodeMetrics.java      | 13 +++++------
 ...TestINodeFileUnderConstructionWithSnapshot.java |  5 +++--
 .../org/apache/hadoop/hdfs/tools/TestDFSAdmin.java |  5 +++--
 .../namenode/ITestProvidedImplementation.java      | 13 +++++------
 40 files changed, 223 insertions(+), 161 deletions(-)
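
The changes below all follow one pattern: each test swaps the coarse-grained FSNamesystem lock calls for the mode-qualified fine-grained locking (FGL) API, taking FSNamesystemLockMode.BM for BlockManager-only state, FSNamesystemLockMode.FS for namespace-only state, and FSNamesystemLockMode.GLOBAL where both are touched; the matching unlock additionally names the operation being performed. A minimal sketch of the pattern, using only the signatures visible in this diff (the helper method and the "markCorrupt" operation name are illustrative, not part of the patch):

    import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
    import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;

    final class FglLockPatternSketch {
      // Old pattern: ns.writeLock(); try { ... } finally { ns.writeUnlock(); }
      // New pattern: lock a specific partition and name the operation on unlock.
      static void runUnderBmWriteLock(FSNamesystem ns, Runnable bmWork) {
        ns.writeLock(FSNamesystemLockMode.BM);  // BlockManager partition only
        try {
          bmWork.run();  // e.g. bm.findAndMarkBlockAsCorrupt(...)
        } finally {
          // The mode must match the acquire; the string tags the operation.
          ns.writeUnlock(FSNamesystemLockMode.BM, "markCorrupt");
        }
      }
    }

Tests that cross both partitions (e.g. addBlockNoJournal in NameNodeAdapter.java below) take FSNamesystemLockMode.GLOBAL instead.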

diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java
index 7edb5492330..4eb3e4184b1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterRpc.java
@@ -121,6 +121,7 @@
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations;
@@ -1698,10 +1699,10 @@ public void testGetReplicatedBlockStats() throws Exception {
       // mark a replica as corrupt
       LocatedBlock block = NameNodeAdapter
           .getBlockLocations(nameNode, testFile, 0, 1024).get(0);
-      namesystem.writeLock();
+      namesystem.writeLock(FSNamesystemLockMode.BM);
       bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
           "STORAGE_ID", "TEST");
-      namesystem.writeUnlock();
+      namesystem.writeUnlock(FSNamesystemLockMode.BM, "findAndMarkBlockAsCorrupt");
       BlockManagerTestUtil.updateState(bm);
       DFSTestUtil.waitCorruptReplicas(fileSystem, namesystem,
           new Path(testFile), block.getBlock(), 1);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java
index d86700b39b1..e2e36110049 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java
@@ -35,6 +35,7 @@
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.junit.After;
 import org.junit.Test;
 
@@ -175,7 +176,7 @@ public void testScheduledBlocksCounterDecrementOnDeletedBlock()
           .getBlockLocations(cluster.getNameNode(), filePath.toString(), 0, 1)
           .get(0);
       DatanodeInfo[] locs = block.getLocations();
-      cluster.getNamesystem().writeLock();
+      cluster.getNamesystem().writeLock(FSNamesystemLockMode.BM);
       try {
         bm.findAndMarkBlockAsCorrupt(block.getBlock(), locs[0], "STORAGE_ID",
             "TEST");
@@ -185,7 +186,8 @@ public void testScheduledBlocksCounterDecrementOnDeletedBlock()
         BlockManagerTestUtil.updateState(bm);
         assertEquals(1L, bm.getPendingReconstructionBlocksCount());
       } finally {
-        cluster.getNamesystem().writeUnlock();
+        cluster.getNamesystem().writeUnlock(FSNamesystemLockMode.BM,
+            "findAndMarkBlockAsCorrupt");
       }
 
       // 4. delete the file
@@ -238,13 +240,14 @@ public void testBlocksScheduledCounterOnTruncate() throws Exception {
         DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
       }
 
-      cluster.getNamesystem().writeLock();
+      cluster.getNamesystem().writeLock(FSNamesystemLockMode.BM);
       try {
         BlockManagerTestUtil.computeAllPendingWork(bm);
         BlockManagerTestUtil.updateState(bm);
         assertEquals(1L, bm.getPendingReconstructionBlocksCount());
       } finally {
-        cluster.getNamesystem().writeUnlock();
+        cluster.getNamesystem().writeUnlock(FSNamesystemLockMode.BM,
+            "testBlocksScheduledCounterOnTruncate");
       }
 
       // 5.truncate the file whose block exists in pending reconstruction
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
index 381cf1694f5..f7088bc7ce5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java
@@ -51,6 +51,7 @@
 import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -159,13 +160,13 @@ public void testArrayOutOfBoundsException() throws Exception {
       DatanodeRegistration dnR = InternalDataNodeTestUtils.
         getDNRegistrationForBP(dataNode, blk.getBlockPoolId());
       FSNamesystem ns = cluster.getNamesystem();
-      ns.writeLock();
+      ns.writeLock(FSNamesystemLockMode.BM);
      try {
        cluster.getNamesystem().getBlockManager().findAndMarkBlockAsCorrupt(blk,
             new DatanodeInfoBuilder().setNodeID(dnR).build(), "TEST",
             "STORAGE_ID");
       } finally {
-        ns.writeUnlock();
+        ns.writeUnlock(FSNamesystemLockMode.BM, "testArrayOutOfBoundsException");
       }
       
       // open the file
@@ -210,16 +211,16 @@ public void testCorruptionWithDiskFailure() throws Exception {
       FSNamesystem ns = cluster.getNamesystem();
       //fail the storage on that node which has the block
       try {
-        ns.writeLock();
+        ns.writeLock(FSNamesystemLockMode.BM);
         updateAllStorages(bm);
       } finally {
-        ns.writeUnlock();
+        ns.writeUnlock(FSNamesystemLockMode.BM, "testCorruptionWithDiskFailure");
       }
-      ns.writeLock();
+      ns.writeLock(FSNamesystemLockMode.BM);
       try {
         markAllBlocksAsCorrupt(bm, blk);
       } finally {
-        ns.writeUnlock();
+        ns.writeUnlock(FSNamesystemLockMode.BM, "testCorruptionWithDiskFailure");
       }
 
       // open the file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
index c25cc88059d..5278f6e2dd4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManagerTestUtil.java
@@ -31,6 +31,7 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerSafeMode.BMSafeModeStatus;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.test.Whitebox;
@@ -50,23 +51,23 @@ public static void setNodeReplicationLimit(final BlockManager blockManager,
   /** @return the datanode descriptor for the given the given storageID. */
   public static DatanodeDescriptor getDatanode(final FSNamesystem ns,
       final String storageID) {
-    ns.readLock();
+    ns.readLock(FSNamesystemLockMode.BM);
     try {
       return ns.getBlockManager().getDatanodeManager().getDatanode(storageID);
     } finally {
-      ns.readUnlock();
+      ns.readUnlock(FSNamesystemLockMode.BM, "getDatanode");
     }
   }
 
   public static Iterator<BlockInfo> getBlockIterator(final FSNamesystem ns,
       final String storageID, final int startBlock) {
-    ns.readLock();
+    ns.readLock(FSNamesystemLockMode.BM);
     try {
       DatanodeDescriptor dn =
           ns.getBlockManager().getDatanodeManager().getDatanode(storageID);
       return dn.getBlockIterator(startBlock);
     } finally {
-      ns.readUnlock();
+      ns.readUnlock(FSNamesystemLockMode.BM, "getBlockIterator");
     }
   }
 
@@ -88,7 +89,7 @@ public static void updateState(final BlockManager blockManager) {
    */
  public static int[] getReplicaInfo(final FSNamesystem namesystem, final Block b) {
     final BlockManager bm = namesystem.getBlockManager();
-    namesystem.readLock();
+    namesystem.readLock(FSNamesystemLockMode.BM);
     try {
       final BlockInfo storedBlock = bm.getStoredBlock(b);
       return new int[]{getNumberOfRacks(bm, b),
@@ -96,7 +97,7 @@ public static int[] getReplicaInfo(final FSNamesystem namesystem, final Block b)
           bm.neededReconstruction.contains(storedBlock) ? 1 : 0,
           getNumberOfDomains(bm, b)};
     } finally {
-      namesystem.readUnlock();
+      namesystem.readUnlock(FSNamesystemLockMode.BM, "getReplicaInfo");
     }
   }
 
@@ -247,7 +248,7 @@ public static int computeAllPendingWork(BlockManager bm) {
    */
   public static void noticeDeadDatanode(NameNode nn, String dnName) {
     FSNamesystem namesystem = nn.getNamesystem();
-    namesystem.writeLock();
+    namesystem.writeLock(FSNamesystemLockMode.BM);
     try {
       DatanodeManager dnm = namesystem.getBlockManager().getDatanodeManager();
       HeartbeatManager hbm = dnm.getHeartbeatManager();
@@ -265,7 +266,7 @@ public static void noticeDeadDatanode(NameNode nn, String dnName) {
         hbm.heartbeatCheck();
       }
     } finally {
-      namesystem.writeUnlock();
+      namesystem.writeUnlock(FSNamesystemLockMode.BM, "noticeDeadDatanode");
     }
   }
   
@@ -302,12 +303,13 @@ public static void checkHeartbeat(BlockManager bm) {
    */
   public static int checkHeartbeatAndGetUnderReplicatedBlocksCount(
       FSNamesystem namesystem, BlockManager bm) {
-    namesystem.writeLock();
+    namesystem.writeLock(FSNamesystemLockMode.BM);
     try {
       bm.getDatanodeManager().getHeartbeatManager().heartbeatCheck();
       return bm.getUnderReplicatedNotMissingBlocks();
     } finally {
-      namesystem.writeUnlock();
+      namesystem.writeUnlock(FSNamesystemLockMode.BM,
+          "checkHeartbeatAndGetUnderReplicatedBlocksCount");
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index 22d203c98d2..cef50956673 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -1624,7 +1624,7 @@ public void testBlockManagerMachinesArray() throws Exception {
       }
       failedStorageDataNode.updateHeartbeat(reports.toArray(StorageReport
           .EMPTY_ARRAY), 0L, 0L, 0, 0, null);
-      ns.writeLock();
+      ns.writeLock(FSNamesystemLockMode.BM);
       DatanodeStorageInfo corruptStorageInfo= null;
       for(int i=0; i<corruptStorageDataNode.getStorageInfos().length; i++) {
         corruptStorageInfo = corruptStorageDataNode.getStorageInfos()[i];
@@ -1638,16 +1638,16 @@ public void testBlockManagerMachinesArray() throws Exception {
       blockManager.findAndMarkBlockAsCorrupt(blk, corruptStorageDataNode,
           corruptStorageInfo.getStorageID(),
           CorruptReplicasMap.Reason.ANY.toString());
-      ns.writeUnlock();
+      ns.writeUnlock(FSNamesystemLockMode.BM, "testBlockManagerMachinesArray");
       BlockInfo[] blockInfos = new BlockInfo[] {blockInfo};
-      ns.readLock();
+      ns.readLock(FSNamesystemLockMode.BM);
       LocatedBlocks locatedBlocks =
           blockManager.createLocatedBlocks(blockInfos, 3L, false, 0L, 3L,
               false, false, null, null);
       assertTrue("Located Blocks should exclude corrupt" +
               "replicas and failed storages",
           locatedBlocks.getLocatedBlocks().size() == 1);
-      ns.readUnlock();
+      ns.readUnlock(FSNamesystemLockMode.BM, "open");
     } finally {
       if (cluster != null) {
         cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java
index 7dfb9514f9e..059b5829f4c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlocksWithNotEnoughRacks.java
@@ -23,6 +23,7 @@
 import java.util.List;
 import java.util.concurrent.TimeoutException;
 import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -602,11 +603,11 @@ public void testMultipleReplicasScheduledForUpgradeDomain() throws Exception {
 
   static BlockReconstructionWork scheduleReconstruction(
       FSNamesystem fsn, BlockInfo block, int priority) {
-    fsn.writeLock();
+    fsn.writeLock(FSNamesystemLockMode.BM);
     try {
       return fsn.getBlockManager().scheduleReconstruction(block, priority);
     } finally {
-      fsn.writeUnlock();
+      fsn.writeUnlock(FSNamesystemLockMode.BM, "scheduleReconstruction");
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java
index 4ae0316fa7a..38396132f2e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestComputeInvalidateWork.java
@@ -40,6 +40,7 @@
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.test.Whitebox;
 import org.apache.hadoop.util.VersionInfo;
@@ -131,7 +132,7 @@ private void verifyInvalidationWorkCounts(int blockInvalidateLimit) {
   public void testComputeInvalidateReplicas() throws Exception {
     final int blockInvalidateLimit = bm.getDatanodeManager()
         .getBlockInvalidateLimit();
-    namesystem.writeLock();
+    namesystem.writeLock(FSNamesystemLockMode.BM);
     try {
       for (int i=0; i<nodes.length; i++) {
         for(int j=0; j<3*blockInvalidateLimit+1; j++) {
@@ -142,7 +143,7 @@ public void testComputeInvalidateReplicas() throws Exception {
       }
       verifyInvalidationWorkCounts(blockInvalidateLimit);
     } finally {
-      namesystem.writeUnlock();
+      namesystem.writeUnlock(FSNamesystemLockMode.BM, "testComputeInvalidateReplicas");
     }
   }
 
@@ -154,7 +155,7 @@ public void testComputeInvalidateReplicas() throws Exception {
   public void testComputeInvalidateStripedBlockGroups() throws Exception {
     final int blockInvalidateLimit =
         bm.getDatanodeManager().getBlockInvalidateLimit();
-    namesystem.writeLock();
+    namesystem.writeLock(FSNamesystemLockMode.BM);
     try {
      int nodeCount = ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits();
       for (int i = 0; i < nodeCount; i++) {
@@ -167,7 +168,7 @@ public void testComputeInvalidateStripedBlockGroups() throws Exception {
       }
       verifyInvalidationWorkCounts(blockInvalidateLimit);
     } finally {
-      namesystem.writeUnlock();
+      namesystem.writeUnlock(FSNamesystemLockMode.BM, "testComputeInvalidateStripedBlockGroups");
     }
   }
 
@@ -181,7 +182,7 @@ public void testComputeInvalidate() throws Exception {
     final int blockInvalidateLimit =
         bm.getDatanodeManager().getBlockInvalidateLimit();
     final Random random = new Random(System.currentTimeMillis());
-    namesystem.writeLock();
+    namesystem.writeLock(FSNamesystemLockMode.BM);
     try {
      int nodeCount = ecPolicy.getNumDataUnits() + ecPolicy.getNumParityUnits();
       for (int i = 0; i < nodeCount; i++) {
@@ -201,7 +202,7 @@ public void testComputeInvalidate() throws Exception {
       }
       verifyInvalidationWorkCounts(blockInvalidateLimit);
     } finally {
-      namesystem.writeUnlock();
+      namesystem.writeUnlock(FSNamesystemLockMode.BM, "testComputeInvalidate");
     }
   }
 
@@ -212,7 +213,7 @@ public void testComputeInvalidate() throws Exception {
    */
   @Test(timeout=120000)
   public void testDatanodeReformat() throws Exception {
-    namesystem.writeLock();
+    namesystem.writeLock(FSNamesystemLockMode.BM);
     try {
       // Change the datanode UUID to emulate a reformat
       String poolId = cluster.getNamesystem().getBlockPoolId();
@@ -234,7 +235,7 @@ public void testDatanodeReformat() throws Exception {
       assertEquals(0, bm.computeInvalidateWork(1));
       assertEquals(0, bm.getPendingDeletionBlocksCount());
     } finally {
-      namesystem.writeUnlock();
+      namesystem.writeUnlock(FSNamesystemLockMode.BM, "testDatanodeReformat");
     }
   }
 
@@ -255,7 +256,7 @@ public void testDatanodeReRegistration() throws Exception {
     dfs.delete(ecFile, false);
     BlockManagerTestUtil.waitForMarkedDeleteQueueIsEmpty(
         cluster.getNamesystem(0).getBlockManager());
-    namesystem.writeLock();
+    namesystem.writeLock(FSNamesystemLockMode.BM);
     InvalidateBlocks invalidateBlocks;
     int totalStripedDataBlocks = totalBlockGroups * (ecPolicy.getNumDataUnits()
         + ecPolicy.getNumParityUnits());
@@ -272,7 +273,7 @@ public void testDatanodeReRegistration() throws Exception {
       assertEquals("Unexpected invalidate count for striped block groups!",
           totalStripedDataBlocks, invalidateBlocks.getECBlocks());
     } finally {
-      namesystem.writeUnlock();
+      namesystem.writeUnlock(FSNamesystemLockMode.BM, "testDatanodeReRegistration");
     }
     // Re-register each DN and see that it wipes the invalidation work
     int totalBlockGroupsPerDataNode = totalBlockGroups;
@@ -284,14 +285,14 @@ public void testDatanodeReRegistration() throws Exception {
           new StorageInfo(HdfsServerConstants.NodeType.DATA_NODE),
           new ExportedBlockKeys(),
           VersionInfo.getVersion());
-      namesystem.writeLock();
+      namesystem.writeLock(FSNamesystemLockMode.BM);
       try {
         bm.getDatanodeManager().registerDatanode(reg);
         expected -= (totalReplicasPerDataNode + totalBlockGroupsPerDataNode);
         assertEquals("Expected number of invalidate blocks to decrease",
             (long) expected, invalidateBlocks.numBlocks());
       } finally {
-          namesystem.writeUnlock();
+        namesystem.writeUnlock(FSNamesystemLockMode.BM, "testDatanodeReRegistration");
       }
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java
index f12f6f59f82..c7bf6ec8f6b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHeartbeatHandling.java
@@ -36,6 +36,7 @@
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
@@ -91,7 +92,7 @@ public void testHeartbeat() throws Exception {
       final DatanodeStorageInfo[] ONE_TARGET = {dd.getStorageInfo(storageID)};
 
       try {
-        namesystem.writeLock();
+        namesystem.writeLock(FSNamesystemLockMode.BM);
         synchronized(hm) {
           for (int i=0; i<MAX_REPLICATE_BLOCKS; i++) {
             dd.addBlockToBeReplicated(
@@ -136,7 +137,7 @@ public void testHeartbeat() throws Exception {
           assertEquals(0, cmds.length);
         }
       } finally {
-        namesystem.writeUnlock();
+        namesystem.writeUnlock(FSNamesystemLockMode.BM, "testHeartbeat");
       }
     } finally {
       cluster.shutdown();
@@ -176,7 +177,7 @@ public void testHeartbeatBlockRecovery() throws Exception {
       dd3.updateStorage(new DatanodeStorage(DatanodeStorage.generateUuid()));
 
       try {
-        namesystem.writeLock();
+        namesystem.writeLock(FSNamesystemLockMode.BM);
         synchronized(hm) {
           NameNodeAdapter.sendHeartBeat(nodeReg1, dd1, namesystem);
           NameNodeAdapter.sendHeartBeat(nodeReg2, dd2, namesystem);
@@ -255,7 +256,7 @@ public void testHeartbeatBlockRecovery() throws Exception {
           assertEquals(recoveringNodes[2], dd3);
         }
       } finally {
-        namesystem.writeUnlock();
+        namesystem.writeUnlock(FSNamesystemLockMode.BM, "testHeartbeat");
       }
     } finally {
       cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
index dea893bab3b..944b27e656c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
@@ -39,6 +39,7 @@
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
@@ -184,7 +185,7 @@ public void testRemovingStorageDoesNotProduceZombies() throws Exception {
         DataNodeTestUtils.triggerBlockReport(dn);
       }
       ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, new Path("/foo1"));
-      cluster.getNamesystem().writeLock();
+      cluster.getNamesystem().writeLock(FSNamesystemLockMode.BM);
       final String storageIdToRemove;
       String datanodeUuid;
       // Find the first storage which this block is in.
@@ -200,7 +201,8 @@ public void testRemovingStorageDoesNotProduceZombies() throws Exception {
         storageIdToRemove = info.getStorageID();
         datanodeUuid = info.getDatanodeDescriptor().getDatanodeUuid();
       } finally {
-        cluster.getNamesystem().writeUnlock();
+        cluster.getNamesystem().writeUnlock(FSNamesystemLockMode.BM,
+            "testRemovingStorageDoesNotProduceZombies");
       }
       // Find the DataNode which holds that first storage.
       final DataNode datanodeToRemoveStorageFrom;
@@ -345,7 +347,7 @@ public void testRenamingStorageIds() throws Exception {
       GenericTestUtils.waitFor(new Supplier<Boolean>() {
         @Override
         public Boolean get() {
-          cluster.getNamesystem().writeLock();
+          cluster.getNamesystem().writeLock(FSNamesystemLockMode.BM);
           try {
             Iterator<DatanodeStorageInfo> storageInfoIter =
                 cluster.getNamesystem().getBlockManager().
@@ -367,7 +369,7 @@ public Boolean get() {
             LOG.info("Successfully found " + block.getBlockName() + " in " +
                 "be in storage id " + newStorageId);
           } finally {
-            cluster.getNamesystem().writeUnlock();
+            cluster.getNamesystem().writeUnlock(FSNamesystemLockMode.BM, "testRenamingStorageIds");
           }
           return true;
         }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java
index d915b6e6b73..fa8f0af5a85 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNodeCount.java
@@ -32,6 +32,7 @@
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.util.Time;
 import org.junit.Test;
 
@@ -174,14 +175,14 @@ void checkTimeout(String testLabel, long cycleTime) throws TimeoutException {
   /* threadsafe read of the replication counts for this block */
   NumberReplicas countNodes(Block block, FSNamesystem namesystem) {
     BlockManager blockManager = namesystem.getBlockManager();
-    namesystem.readLock();
+    namesystem.readLock(FSNamesystemLockMode.BM);
     try {
       lastBlock = block;
       lastNum = blockManager.countNodes(blockManager.getStoredBlock(block));
       return lastNum;
     }
     finally {
-      namesystem.readUnlock();
+      namesystem.readUnlock(FSNamesystemLockMode.BM, "countNodes");
     }
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
index d5f2fb99b78..d964f426986 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
@@ -39,6 +39,7 @@
 import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.junit.Test;
 
@@ -95,7 +96,7 @@ public void testProcesOverReplicateBlock() throws Exception {
       final BlockManager bm = namesystem.getBlockManager();
      final HeartbeatManager hm = bm.getDatanodeManager().getHeartbeatManager();
       try {
-        namesystem.writeLock();
+        namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
         synchronized(hm) {
           // set live datanode's remaining space to be 0 
           // so they will be chosen to be deleted when over-replication occurs
@@ -118,7 +119,7 @@ public void testProcesOverReplicateBlock() throws Exception {
               bm.getStoredBlock(block.getLocalBlock())).liveReplicas());
         }
       } finally {
-        namesystem.writeUnlock();
+        namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "testProcesOverReplicateBlock");
       }
       
     } finally {
@@ -181,11 +182,11 @@ public void testChooseReplicaToDelete() throws Exception {
 
       // All replicas for deletion should be scheduled on lastDN.
       // And should not actually be deleted, because lastDN does not heartbeat.
-      namesystem.readLock();
+      namesystem.readLock(FSNamesystemLockMode.BM);
       final int dnBlocks = bm.getExcessSize4Testing(dnReg.getDatanodeUuid());
       assertEquals("Replicas on node " + lastDNid + " should have been 
deleted",
           SMALL_FILE_LENGTH / SMALL_BLOCK_SIZE, dnBlocks);
-      namesystem.readUnlock();
+      namesystem.readUnlock(FSNamesystemLockMode.BM, "excessSize4Testing");
       for(BlockLocation location : locs)
         assertEquals("Block should still have 4 replicas",
             4, location.getNames().length);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
index ea7347f9e50..d5de79f67d3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReconstruction.java
@@ -51,6 +51,7 @@
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
@@ -288,13 +289,13 @@ public void testProcessPendingReconstructions() throws Exception {
 
       // A received IBR processing calls addBlock(). If the gen stamp in the
       // report is not the same, it should stay in pending.
-      fsn.writeLock();
+      fsn.writeLock(FSNamesystemLockMode.BM);
       try {
         // Use a wrong gen stamp.
         blkManager.addBlock(desc[0].getStorageInfos()[0],
             new Block(1, 1, 0), null);
       } finally {
-        fsn.writeUnlock();
+        fsn.writeUnlock(FSNamesystemLockMode.BM, "testProcessPendingReconstructions");
       }
 
       // The block should still be pending
@@ -303,12 +304,12 @@ public void testProcessPendingReconstructions() throws Exception {
 
       // A block report with the correct gen stamp should remove the record
       // from the pending queue.
-      fsn.writeLock();
+      fsn.writeLock(FSNamesystemLockMode.BM);
       try {
         blkManager.addBlock(desc[0].getStorageInfos()[0],
             new Block(1, 1, 1), null);
       } finally {
-        fsn.writeUnlock();
+        fsn.writeUnlock(FSNamesystemLockMode.BM, "testProcessPendingReconstructions");
       }
 
       GenericTestUtils.waitFor(() -> pendingReconstruction.size() == 0, 500,
@@ -459,7 +460,7 @@ public void testPendingAndInvalidate() throws Exception {
       // 3. mark a couple of blocks as corrupt
       LocatedBlock block = NameNodeAdapter.getBlockLocations(
           cluster.getNameNode(), filePath.toString(), 0, 1).get(0);
-      cluster.getNamesystem().writeLock();
+      cluster.getNamesystem().writeLock(FSNamesystemLockMode.BM);
       try {
         bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
             "STORAGE_ID", "TEST");
@@ -471,7 +472,7 @@ public void testPendingAndInvalidate() throws Exception {
        BlockInfo storedBlock = bm.getStoredBlock(block.getBlock().getLocalBlock());
         assertEquals(bm.pendingReconstruction.getNumReplicas(storedBlock), 2);
       } finally {
-        cluster.getNamesystem().writeUnlock();
+        cluster.getNamesystem().writeUnlock(FSNamesystemLockMode.BM, "testPendingAndInvalidate");
       }
 
       // 4. delete the file
@@ -507,7 +508,7 @@ public void testReplicationCounter() throws IOException,
         DATANODE_COUNT).build();
     tmpCluster.waitActive();
     FSNamesystem fsn = tmpCluster.getNamesystem(0);
-    fsn.writeLock();
+    fsn.writeLock(FSNamesystemLockMode.BM);
 
     try {
       BlockManager bm = fsn.getBlockManager();
@@ -563,7 +564,7 @@ public Boolean get() {
       }, 100, 60000);
     } finally {
       tmpCluster.shutdown();
-      fsn.writeUnlock();
+      fsn.writeUnlock(FSNamesystemLockMode.BM, "testReplicationCounter");
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
index 19b845007bf..0d5c5142081 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReconstructStripedBlocksWithRackAwareness.java
@@ -32,6 +32,7 @@
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.Whitebox;
@@ -196,11 +197,11 @@ public void testReconstructForNotEnoughRacks() throws Exception {
       DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
     }
 
-    fsn.writeLock();
+    fsn.writeLock(FSNamesystemLockMode.BM);
     try {
       bm.processMisReplicatedBlocks();
     } finally {
-      fsn.writeUnlock();
+      fsn.writeUnlock(FSNamesystemLockMode.BM, "testReconstructForNotEnoughRacks");
     }
 
     // check if redundancy monitor correctly schedule the reconstruction work.
@@ -342,12 +343,13 @@ public void testReconstructionWithDecommission() throws Exception {
     final DatanodeAdminManager decomManager =
         (DatanodeAdminManager) Whitebox.getInternalState(
             dm, "datanodeAdminManager");
-    cluster.getNamesystem().writeLock();
+    cluster.getNamesystem().writeLock(FSNamesystemLockMode.BM);
     try {
       dn9.stopDecommission();
       decomManager.startDecommission(dn9);
     } finally {
-      cluster.getNamesystem().writeUnlock();
+      cluster.getNamesystem().writeUnlock(FSNamesystemLockMode.BM,
+          "testReconstructionWithDecommission");
     }
 
     // make sure the decommission finishes and the block in on 6 racks
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java
index fef0b45f39c..a182f21e16a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java
@@ -31,6 +31,7 @@
 import org.apache.hadoop.hdfs.TestBlockStoragePolicy;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
@@ -72,7 +73,7 @@ DatanodeDescriptor[] getDatanodeDescriptors(Configuration conf) {
    */
   @Test
   public void testChooseTargetWithDecomNodes() throws IOException {
-    namenode.getNamesystem().writeLock();
+    namenode.getNamesystem().writeLock(FSNamesystemLockMode.BM);
     try {
       dnManager.getHeartbeatManager().updateHeartbeat(dataNodes[3],
           BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[3]),
@@ -124,14 +125,15 @@ public void testChooseTargetWithDecomNodes() throws IOException {
       dataNodes[0].stopDecommission();
       dataNodes[1].stopDecommission();
       dataNodes[2].stopDecommission();
-      namenode.getNamesystem().writeUnlock();
+      namenode.getNamesystem().writeUnlock(FSNamesystemLockMode.BM,
+          "testChooseTargetWithDecomNodes");
     }
     NameNode.LOG.info("Done working on it");
   }
 
   @Test
   public void testConsiderLoadFactor() throws IOException {
-    namenode.getNamesystem().writeLock();
+    namenode.getNamesystem().writeLock(FSNamesystemLockMode.BM);
     try {
       dnManager.getHeartbeatManager().updateHeartbeat(dataNodes[0],
           BlockManagerTestUtil.getStorageReportsForDatanode(dataNodes[0]),
@@ -178,7 +180,7 @@ public void testConsiderLoadFactor() throws IOException {
             info.getDatanodeDescriptor().getXceiverCount() <= (load/6)*1.2);
       }
     } finally {
-      namenode.getNamesystem().writeUnlock();
+      namenode.getNamesystem().writeUnlock(FSNamesystemLockMode.BM, "testConsiderLoadFactor");
     }
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyExcludeSlowNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyExcludeSlowNodes.java
index d1e856d2357..baeac8f5619 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyExcludeSlowNodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyExcludeSlowNodes.java
@@ -22,6 +22,7 @@
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.TestBlockStoragePolicy;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.OutlierMetrics;
 
 import org.apache.hadoop.test.GenericTestUtils;
@@ -85,7 +86,7 @@ DatanodeDescriptor[] getDatanodeDescriptors(Configuration conf) {
    */
   @Test
   public void testChooseTargetExcludeSlowNodes() throws Exception {
-    namenode.getNamesystem().writeLock();
+    namenode.getNamesystem().writeLock(FSNamesystemLockMode.BM);
     try {
       // add nodes
       for (int i = 0; i < dataNodes.length; i++) {
@@ -135,14 +136,15 @@ public void testChooseTargetExcludeSlowNodes() throws Exception {
             .getDatanodeUuid()));
       }
     } finally {
-      namenode.getNamesystem().writeUnlock();
+      namenode.getNamesystem().writeUnlock(FSNamesystemLockMode.BM,
+          "testChooseTargetExcludeSlowNodes");
     }
     NameNode.LOG.info("Done working on it");
   }
 
   @Test
   public void testSlowPeerTrackerEnabledClearSlowNodes() throws Exception {
-    namenode.getNamesystem().writeLock();
+    namenode.getNamesystem().writeLock(FSNamesystemLockMode.BM);
     try {
       // add nodes
       for (DatanodeDescriptor dataNode : dataNodes) {
@@ -172,7 +174,8 @@ public void testSlowPeerTrackerEnabledClearSlowNodes() throws Exception {
       assertTrue(dnManager.isSlowPeerCollectorInitialized());
       assertEquals(0, DatanodeManager.getSlowNodesUuidSet().size());
     } finally {
-      namenode.getNamesystem().writeUnlock();
+      namenode.getNamesystem().writeUnlock(FSNamesystemLockMode.BM,
+          "testSlowPeerTrackerEnabledClearSlowNodes");
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyRatioConsiderLoadWithStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyRatioConsiderLoadWithStorage.java
index d06af054699..31f154315a5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyRatioConsiderLoadWithStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyRatioConsiderLoadWithStorage.java
@@ -22,6 +22,7 @@
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.TestBlockStoragePolicy;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.junit.Test;
 
@@ -91,7 +92,7 @@ DatanodeDescriptor[] getDatanodeDescriptors(Configuration conf) {
    */
   @Test
   public void testChooseTargetWithRatioConsiderLoad() {
-    namenode.getNamesystem().writeLock();
+    namenode.getNamesystem().writeLock(FSNamesystemLockMode.BM);
     try {
       // After heartbeat has been processed, the total load should be 200.
       // And average load per node should be 40. The max load should be 2 * 40;
@@ -163,7 +164,8 @@ public void testChooseTargetWithRatioConsiderLoad() {
       assertTrue(targetSet.contains(dataNodes[3]));
       assertTrue(targetSet.contains(dataNodes[4]));
     } finally {
-      namenode.getNamesystem().writeUnlock();
+      namenode.getNamesystem().writeUnlock(FSNamesystemLockMode.BM,
+          "testChooseTargetWithRatioConsiderLoad");
     }
   }
 }
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
index 374ec529a41..3d1b2196dd5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
@@ -19,6 +19,7 @@
 
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.SlowDiskReports;
 
 
@@ -50,8 +51,15 @@
 import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.test.Whitebox;
+import org.mockito.ArgumentMatcher;
+import org.mockito.ArgumentMatchers;
+import org.mockito.Mockito;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
 
 import static org.apache.hadoop.hdfs.server.namenode.NameNodeHttpServer.FSIMAGE_ATTRIBUTE_KEY;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.spy;
 
 /**
  * This is a utility class to expose NameNode functionality for unit tests.
@@ -82,13 +90,13 @@ public static HdfsFileStatus getFileInfo(NameNode namenode, String src,
     // consistent with FSNamesystem#getFileInfo()
     final String operationName = needBlockToken ? "open" : "getfileinfo";
     FSPermissionChecker.setOperationType(operationName);
-    namenode.getNamesystem().readLock();
+    namenode.getNamesystem().readLock(FSNamesystemLockMode.FS);
     try {
       return FSDirStatAndListingOp.getFileInfo(namenode.getNamesystem()
           .getFSDirectory(), pc, src, resolveLink, needLocation,
           needBlockToken);
     } finally {
-      namenode.getNamesystem().readUnlock();
+      namenode.getNamesystem().readUnlock(FSNamesystemLockMode.FS, "getFileInfo");
     }
   }
   
@@ -201,11 +209,11 @@ public static HAServiceState getServiceState(NameNode nn) {
    */
   public static DatanodeDescriptor getDatanode(final FSNamesystem ns,
       DatanodeID id) throws IOException {
-    ns.readLock();
+    ns.readLock(FSNamesystemLockMode.BM);
     try {
       return ns.getBlockManager().getDatanodeManager().getDatanode(id);
     } finally {
-      ns.readUnlock();
+      ns.readUnlock(FSNamesystemLockMode.BM, "getDatanode");
     }
   }
   
@@ -229,7 +237,7 @@ public static long getImpendingGenerationStamp(final FSNamesystem fsn) {
   public static BlockInfo addBlockNoJournal(final FSNamesystem fsn,
       final String src, final DatanodeStorageInfo[] targets)
       throws IOException {
-    fsn.writeLock();
+    fsn.writeLock(FSNamesystemLockMode.GLOBAL);
     try {
       INodeFile file = (INodeFile)fsn.getFSDirectory().getINode(src);
       Block newBlock = fsn.createNewBlock(BlockType.CONTIGUOUS);
@@ -238,17 +246,17 @@ public static BlockInfo addBlockNoJournal(final FSNamesystem fsn,
           fsn, src, inodesInPath, newBlock, targets, BlockType.CONTIGUOUS);
       return file.getLastBlock();
     } finally {
-      fsn.writeUnlock();
+      fsn.writeUnlock(FSNamesystemLockMode.GLOBAL, "addBlockNoJournal");
     }
   }
 
   public static void persistBlocks(final FSNamesystem fsn,
       final String src, final INodeFile file) throws IOException {
-    fsn.writeLock();
+    fsn.writeLock(FSNamesystemLockMode.FS);
     try {
       FSDirWriteFileOp.persistBlocks(fsn.getFSDirectory(), src, file, true);
     } finally {
-      fsn.writeUnlock();
+      fsn.writeUnlock(FSNamesystemLockMode.FS, "persistBlocks");
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
index 1a763b5bae3..dd944d08b94 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
@@ -24,6 +24,7 @@
 import static org.junit.Assert.assertTrue;
 import java.io.IOException;
 import java.util.EnumSet;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -34,6 +35,7 @@
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.junit.After;
@@ -91,7 +93,7 @@ public void testRetryAddBlockWhileInChooseTarget() throws Exception {
     // start first addBlock()
     LOG.info("Starting first addBlock for " + src);
     LocatedBlock[] onRetryBlock = new LocatedBlock[1];
-    ns.readLock();
+    ns.readLock(FSNamesystemLockMode.GLOBAL);
     FSDirWriteFileOp.ValidateAddBlockResult r;
     FSPermissionChecker pc = Mockito.mock(FSPermissionChecker.class);
     try {
@@ -99,7 +101,7 @@ public void testRetryAddBlockWhileInChooseTarget() throws Exception {
                                             HdfsConstants.GRANDFATHER_INODE_ID,
                                             "clientName", null, onRetryBlock);
     } finally {
-      ns.readUnlock();
+      ns.readUnlock(FSNamesystemLockMode.GLOBAL, "validateAddBlock");
     }
     DatanodeStorageInfo targets[] = FSDirWriteFileOp.chooseTargetForNewBlock(
         ns.getBlockManager(), src, null, null, null, r);
@@ -117,13 +119,13 @@ public void testRetryAddBlockWhileInChooseTarget() throws Exception {
     assertEquals("Wrong replication", REPLICATION, lb2.getLocations().length);
 
     // continue first addBlock()
-    ns.writeLock();
+    ns.writeLock(FSNamesystemLockMode.GLOBAL);
     LocatedBlock newBlock;
     try {
       newBlock = FSDirWriteFileOp.storeAllocatedBlock(ns, src,
           HdfsConstants.GRANDFATHER_INODE_ID, "clientName", null, targets);
     } finally {
-      ns.writeUnlock();
+      ns.writeUnlock(FSNamesystemLockMode.GLOBAL, "testRetryAddBlockWhileInChooseTarget");
     }
     assertEquals("Blocks are not equal", lb2.getBlock(), newBlock.getBlock());
 
@@ -137,11 +139,11 @@ public void testRetryAddBlockWhileInChooseTarget() throws Exception {
 
   boolean checkFileProgress(String src, boolean checkall) throws IOException {
     final FSNamesystem ns = cluster.getNamesystem();
-    ns.readLock();
+    ns.readLock(FSNamesystemLockMode.GLOBAL);
     try {
      return ns.checkFileProgress(src, ns.dir.getINode(src).asFile(), checkall);
     } finally {
-      ns.readUnlock();
+      ns.readUnlock(FSNamesystemLockMode.GLOBAL, "checkFileProgress");
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java
index aad8e9b96a0..336409c4b50 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddOverReplicatedStripedBlocks.java
@@ -33,6 +33,7 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -204,12 +205,13 @@ public void testProcessOverReplicatedAndCorruptStripedBlock()
     BlockManager bm = cluster.getNamesystem().getBlockManager();
     List<DatanodeInfo> infos = Arrays.asList(bg.getLocations());
     List<String> storages = Arrays.asList(bg.getStorageIDs());
-    cluster.getNamesystem().writeLock();
+    cluster.getNamesystem().writeLock(FSNamesystemLockMode.BM);
     try {
       bm.findAndMarkBlockAsCorrupt(lbs.getLastLocatedBlock().getBlock(),
           infos.get(0), storages.get(0), "TEST");
     } finally {
-      cluster.getNamesystem().writeUnlock();
+      cluster.getNamesystem().writeUnlock(FSNamesystemLockMode.BM,
+          "testProcessOverReplicatedAndCorruptStripedBlock");
     }
     assertEquals(1, bm.countNodes(bm.getStoredBlock(blockInfo))
         .corruptReplicas());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockPlacementPolicyRackFaultTolerant.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockPlacementPolicyRackFaultTolerant.java
index 3beea47800d..60024ca64a4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockPlacementPolicyRackFaultTolerant.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockPlacementPolicyRackFaultTolerant.java
@@ -37,6 +37,7 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementStatus;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolerant;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.net.StaticMapping;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -253,11 +254,12 @@ public void testPlacementWithOnlyOneNodeInRackDecommission() throws Exception {
 
     //test if decommission succeeded
    DatanodeDescriptor dnd3 = dnm.getDatanode(cluster.getDataNodes().get(3).getDatanodeId());
-    cluster.getNamesystem().writeLock();
+    cluster.getNamesystem().writeLock(FSNamesystemLockMode.BM);
     try {
       dm.getDatanodeAdminManager().startDecommission(dnd3);
     } finally {
-      cluster.getNamesystem().writeUnlock();
+      cluster.getNamesystem().writeUnlock(FSNamesystemLockMode.BM,
+          "testPlacementWithOnlyOneNodeInRackDecommission");
     }
 
     // make sure the decommission finishes and the block in on 4 racks
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
index 2d45ee81a64..447bb5752f1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
@@ -44,6 +44,7 @@
 import java.util.List;
 
 import org.apache.commons.lang3.time.DateUtils;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -762,7 +763,7 @@ private static void waitForCachedBlocks(NameNode nn,
       @Override
       public Boolean get() {
         int numCachedBlocks = 0, numCachedReplicas = 0;
-        namesystem.readLock();
+        namesystem.readLock(FSNamesystemLockMode.BM);
         try {
           GSet<CachedBlock, CachedBlock> cachedBlocks =
               cacheManager.getCachedBlocks();
@@ -775,7 +776,7 @@ public Boolean get() {
             }
           }
         } finally {
-          namesystem.readUnlock();
+          namesystem.readUnlock(FSNamesystemLockMode.BM, "checkBlocks");
         }
 
         LOG.info(logString + " cached blocks: have " + numCachedBlocks +
@@ -1506,7 +1507,7 @@ public void testMaxRelativeExpiry() throws Exception {
   private void checkPendingCachedEmpty(MiniDFSCluster cluster)
       throws Exception {
     Thread.sleep(1000);
-    cluster.getNamesystem().readLock();
+    cluster.getNamesystem().readLock(FSNamesystemLockMode.BM);
     try {
       final DatanodeManager datanodeManager =
           cluster.getNamesystem().getBlockManager().getDatanodeManager();
@@ -1519,7 +1520,7 @@ private void checkPendingCachedEmpty(MiniDFSCluster cluster)
             descriptor.getPendingCached().isEmpty());
       }
     } finally {
-      cluster.getNamesystem().readUnlock();
+      cluster.getNamesystem().readUnlock(FSNamesystemLockMode.BM, "checkPendingCachedEmpty");
     }
   }
 
@@ -1666,9 +1667,9 @@ public void testExpiryTimeConsistency() throws Exception {
     HATestUtil.waitForStandbyToCatchUp(ann, sbn);
     GenericTestUtils.waitFor(() -> {
       boolean isConsistence = false;
-      ann.getNamesystem().readLock();
+      ann.getNamesystem().readLock(FSNamesystemLockMode.FS);
       try {
-        sbn.getNamesystem().readLock();
+        sbn.getNamesystem().readLock(FSNamesystemLockMode.FS);
         try {
           Iterator<CacheDirective> annDirectivesIt = annCachemanager.
               getCacheDirectives().iterator();
@@ -1683,10 +1684,10 @@ public void testExpiryTimeConsistency() throws Exception {
             }
           }
         } finally {
-          sbn.getNamesystem().readUnlock();
+          sbn.getNamesystem().readUnlock(FSNamesystemLockMode.FS, "expiryTimeConsistency");
         }
       } finally {
-        ann.getNamesystem().readUnlock();
+        ann.getNamesystem().readUnlock(FSNamesystemLockMode.FS, "expiryTimeConsistency");
       }
       if (!isConsistence) {
         LOG.info("testEexpiryTimeConsistency:"
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java
index 9d32528bf25..cf16672d61c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java
@@ -52,6 +52,7 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.Node;
@@ -471,7 +472,7 @@ public void testOpenRenameRace() throws Exception {
         } catch (InterruptedException e) {
         }
       });
-      fsn.writeLock();
+      fsn.writeLock(FSNamesystemLockMode.GLOBAL);
       open.start();
       openSem.acquire();
       Thread.yield();
@@ -479,7 +480,7 @@ public void testOpenRenameRace() throws Exception {
       rename.start();
       renameSem.acquire();
       Thread.yield();
-      fsn.writeUnlock();
+      fsn.writeUnlock(FSNamesystemLockMode.GLOBAL, "testOpenRenameRace");
 
       // wait open and rename threads finish.
       open.join();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
index 0cf696f504b..febcafa8954 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDiskspaceQuotaUpdate.java
@@ -45,6 +45,7 @@
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.QuotaByStorageTypeExceededException;
 import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
@@ -392,11 +393,11 @@ public void testQuotaInitialization() throws Exception {
 
   private void updateCountForQuota(int i) {
     FSNamesystem fsn = cluster.getNamesystem();
-    fsn.writeLock();
+    fsn.writeLock(FSNamesystemLockMode.FS);
     try {
       getFSDirectory().updateCountForQuota(i);
     } finally {
-      fsn.writeUnlock();
+      fsn.writeUnlock(FSNamesystemLockMode.FS, "updateCountForQuota");
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
index 9dc2cc9182b..936bec67716 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
@@ -41,6 +41,7 @@
 import java.util.concurrent.atomic.AtomicReference;
 
 import java.util.function.Supplier;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -55,6 +56,7 @@
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.OpInstanceCache;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SetOwnerOp;
 import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream;
@@ -523,11 +525,11 @@ public void testSaveRightBeforeSync() throws Exception {
         public void run() {
           try {
             LOG.info("Starting setOwner");
-            namesystem.writeLock();
+            namesystem.writeLock(FSNamesystemLockMode.FS);
             try {
               editLog.logSetOwner("/","test","test");
             } finally {
-              namesystem.writeUnlock();
+              namesystem.writeUnlock(FSNamesystemLockMode.FS, "testSaveRightBeforeSync");
             }
             sleepingBeforeSync.countDown();
             LOG.info("edit thread: sleeping for " + BLOCK_TIME + "secs");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
index 48a6a2b7779..d502b6de132 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
@@ -32,6 +32,7 @@
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DiffList;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
@@ -151,11 +152,11 @@ private File saveFSImageToTempFile() throws IOException {
         conf);
     FSImageCompression compression = FSImageCompression.createCompression(conf);
     File imageFile = getImageFile(testDir, txid);
-    fsn.readLock();
+    fsn.readLock(FSNamesystemLockMode.GLOBAL);
     try {
       saver.save(imageFile, compression);
     } finally {
-      fsn.readUnlock();
+      fsn.readUnlock(FSNamesystemLockMode.GLOBAL, "saveFSImage");
     }
     return imageFile;
   }
@@ -163,14 +164,14 @@ private File saveFSImageToTempFile() throws IOException {
   /** Load the fsimage from a temp file */
   private void loadFSImageFromTempFile(File imageFile) throws IOException {
     FSImageFormat.LoaderDelegator loader = FSImageFormat.newLoader(conf, fsn);
-    fsn.writeLock();
+    fsn.writeLock(FSNamesystemLockMode.GLOBAL);
     fsn.getFSDirectory().writeLock();
     try {
       loader.load(imageFile, false);
       fsn.getFSDirectory().updateCountForQuota();
     } finally {
       fsn.getFSDirectory().writeUnlock();
-      fsn.writeUnlock();
+      fsn.writeUnlock(FSNamesystemLockMode.GLOBAL, "loadFSImageFromTempFile");
     }
   }
   
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
index f2f42440240..defbf36e5e1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
@@ -41,6 +41,7 @@
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAState;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
@@ -194,12 +195,12 @@ public void testReset() throws Exception {
   }
 
   private void clearNamesystem(FSNamesystem fsn) {
-    fsn.writeLock();
+    fsn.writeLock(FSNamesystemLockMode.GLOBAL);
     try {
       fsn.clear();
       assertFalse(fsn.isImageLoaded());
     } finally {
-      fsn.writeUnlock();
+      fsn.writeUnlock(FSNamesystemLockMode.GLOBAL, "clearNamesystem");
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java
index 2353dd975a7..a3005ec291c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java
@@ -37,6 +37,7 @@
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.metrics2.impl.ConfigBuilder;
 import org.apache.hadoop.metrics2.impl.TestMetricsConfig;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -153,7 +154,7 @@ public void testWithFSNamesystemWriteLock() throws Exception {
       cluster.waitActive();
 
       fsn = cluster.getNameNode().namesystem;
-      fsn.writeLock();
+      fsn.writeLock(FSNamesystemLockMode.GLOBAL);
       Thread.sleep(jmxCachePeriod * 1000);
 
       MBeanClient client = new MBeanClient();
@@ -163,8 +164,8 @@ public void testWithFSNamesystemWriteLock() throws Exception {
           "is owned by another thread", client.succeeded);
       client.interrupt();
     } finally {
-      if (fsn != null && fsn.hasWriteLock()) {
-        fsn.writeUnlock();
+      if (fsn != null && fsn.hasWriteLock(FSNamesystemLockMode.GLOBAL)) {
+        fsn.writeUnlock(FSNamesystemLockMode.GLOBAL, "testWithFSNamesystemWriteLock");
       }
       if (cluster != null) {
         cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
index b96f218ceb2..9abb28da7ba 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java
@@ -36,6 +36,7 @@
 import org.apache.hadoop.fs.CommonPathCapabilities;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.test.LambdaTestUtils;
 import org.slf4j.Logger;
@@ -1083,7 +1084,7 @@ public void testTruncateRecovery() throws IOException {
     INodeFile file = iip.getLastINode().asFile();
     long initialGenStamp = file.getLastBlock().getGenerationStamp();
     // Test that prepareFileForTruncate sets up in-place truncate.
-    fsn.writeLock();
+    fsn.writeLock(FSNamesystemLockMode.GLOBAL);
     try {
       Block oldBlock = file.getLastBlock();
       Block truncateBlock = FSDirTruncateOp.prepareFileForTruncate(fsn, iip,
@@ -1103,7 +1104,7 @@ public void testTruncateRecovery() throws IOException {
       fsn.getEditLog().logTruncate(
           src, client, clientMachine, BLOCK_SIZE-1, Time.now(), truncateBlock);
     } finally {
-      fsn.writeUnlock();
+      fsn.writeUnlock(FSNamesystemLockMode.GLOBAL, "testTruncateRecovery");
     }
 
     // Re-create file and ensure we are ready to copy on truncate
@@ -1117,7 +1118,7 @@ public void testTruncateRecovery() throws IOException {
         (BlockInfoContiguous) file.getLastBlock()), is(true));
     initialGenStamp = file.getLastBlock().getGenerationStamp();
     // Test that prepareFileForTruncate sets up copy-on-write truncate
-    fsn.writeLock();
+    fsn.writeLock(FSNamesystemLockMode.GLOBAL);
     try {
       Block oldBlock = file.getLastBlock();
       Block truncateBlock = FSDirTruncateOp.prepareFileForTruncate(fsn, iip,
@@ -1137,7 +1138,7 @@ public void testTruncateRecovery() throws IOException {
       fsn.getEditLog().logTruncate(
           src, client, clientMachine, BLOCK_SIZE-1, Time.now(), truncateBlock);
     } finally {
-      fsn.writeUnlock();
+      fsn.writeUnlock(FSNamesystemLockMode.GLOBAL, "testTruncateRecovery");
     }
     checkBlockRecovery(srcPath);
     fs.deleteSnapshot(parent, "ss0");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index 892c5ce020a..b59d7545d08 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -105,6 +105,7 @@
 import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.ReplicationResult;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.ErasureCodingResult;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.tools.DFSck;
@@ -1511,11 +1512,11 @@ public void testBlockIdCKDecommission() throws Exception {
     ExtendedBlock eb = util.getFirstBlock(dfs, path);
     BlockCollection bc = null;
     try {
-      fsn.writeLock();
+      fsn.writeLock(FSNamesystemLockMode.GLOBAL);
       BlockInfo bi = bm.getStoredBlock(eb.getLocalBlock());
       bc = fsn.getBlockCollection(bi);
     } finally {
-      fsn.writeUnlock();
+      fsn.writeUnlock(FSNamesystemLockMode.GLOBAL, "testBlockIdCKDecommission");
     }
     DatanodeDescriptor dn = bc.getBlocks()[0].getDatanode(0);
     bm.getDatanodeManager().getDatanodeAdminManager().startDecommission(dn);
@@ -1953,11 +1954,11 @@ public void testFsckWithDecommissionedReplicas() throws Exception {
     ExtendedBlock eb = util.getFirstBlock(dfs, path);
     BlockCollection bc = null;
     try {
-      fsn.writeLock();
+      fsn.writeLock(FSNamesystemLockMode.GLOBAL);
       BlockInfo bi = bm.getStoredBlock(eb.getLocalBlock());
       bc = fsn.getBlockCollection(bi);
     } finally {
-      fsn.writeUnlock();
+      fsn.writeUnlock(FSNamesystemLockMode.GLOBAL, "testFsckWithDecommissionedReplicas");
     }
     DatanodeDescriptor dn = bc.getBlocks()[0]
         .getDatanode(0);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetBlockLocations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetBlockLocations.java
index 1975549415e..fe5e88e3256 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetBlockLocations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestGetBlockLocations.java
@@ -23,6 +23,7 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.namenode.FSDirectory.DirOp;
 import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.junit.Test;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
@@ -72,14 +73,15 @@ public void testGetBlockLocationsRacingWithDelete() throws IOException {
       @Override
       public Void answer(InvocationOnMock invocation) throws Throwable {
         if(!deleted[0]) {
-          fsn.writeLock();
+          fsn.writeLock(FSNamesystemLockMode.GLOBAL);
           try {
             INodesInPath iip = fsd.getINodesInPath(FILE_PATH, DirOp.READ);
             FSDirDeleteOp.delete(fsd, iip, new INode.BlocksMapUpdateInfo(),
                                  new ArrayList<INode>(), new ArrayList<Long>(),
                                  now());
           } finally {
-            fsn.writeUnlock();
+            fsn.writeUnlock(FSNamesystemLockMode.GLOBAL,
+                "testGetBlockLocationsRacingWithDelete");
           }
           deleted[0] = true;
         }
@@ -106,14 +108,14 @@ public void testGetBlockLocationsRacingWithRename() throws IOException {
       @Override
       public Void answer(InvocationOnMock invocation) throws Throwable {
         if (!renamed[0]) {
-          fsn.writeLock();
+          fsn.writeLock(FSNamesystemLockMode.FS);
           try {
             FSDirRenameOp.renameTo(fsd, fsd.getPermissionChecker(), FILE_PATH,
                                    DST_PATH, new INode.BlocksMapUpdateInfo(),
                                    false);
             renamed[0] = true;
           } finally {
-            fsn.writeUnlock();
+            fsn.writeUnlock(FSNamesystemLockMode.FS, "testGetBlockLocationsRacingWithRename");
           }
         }
         invocation.callRealMethod();
@@ -142,13 +144,13 @@ private static FSNamesystem setupFileSystem() throws IOException {
         perm, 1, 1, new BlockInfo[] {}, (short) 1,
         DFS_BLOCK_SIZE_DEFAULT);
 
-    fsn.writeLock();
+    fsn.writeLock(FSNamesystemLockMode.FS);
     try {
       final FSDirectory fsd = fsn.getFSDirectory();
       INodesInPath iip = fsd.getINodesInPath("/", DirOp.READ);
       fsd.addINode(iip, file, null);
     } finally {
-      fsn.writeUnlock();
+      fsn.writeUnlock(FSNamesystemLockMode.FS, "setupFileSystem");
     }
     return fsn;
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java
index 42f054006c9..06cdc8c4636 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java
@@ -21,6 +21,7 @@
 import java.util.Random;
 
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -120,11 +121,11 @@ protected void execute() throws Throwable {
           try {
             int blockcount = getBlockCount();
             if (blockcount < TOTAL_BLOCKS && blockcount > 0) {
-              mc.getNamesystem().writeLock();
+              mc.getNamesystem().writeLock(FSNamesystemLockMode.GLOBAL);
               try {
                 lockOps++;
               } finally {
-                mc.getNamesystem().writeUnlock();
+                mc.getNamesystem().writeUnlock(FSNamesystemLockMode.GLOBAL, "runThreads");
               }
               Thread.sleep(1);
             }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java
index c60a1369bd9..9a5a9a9e76b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListOpenFiles.java
@@ -47,6 +47,7 @@
 import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
 import org.apache.hadoop.hdfs.protocol.OpenFilesIterator;
 import org.apache.hadoop.hdfs.protocol.OpenFilesIterator.OpenFilesType;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -338,7 +339,7 @@ public void testListOpenFilesWithDeletedPath() throws Exception {
     FSDirectory dir = fsNamesystem.getFSDirectory();
     List<INode> removedINodes = new ChunkedArrayList<>();
     removedINodes.add(dir.getINode(path));
-    fsNamesystem.writeLock();
+    fsNamesystem.writeLock(FSNamesystemLockMode.FS);
     try {
       dir.removeFromInodeMap(removedINodes);
       openFileEntryBatchedEntries = nnRpc
@@ -349,7 +350,7 @@ public void testListOpenFilesWithDeletedPath() throws Exception {
     } catch (NullPointerException e) {
       Assert.fail("Should not throw NPE when the file is deleted but has lease!");
     } finally {
-      fsNamesystem.writeUnlock();
+      fsNamesystem.writeUnlock(FSNamesystemLockMode.FS, "testListOpenFilesWithDeletedPath");
     }
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetadataConsistency.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetadataConsistency.java
index 3e80091307c..dac99f40df8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetadataConsistency.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMetadataConsistency.java
@@ -28,6 +28,7 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.test.GenericTestUtils;
 
 import java.util.function.Supplier;
@@ -95,13 +96,14 @@ public void testGenerationStampInFuture() throws Exception {
 
     // Simulate Namenode forgetting a Block
     cluster.restartNameNode(true);
-    cluster.getNameNode().getNamesystem().writeLock();
+    cluster.getNameNode().getNamesystem().writeLock(FSNamesystemLockMode.BM);
     BlockInfo bInfo = cluster.getNameNode().getNamesystem().getBlockManager()
         .getStoredBlock(block.getLocalBlock());
     bInfo.delete();
     cluster.getNameNode().getNamesystem().getBlockManager()
         .removeBlock(bInfo);
-    cluster.getNameNode().getNamesystem().writeUnlock();
+    cluster.getNameNode().getNamesystem().writeUnlock(FSNamesystemLockMode.BM,
+        "testGenerationStampInFuture");
 
     // we also need to tell block manager that we are in the startup path
     BlockManagerTestUtil.setStartupSafeModeForTest(
@@ -145,11 +147,12 @@ public void testEnsureGenStampsIsStartupOnly() throws Exception {
     cluster.restartNameNode(true);
     BlockInfo bInfo = cluster.getNameNode().getNamesystem().getBlockManager
         ().getStoredBlock(block.getLocalBlock());
-    cluster.getNameNode().getNamesystem().writeLock();
+    cluster.getNameNode().getNamesystem().writeLock(FSNamesystemLockMode.BM);
     bInfo.delete();
     cluster.getNameNode().getNamesystem().getBlockManager()
         .removeBlock(bInfo);
-    cluster.getNameNode().getNamesystem().writeUnlock();
+    cluster.getNameNode().getNamesystem().writeUnlock(FSNamesystemLockMode.BM,
+        "testEnsureGenStampsIsStartupOnly");
 
     cluster.restartDataNode(dnProps);
     waitForNumBytes(0);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java
index 67ab88ea726..5cdfa2942f4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestReconstructStripedBlocks.java
@@ -44,6 +44,7 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.protocol.BlockECReconstructionCommand.BlockECReconstructionInfo;
 
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
@@ -338,13 +339,13 @@ public void testCountLiveReplicas() throws Exception {
       boolean reconstructed = false;
       for (int i = 0; i < 5; i++) {
         NumberReplicas num = null;
-        fsn.readLock();
+        fsn.readLock(FSNamesystemLockMode.GLOBAL);
         try {
           BlockInfo blockInfo = cluster.getNamesystem().getFSDirectory()
               .getINode4Write(filePath.toString()).asFile().getLastBlock();
           num = bm.countNodes(blockInfo);
         } finally {
-          fsn.readUnlock();
+          fsn.readUnlock(FSNamesystemLockMode.GLOBAL, "testCountLiveReplicas");
         }
         if (num.liveReplicas() >= groupSize) {
           reconstructed = true;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java
index c43c909c98a..e6eccbfb77e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java
@@ -35,6 +35,7 @@
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
@@ -196,8 +197,8 @@ public void testEditsForCancelOnTokenExpire() throws IOException,
         @Override
         public Void answer(InvocationOnMock invocation) throws Throwable {
           // fsn claims read lock if either read or write locked.
-          Assert.assertTrue(fsnRef.get().hasReadLock());
-          Assert.assertFalse(fsnRef.get().hasWriteLock());
+          Assert.assertTrue(fsnRef.get().hasReadLock(FSNamesystemLockMode.FS));
+          Assert.assertFalse(fsnRef.get().hasWriteLock(FSNamesystemLockMode.FS));
           return null;
         }
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
index 9d79e496102..f27864283f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
@@ -52,6 +52,7 @@
 import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
@@ -568,13 +569,13 @@ private void banner(String string) {
   }
 
   private void doMetasave(NameNode nn2) {
-    nn2.getNamesystem().writeLock();
+    nn2.getNamesystem().writeLock(FSNamesystemLockMode.BM);
     try {
       PrintWriter pw = new PrintWriter(System.err);
       nn2.getNamesystem().getBlockManager().metaSave(pw);
       pw.flush();
     } finally {
-      nn2.getNamesystem().writeUnlock();
+      nn2.getNamesystem().writeUnlock(FSNamesystemLockMode.BM, "metaSave");
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
index 3a63f97b8e1..2b64446b0d2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
@@ -50,6 +50,7 @@
 import java.util.Random;
 
 import org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.ipc.metrics.RpcDetailedMetrics;
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableList;
 
@@ -494,12 +495,12 @@ public void testCorruptBlock() throws Exception {
     // Corrupt first replica of the block
     LocatedBlock block = NameNodeAdapter.getBlockLocations(
         cluster.getNameNode(), file.toString(), 0, 1).get(0);
-    cluster.getNamesystem().writeLock();
+    cluster.getNamesystem().writeLock(FSNamesystemLockMode.BM);
     try {
       bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
           "STORAGE_ID", "TEST");
     } finally {
-      cluster.getNamesystem().writeUnlock();
+      cluster.getNamesystem().writeUnlock(FSNamesystemLockMode.BM, "testCorruptBlock");
     }
 
     BlockManagerTestUtil.updateState(bm);
@@ -588,12 +589,12 @@ public void testStripedFileCorruptBlocks() throws Exception {
     assert lbs.get(0) instanceof LocatedStripedBlock;
     LocatedStripedBlock bg = (LocatedStripedBlock) (lbs.get(0));
 
-    cluster.getNamesystem().writeLock();
+    cluster.getNamesystem().writeLock(FSNamesystemLockMode.BM);
     try {
       bm.findAndMarkBlockAsCorrupt(bg.getBlock(), bg.getLocations()[0],
           "STORAGE_ID", "TEST");
     } finally {
-      cluster.getNamesystem().writeUnlock();
+      cluster.getNamesystem().writeUnlock(FSNamesystemLockMode.BM, "testStripedFileCorruptBlocks");
     }
 
     BlockManagerTestUtil.updateState(bm);
@@ -687,12 +688,12 @@ public void testMissingBlock() throws Exception {
     // Corrupt the only replica of the block to result in a missing block
     LocatedBlock block = NameNodeAdapter.getBlockLocations(
         cluster.getNameNode(), file.toString(), 0, 1).get(0);
-    cluster.getNamesystem().writeLock();
+    cluster.getNamesystem().writeLock(FSNamesystemLockMode.BM);
     try {
       bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
           "STORAGE_ID", "TEST");
     } finally {
-      cluster.getNamesystem().writeUnlock();
+      cluster.getNamesystem().writeUnlock(FSNamesystemLockMode.BM, "testMissingBlock");
     }
     Thread.sleep(1000); // Wait for block to be marked corrupt
     MetricsRecordBuilder rb = getMetrics(NS_METRICS);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeFileUnderConstructionWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeFileUnderConstructionWithSnapshot.java
index 4f32af9b9a4..e9ea5b6d9b9 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeFileUnderConstructionWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeFileUnderConstructionWithSnapshot.java
@@ -46,6 +46,7 @@
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.slf4j.event.Level;
@@ -297,10 +298,10 @@ public void testLease() throws Exception {
       hdfs.delete(foo, true);
       Thread.sleep(1000);
       try {
-        fsn.writeLock();
+        fsn.writeLock(FSNamesystemLockMode.GLOBAL);
         NameNodeAdapter.getLeaseManager(fsn).runLeaseChecks();
       } finally {
-        fsn.writeUnlock();
+        fsn.writeUnlock(FSNamesystemLockMode.GLOBAL, "testLease");
       }
     } finally {
       NameNodeAdapter.setLeasePeriod(fsn, HdfsConstants.LEASE_SOFTLIMIT_PERIOD,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
index 392866e86ea..75e89f4f2aa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
@@ -81,6 +81,7 @@
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.AccessControlException;
@@ -783,14 +784,14 @@ public void testReportCommand() throws Exception {
       LocatedStripedBlock bg =
           (LocatedStripedBlock)(lbs.get(0));
 
-      miniCluster.getNamesystem().writeLock();
+      miniCluster.getNamesystem().writeLock(FSNamesystemLockMode.BM);
       try {
         BlockManager bm = miniCluster.getNamesystem().getBlockManager();
         bm.findAndMarkBlockAsCorrupt(bg.getBlock(), bg.getLocations()[0],
             "STORAGE_ID", "TEST");
         BlockManagerTestUtil.updateState(bm);
       } finally {
-        miniCluster.getNamesystem().writeUnlock();
+        miniCluster.getNamesystem().writeUnlock(FSNamesystemLockMode.BM, "testReportCommand");
       }
       waitForCorruptBlock(miniCluster, client, file);
 
diff --git a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/ITestProvidedImplementation.java b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/ITestProvidedImplementation.java
index 361243fd696..7056034f3f3 100644
--- a/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/ITestProvidedImplementation.java
+++ b/hadoop-tools/hadoop-fs2img/src/test/java/org/apache/hadoop/hdfs/server/namenode/ITestProvidedImplementation.java
@@ -84,6 +84,7 @@
 
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsVolumeImpl;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.ha.BootstrapStandby;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.ipc.RemoteException;
@@ -1094,26 +1095,26 @@ private DatanodeDescriptor getDatanodeDescriptor(DatanodeManager dnm,
 
   private void startDecommission(FSNamesystem namesystem, DatanodeManager dnm,
       int dnIndex) throws Exception {
-    namesystem.writeLock();
+    namesystem.writeLock(FSNamesystemLockMode.BM);
     DatanodeDescriptor dnDesc = getDatanodeDescriptor(dnm, dnIndex);
     dnm.getDatanodeAdminManager().startDecommission(dnDesc);
-    namesystem.writeUnlock();
+    namesystem.writeUnlock(FSNamesystemLockMode.BM, "startDecommission");
   }
 
   private void startMaintenance(FSNamesystem namesystem, DatanodeManager dnm,
       int dnIndex) throws Exception {
-    namesystem.writeLock();
+    namesystem.writeLock(FSNamesystemLockMode.BM);
     DatanodeDescriptor dnDesc = getDatanodeDescriptor(dnm, dnIndex);
     dnm.getDatanodeAdminManager().startMaintenance(dnDesc, Long.MAX_VALUE);
-    namesystem.writeUnlock();
+    namesystem.writeUnlock(FSNamesystemLockMode.BM, "startMaintenance");
   }
 
   private void stopMaintenance(FSNamesystem namesystem, DatanodeManager dnm,
       int dnIndex) throws Exception {
-    namesystem.writeLock();
+    namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
     DatanodeDescriptor dnDesc = getDatanodeDescriptor(dnm, dnIndex);
     dnm.getDatanodeAdminManager().stopMaintenance(dnDesc);
-    namesystem.writeUnlock();
+    namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "stopMaintenance");
   }
 
   @Test


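Note on the pattern this patch applies across the tests: every bare FSNamesystem readLock()/writeLock() call now names which fine-grained lock to take (FSNamesystemLockMode.FS for namespace-only state, BM for block-manager-only state, GLOBAL when both are touched), and every unlock additionally passes an operation name that is reported when the lock is released. The following is a minimal sketch of the resulting test idiom, assuming only the FSNamesystem lock methods exercised in this diff; the helper class, method name, and Runnable parameter are illustrative, not part of the patch.

import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;

public final class FglLockIdiomSketch {
  private FglLockIdiomSketch() {}

  // Hypothetical helper: run a block-manager-only operation under the BM
  // write lock. The mode argument scopes which lock is taken; the string
  // passed to writeUnlock names the operation for lock-hold reporting.
  public static void runUnderBmWriteLock(FSNamesystem fsn, Runnable op) {
    fsn.writeLock(FSNamesystemLockMode.BM);
    try {
      op.run(); // e.g. bm.findAndMarkBlockAsCorrupt(...), as in the tests above
    } finally {
      fsn.writeUnlock(FSNamesystemLockMode.BM, "runUnderBmWriteLock");
    }
  }
}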