This is an automated email from the ASF dual-hosted git repository.

zanderxu pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git

commit 02b149da67eb311b959c079a8967e29639b8e825
Author: ZanderXu <zande...@apache.org>
AuthorDate: Wed Apr 10 10:08:24 2024 +0800

    HDFS-17445. [FGL] some operations support fine-grained locking (#6715)
---
 .../hdfs/server/blockmanagement/BlockManager.java  | 10 +++---
 .../hadoop/hdfs/server/namenode/BackupImage.java   |  5 +--
 .../hdfs/server/namenode/FSDirErasureCodingOp.java |  2 +-
 .../hdfs/server/namenode/FSDirTruncateOp.java      |  2 +-
 .../hadoop/hdfs/server/namenode/FSNamesystem.java  | 40 +++++++++++-----------
 .../hdfs/server/namenode/FSPermissionChecker.java  | 15 ++++----
 .../hdfs/server/namenode/FSTreeTraverser.java      |  7 ++--
 .../hdfs/server/namenode/FsImageValidation.java    |  5 +--
 .../hadoop/hdfs/server/namenode/LeaseManager.java  | 12 +++----
 .../hadoop/hdfs/server/namenode/NameNode.java      | 25 +++++++-------
 .../hadoop/hdfs/server/namenode/NamenodeFsck.java  | 10 +++---
 .../hdfs/server/namenode/SecondaryNameNode.java    |  5 +--
 .../hdfs/server/namenode/TestLeaseManager.java     |  2 ++
 13 files changed, 75 insertions(+), 65 deletions(-)
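
The changes below all follow one mechanical pattern: coarse FSNamesystem lock
calls (readLock(), writeLock(), hasWriteLock(), ...) become overloads that name
the lock scope via org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode
(FS for namespace state, BM for block-management state, GLOBAL where an
operation touches both), and the unlock overloads also carry an operation name
used for lock-hold metrics. The self-contained Java sketch below illustrates the
idea only; LockMode and FineGrainedLockSketch are hypothetical stand-ins, not
Hadoop's fgl implementation.

    import java.util.concurrent.locks.ReentrantReadWriteLock;

    // Hypothetical stand-in for FSNamesystemLockMode.
    enum LockMode { FS, BM, GLOBAL }

    // Illustrative sketch: separate fair read-write locks for namespace (FS)
    // and block-management (BM) state; GLOBAL takes both.
    class FineGrainedLockSketch {
      private final ReentrantReadWriteLock fsLock = new ReentrantReadWriteLock(true);
      private final ReentrantReadWriteLock bmLock = new ReentrantReadWriteLock(true);

      void writeLock(LockMode mode) {
        // Always acquire FS before BM so GLOBAL callers cannot deadlock
        // against FS-only or BM-only callers.
        if (mode == LockMode.FS || mode == LockMode.GLOBAL) {
          fsLock.writeLock().lock();
        }
        if (mode == LockMode.BM || mode == LockMode.GLOBAL) {
          bmLock.writeLock().lock();
        }
      }

      void writeUnlock(LockMode mode, String opName) {
        // Release in reverse order; opName mirrors the operation label the
        // patch passes (e.g. "processQueue") for logging and metrics.
        if (mode == LockMode.BM || mode == LockMode.GLOBAL) {
          bmLock.writeLock().unlock();
        }
        if (mode == LockMode.FS || mode == LockMode.GLOBAL) {
          fsLock.writeLock().unlock();
        }
      }

      boolean hasWriteLock(LockMode mode) {
        boolean fs = fsLock.isWriteLockedByCurrentThread();
        boolean bm = bmLock.isWriteLockedByCurrentThread();
        return mode == LockMode.FS ? fs : mode == LockMode.BM ? bm : fs && bm;
      }
    }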

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 69438c7a647..19086ee0db3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -2795,7 +2795,7 @@ public long getBytesInFutureECBlockGroups() {
    *               list of blocks that need to be removed from blocksMap
    */
   public void removeBlocksAndUpdateSafemodeTotal(BlocksMapUpdateInfo blocks) {
-    assert namesystem.hasWriteLock();
+    assert namesystem.hasWriteLock(FSNamesystemLockMode.BM);
     // In the case that we are a Standby tailing edits from the
     // active while in safe-mode, we need to track the total number
     // of blocks and safe blocks in the system.
@@ -4119,7 +4119,7 @@ public int processMisReplicatedBlocks(List<BlockInfo> blocks) {
               && !Thread.currentThread().isInterrupted()
               && iter.hasNext()) {
         int limit = processed + numBlocksPerIteration;
-        namesystem.writeLockInterruptibly();
+        namesystem.writeLockInterruptibly(FSNamesystemLockMode.GLOBAL);
         try {
           while (iter.hasNext() && processed < limit) {
             BlockInfo blk = iter.next();
@@ -4129,7 +4129,7 @@ public int processMisReplicatedBlocks(List<BlockInfo> blocks) {
                 blk, r);
           }
         } finally {
-          namesystem.writeUnlock("processMisReplicatedBlocks");
+          namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "processMisReplicatedBlocks");
         }
       }
     } catch (InterruptedException ex) {
@@ -5672,7 +5672,7 @@ private void processQueue() {
           // batch as many operations in the write lock until the queue
           // runs dry, or the max lock hold is reached.
           int processed = 0;
-          namesystem.writeLock();
+          namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
           metrics.setBlockOpsQueued(queue.size() + 1);
           try {
             long start = Time.monotonicNow();
@@ -5685,7 +5685,7 @@ private void processQueue() {
               action = queue.poll();
             } while (action != null);
           } finally {
-            namesystem.writeUnlock("processQueue");
+            namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "processQueue");
             metrics.addBlockOpsBatched(processed - 1);
           }
         } catch (InterruptedException e) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
index 7bde21dcb69..fc1ff98a400 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java
@@ -28,6 +28,7 @@
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageState;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.StringUtils;
 
@@ -218,11 +219,11 @@ private synchronized void applyEdits(long firstTxId, int numTxns, byte[] data)
       }
       lastAppliedTxId = logLoader.getLastAppliedTxId();
 
-      getNamesystem().writeLock();
+      getNamesystem().writeLock(FSNamesystemLockMode.FS);
       try {
         getNamesystem().dir.updateCountForQuota();
       } finally {
-        getNamesystem().writeUnlock("applyEdits");
+        getNamesystem().writeUnlock(FSNamesystemLockMode.FS, "applyEdits");
       }
     } finally {
       backupInputStream.clear();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
index 6237862ed4b..8fe05a9463a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
@@ -430,7 +430,7 @@ static ErasureCodingPolicy unprotectedGetErasureCodingPolicy(
    */
   static ErasureCodingPolicyInfo[] getErasureCodingPolicies(
       final FSNamesystem fsn) throws IOException {
-    assert fsn.hasReadLock();
+    assert fsn.hasReadLock(FSNamesystemLockMode.FS);
     return fsn.getErasureCodingPolicyManager().getPolicies();
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
index e956f9ca711..8c7007e4239 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
@@ -176,7 +176,7 @@ static void unprotectedTruncate(final FSNamesystem fsn,
       final long newLength, final long mtime, final Block truncateBlock)
       throws UnresolvedLinkException, QuotaExceededException,
       SnapshotAccessControlException, IOException {
-    assert fsn.hasWriteLock();
+    assert fsn.hasWriteLock(FSNamesystemLockMode.GLOBAL);
 
     FSDirectory fsd = fsn.getFSDirectory();
     INodeFile file = iip.getLastINode().asFile();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 1de284e65b5..a8347668dc0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -705,12 +705,12 @@ void imageLoadComplete() {
 
   void setImageLoaded() {
     if(imageLoaded) return;
-    writeLock();
+    writeLock(FSNamesystemLockMode.GLOBAL);
     try {
       setImageLoaded(true);
       dir.markNameCacheInitialized();
     } finally {
-      writeUnlock("setImageLoaded");
+      writeUnlock(FSNamesystemLockMode.GLOBAL, "setImageLoaded");
     }
   }
 
@@ -1288,7 +1288,7 @@ void loadFSImage(StartupOption startOpt) throws IOException {
       startOpt = StartupOption.REGULAR;
     }
     boolean success = false;
-    writeLock();
+    writeLock(FSNamesystemLockMode.GLOBAL);
     try {
      // We shouldn't be calling saveNamespace if we've come up in standby state.
       MetaRecoveryContext recovery = startOpt.createRecoveryContext();
@@ -1320,7 +1320,7 @@ void loadFSImage(StartupOption startOpt) throws IOException {
       if (!success) {
         fsImage.close();
       }
-      writeUnlock("loadFSImage", true);
+      writeUnlock(FSNamesystemLockMode.GLOBAL, "loadFSImage", true);
     }
     imageLoadComplete();
   }
@@ -1391,7 +1391,7 @@ void startCommonServices(Configuration conf, HAContext haContext) throws IOExcep
    * Stop services common to both active and standby states
    */
   void stopCommonServices() {
-    writeLock();
+    writeLock(FSNamesystemLockMode.GLOBAL);
     if (inodeAttributeProvider != null) {
       dir.setINodeAttributeProvider(null);
       inodeAttributeProvider.stop();
@@ -1399,7 +1399,7 @@ void stopCommonServices() {
     try {
       if (blockManager != null) blockManager.close();
     } finally {
-      writeUnlock("stopCommonServices");
+      writeUnlock(FSNamesystemLockMode.GLOBAL, "stopCommonServices");
     }
     RetryCache.clear(retryCache);
   }
@@ -1529,7 +1529,7 @@ private boolean shouldUseDelegationTokens() {
    */
   void stopActiveServices() {
     LOG.info("Stopping services started for active state");
-    writeLock();
+    writeLock(FSNamesystemLockMode.GLOBAL);
     try {
       if (blockManager != null) {
         blockManager.stopReconstructionInitializer();
@@ -1577,7 +1577,7 @@ void stopActiveServices() {
         blockManager.setInitializedReplQueues(false);
       }
     } finally {
-      writeUnlock("stopActiveServices");
+      writeUnlock(FSNamesystemLockMode.GLOBAL, "stopActiveServices");
     }
   }
   
@@ -4666,7 +4666,7 @@ private void clearCorruptLazyPersistFiles()
 
       List<BlockCollection> filesToDelete = new ArrayList<>();
       boolean changed = false;
-      writeLock();
+      writeLock(FSNamesystemLockMode.GLOBAL);
       try {
         final Iterator<BlockInfo> it =
             blockManager.getCorruptReplicaBlockIterator();
@@ -4697,7 +4697,7 @@ private void clearCorruptLazyPersistFiles()
           }
         }
       } finally {
-        writeUnlock("clearCorruptLazyPersistFiles");
+        writeUnlock(FSNamesystemLockMode.GLOBAL, "clearCorruptLazyPersistFiles");
       }
       if (changed) {
         getEditLog().logSync();
@@ -5001,12 +5001,12 @@ public long getNumOfWriteLockLongHold() {
   }
 
   int getNumberOfDatanodes(DatanodeReportType type) {
-    readLock();
+    readLock(FSNamesystemLockMode.BM);
     try {
       return getBlockManager().getDatanodeManager().getDatanodeListForReport(
           type).size(); 
     } finally {
-      readUnlock("getNumberOfDatanodes");
+      readUnlock(FSNamesystemLockMode.BM, "getNumberOfDatanodes");
     }
   }
 
@@ -5235,12 +5235,12 @@ public long getNumActiveClients() {
   public long getCompleteBlocksTotal() {
     // Calculate number of blocks under construction
     long numUCBlocks = 0;
-    readLock();
+    readLock(FSNamesystemLockMode.GLOBAL);
     try {
       numUCBlocks = leaseManager.getNumUnderConstructionBlocks();
       return getBlocksTotal() - numUCBlocks;
     } finally {
-      readUnlock("getCompleteBlocksTotal");
+      readUnlock(FSNamesystemLockMode.GLOBAL, "getCompleteBlocksTotal");
     }
   }
 
@@ -7678,7 +7678,7 @@ public RollingUpgradeInfo.Bean getRollingUpgradeStatus() {
     if (upgradeInfo.createdRollbackImages()) {
       return new RollingUpgradeInfo.Bean(upgradeInfo);
     }
-    readLock();
+    readLock(FSNamesystemLockMode.FS);
     try {
       // check again after acquiring the read lock.
       upgradeInfo = getRollingUpgradeInfo();
@@ -7692,7 +7692,7 @@ public RollingUpgradeInfo.Bean getRollingUpgradeStatus() {
     } catch (IOException ioe) {
       LOG.warn("Encountered exception setting Rollback Image", ioe);
     } finally {
-      readUnlock("getRollingUpgradeStatus");
+      readUnlock(FSNamesystemLockMode.FS, "getRollingUpgradeStatus");
     }
     return new RollingUpgradeInfo.Bean(upgradeInfo);
   }
@@ -7775,7 +7775,7 @@ RollingUpgradeInfo finalizeRollingUpgrade() throws IOException {
     final String operationName = "finalizeRollingUpgrade";
     checkSuperuserPrivilege(operationName);
     checkOperation(OperationCategory.WRITE);
-    writeLock();
+    writeLock(FSNamesystemLockMode.FS);
     try {
       checkOperation(OperationCategory.WRITE);
       if (!isRollingUpgrade()) {
@@ -7793,7 +7793,7 @@ RollingUpgradeInfo finalizeRollingUpgrade() throws IOException {
       getFSImage().renameCheckpoint(NameNodeFile.IMAGE_ROLLBACK,
           NameNodeFile.IMAGE);
     } finally {
-      writeUnlock(operationName, getLockReportInfoSupplier(null));
+      writeUnlock(FSNamesystemLockMode.FS, operationName, getLockReportInfoSupplier(null));
     }
 
     if (!haEnabled) {
@@ -8768,7 +8768,7 @@ void removeXAttr(String src, XAttr xAttr, boolean logRetryCache)
 
   @Override
   public void removeXattr(long id, String xattrName) throws IOException {
-    writeLock();
+    writeLock(FSNamesystemLockMode.FS);
     try {
       final INode inode = dir.getInode(id);
       if (inode == null) {
@@ -8784,7 +8784,7 @@ public void removeXattr(long id, String xattrName) throws IOException {
         FSDirSatisfyStoragePolicyOp.removeSPSXattr(dir, inode, spsXAttr);
       }
     } finally {
-      writeUnlock("removeXAttr");
+      writeUnlock(FSNamesystemLockMode.FS, "removeXAttr");
     }
     getEditLog().logSync();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
index b432be39897..e8169606577 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSPermissionChecker.java
@@ -25,6 +25,7 @@
 import java.util.Stack;
 import java.util.function.LongFunction;
 
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.util.Preconditions;
 import org.apache.hadoop.ipc.CallerContext;
 import org.apache.hadoop.util.Time;
@@ -49,7 +50,7 @@
 * The state of this class need not be synchronized as it has data structures that
  * are read-only.
  * 
- * Some of the helper methods are guarded by {@link FSNamesystem#readLock()}.
+ * Some of the helper methods are guarded by {@link FSNamesystem#readLock(FSNamesystemLockMode)}.
  */
 public class FSPermissionChecker implements AccessControlEnforcer {
   static final Logger LOG = LoggerFactory.getLogger(UserGroupInformation.class);
@@ -341,7 +342,7 @@ public void denyUserAccess(String path, String errorMessage)
    * @param ignoreEmptyDir Ignore permission checking for empty directory?
    * @throws AccessControlException
    * 
-   * Guarded by {@link FSNamesystem#readLock()}
+   * Guarded by {@link FSNamesystem#readLock(FSNamesystemLockMode)}
    * Caller of this method must hold that lock.
    */
   void checkPermission(INodesInPath inodesInPath, boolean doCheckOwner,
@@ -554,7 +555,7 @@ private INodeAttributes getINodeAttrs(byte[][] pathByNameArr, int pathIdx,
     return inodeAttrs;
   }
 
-  /** Guarded by {@link FSNamesystem#readLock()} */
+  /** Guarded by {@link FSNamesystem#readLock(FSNamesystemLockMode)}. */
   private void checkOwner(INodeAttributes[] inodes, byte[][] components, int i)
       throws AccessControlException {
     if (getUser().equals(inodes[i].getUserName())) {
@@ -565,7 +566,7 @@ private void checkOwner(INodeAttributes[] inodes, byte[][] components, int i)
         " is not the owner of inode=" + getPath(components, 0, i));
   }
 
-  /** Guarded by {@link FSNamesystem#readLock()}
+  /** Guarded by {@link FSNamesystem#readLock(FSNamesystemLockMode)}.
    * @throws AccessControlException
    * @throws ParentNotDirectoryException
    * @throws UnresolvedPathException
@@ -579,7 +580,7 @@ private void checkTraverse(INodeAttributes[] inodeAttrs, INode[] inodes,
     }
   }
 
-  /** Guarded by {@link FSNamesystem#readLock()} */
+  /** Guarded by {@link FSNamesystem#readLock(FSNamesystemLockMode)}. */
   private void checkSubAccess(byte[][] components, int pathIdx,
       INode inode, int snapshotId, FsAction access, boolean ignoreEmptyDir)
       throws AccessControlException {
@@ -653,7 +654,7 @@ private void checkSubAccess(byte[][] components, int pathIdx,
     }
   }
 
-  /** Guarded by {@link FSNamesystem#readLock()} */
+  /** Guarded by {@link FSNamesystem#readLock(FSNamesystemLockMode)}. */
   private void check(INodeAttributes[] inodes, byte[][] components, int i,
       FsAction access) throws AccessControlException {
     INodeAttributes inode = (i >= 0) ? inodes[i] : null;
@@ -767,7 +768,7 @@ private boolean hasAclPermission(INodeAttributes inode,
     return !foundMatch && mode.getOtherAction().implies(access);
   }
 
-  /** Guarded by {@link FSNamesystem#readLock()} */
+  /** Guarded by {@link FSNamesystem#readLock(FSNamesystemLockMode)}. */
   private void checkStickyBit(INodeAttributes[] inodes, byte[][] components,
       int index) throws AccessControlException {
     INodeAttributes parent = inodes[index];
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeTraverser.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeTraverser.java
index c1d26f40c08..12b04d221a7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeTraverser.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSTreeTraverser.java
@@ -28,6 +28,7 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
 import org.apache.hadoop.util.Timer;
@@ -127,7 +128,7 @@ protected INode traverseDirInt(final long startId, INode curr,
       List<byte[]> startAfters, final TraverseInfo traverseInfo)
       throws IOException, InterruptedException {
     assert dir.hasReadLock();
-    assert dir.getFSNamesystem().hasReadLock();
+    assert dir.getFSNamesystem().hasReadLock(FSNamesystemLockMode.FS);
     long lockStartTime = timer.monotonicNow();
     Preconditions.checkNotNull(curr, "Current inode can't be null");
     checkINodeReady(startId);
@@ -261,13 +262,13 @@ private INode resolvePaths(final long startId, List<byte[]> startAfters)
   }
 
   protected void readLock() {
-    dir.getFSNamesystem().readLock();
+    dir.getFSNamesystem().readLock(FSNamesystemLockMode.FS);
     dir.readLock();
   }
 
   protected void readUnlock() {
     dir.readUnlock();
-    dir.getFSNamesystem().readUnlock("FSTreeTraverser");
+    dir.getFSNamesystem().readUnlock(FSNamesystemLockMode.FS, "FSTreeTraverser");
   }
 
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsImageValidation.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsImageValidation.java
index 4dac221e4d2..0a3d3412f13 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsImageValidation.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsImageValidation.java
@@ -26,6 +26,7 @@
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.Storage;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
 import org.apache.hadoop.hdfs.server.namenode.top.metrics.TopMetrics;
@@ -273,14 +274,14 @@ public void run() {
 
       final FSImageFormat.LoaderDelegator loader
           = FSImageFormat.newLoader(conf, namesystem);
-      namesystem.writeLock();
+      namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
       namesystem.getFSDirectory().writeLock();
       try {
         loader.load(fsImageFile, false);
         fsImage.setLastAppliedTxId(loader);
       } finally {
         namesystem.getFSDirectory().writeUnlock();
-        namesystem.writeUnlock("loadImage");
+        namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "loadImage");
       }
     }
     t.cancel();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
index c2e01ffe266..df1dc354985 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
@@ -136,8 +136,8 @@ Lease getLease(String holder) {
    * calling this method.
    */
   synchronized long getNumUnderConstructionBlocks() {
-    assert this.fsnamesystem.hasReadLock() : "The FSNamesystem read lock wasn't"
-      + "acquired before counting under construction blocks";
+    assert this.fsnamesystem.hasReadLock(FSNamesystemLockMode.GLOBAL) :
+        "The FSNamesystem read lock wasn't acquired before counting under construction blocks";
     long numUCBlocks = 0;
     for (Long id : getINodeIdWithLeases()) {
       INode inode = fsnamesystem.getFSDirectory().getInode(id);
@@ -208,7 +208,7 @@ private synchronized INode[] getINodesWithLease() {
    */
   public Set<INodesInPath> getINodeWithLeases(final INodeDirectory
       ancestorDir) throws IOException {
-    assert fsnamesystem.hasReadLock();
+    assert fsnamesystem.hasReadLock(FSNamesystemLockMode.FS);
     final long startTimeMs = Time.monotonicNow();
     Set<INodesInPath> iipSet = new HashSet<>();
     final INode[] inodes = getINodesWithLease();
@@ -543,13 +543,13 @@ public void run() {
             continue;
           }
 
-          fsnamesystem.writeLockInterruptibly();
+          fsnamesystem.writeLockInterruptibly(FSNamesystemLockMode.GLOBAL);
           try {
             if (!fsnamesystem.isInSafeMode()) {
               needSync = checkLeases(candidates);
             }
           } finally {
-            fsnamesystem.writeUnlock("leaseManager");
+            fsnamesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "leaseManager");
             // lease reassignments should to be sync'ed.
             if (needSync) {
               fsnamesystem.getEditLog().logSync();
@@ -574,7 +574,7 @@ synchronized boolean checkLeases() {
 
   private synchronized boolean checkLeases(Collection<Lease> leasesToCheck) {
     boolean needSync = false;
-    assert fsnamesystem.hasWriteLock();
+    assert fsnamesystem.hasWriteLock(FSNamesystemLockMode.GLOBAL);
 
     long start = monotonicNow();
     for (Lease leaseToCheck : leasesToCheck) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index bcf56a86441..472bbc22199 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -20,6 +20,7 @@
 import org.apache.commons.lang3.ArrayUtils;
 import org.apache.commons.lang3.tuple.Pair;
 import org.apache.hadoop.classification.VisibleForTesting;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.ipc.CallerContext;
 import org.apache.hadoop.thirdparty.com.google.common.base.Joiner;
 import org.apache.hadoop.util.Preconditions;
@@ -2238,14 +2239,14 @@ public void stopStandbyServices() throws IOException {
     
     @Override
     public void writeLock() {
-      namesystem.writeLock();
+      namesystem.writeLock(FSNamesystemLockMode.GLOBAL);
       namesystem.lockRetryCache();
     }
     
     @Override
     public void writeUnlock() {
       namesystem.unlockRetryCache();
-      namesystem.writeUnlock("HAState");
+      namesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "HAState");
     }
     
     /** Check if an operation of given category is allowed */
@@ -2396,7 +2397,7 @@ private String reconfReplicationParameters(final String newVal,
       final String property) throws ReconfigurationException {
     BlockManager bm = namesystem.getBlockManager();
     int newSetting;
-    namesystem.writeLock();
+    namesystem.writeLock(FSNamesystemLockMode.BM);
     try {
       if (property.equals(DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY)) {
         bm.setMaxReplicationStreams(
@@ -2434,7 +2435,7 @@ private String reconfReplicationParameters(final String newVal,
       throw new ReconfigurationException(property, newVal, getConf().get(
           property), e);
     } finally {
-      namesystem.writeUnlock("reconfReplicationParameters");
+      namesystem.writeUnlock(FSNamesystemLockMode.BM, "reconfReplicationParameters");
     }
   }
 
@@ -2454,7 +2455,7 @@ private int adjustNewVal(int defaultVal, String newVal) {
   private String reconfHeartbeatInterval(final DatanodeManager datanodeManager,
       final String property, final String newVal)
       throws ReconfigurationException {
-    namesystem.writeLock();
+    namesystem.writeLock(FSNamesystemLockMode.BM);
     try {
       if (newVal == null) {
         // set to default
@@ -2471,7 +2472,7 @@ private String reconfHeartbeatInterval(final DatanodeManager datanodeManager,
       throw new ReconfigurationException(property, newVal, getConf().get(
           property), nfe);
     } finally {
-      namesystem.writeUnlock("reconfHeartbeatInterval");
+      namesystem.writeUnlock(FSNamesystemLockMode.BM, "reconfHeartbeatInterval");
       LOG.info("RECONFIGURE* changed heartbeatInterval to "
           + datanodeManager.getHeartbeatInterval());
     }
@@ -2480,7 +2481,7 @@ private String reconfHeartbeatInterval(final DatanodeManager datanodeManager,
   private String reconfHeartbeatRecheckInterval(
       final DatanodeManager datanodeManager, final String property,
       final String newVal) throws ReconfigurationException {
-    namesystem.writeLock();
+    namesystem.writeLock(FSNamesystemLockMode.BM);
     try {
       if (newVal == null) {
         // set to default
@@ -2495,7 +2496,7 @@ private String reconfHeartbeatRecheckInterval(
       throw new ReconfigurationException(property, newVal, getConf().get(
           property), nfe);
     } finally {
-      namesystem.writeUnlock("reconfHeartbeatRecheckInterval");
+      namesystem.writeUnlock(FSNamesystemLockMode.BM, "reconfHeartbeatRecheckInterval");
       LOG.info("RECONFIGURE* changed heartbeatRecheckInterval to "
           + datanodeManager.getHeartbeatRecheckInterval());
     }
@@ -2620,7 +2621,7 @@ String reconfigureParallelLoad(String newVal) {
   String reconfigureSlowNodesParameters(final DatanodeManager datanodeManager,
      final String property, final String newVal) throws ReconfigurationException {
     BlockManager bm = namesystem.getBlockManager();
-    namesystem.writeLock();
+    namesystem.writeLock(FSNamesystemLockMode.BM);
     String result;
     try {
       switch (property) {
@@ -2697,13 +2698,13 @@ String reconfigureSlowNodesParameters(final DatanodeManager datanodeManager,
       throw new ReconfigurationException(property, newVal, getConf().get(
           property), e);
     } finally {
-      namesystem.writeUnlock("reconfigureSlowNodesParameters");
+      namesystem.writeUnlock(FSNamesystemLockMode.BM, "reconfigureSlowNodesParameters");
     }
   }
 
  private String reconfigureBlockInvalidateLimit(final DatanodeManager datanodeManager,
      final String property, final String newVal) throws ReconfigurationException {
-    namesystem.writeLock();
+    namesystem.writeLock(FSNamesystemLockMode.BM);
     try {
      if (newVal == null) {
        datanodeManager.setBlockInvalidateLimit(DFSConfigKeys.DFS_BLOCK_INVALIDATE_LIMIT_DEFAULT);
@@ -2717,7 +2718,7 @@ private String reconfigureBlockInvalidateLimit(final DatanodeManager datanodeMan
     } catch (NumberFormatException e) {
      throw new ReconfigurationException(property, newVal, getConf().get(property), e);
     } finally {
-      namesystem.writeUnlock("reconfigureBlockInvalidateLimit");
+      namesystem.writeUnlock(FSNamesystemLockMode.BM, "reconfigureBlockInvalidateLimit");
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index bc6df0141c6..5c2ddfbdd0c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -38,6 +38,7 @@
 import java.util.concurrent.ThreadLocalRandom;
 
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockUnderConstructionFeature;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.apache.hadoop.io.IOUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -289,7 +290,8 @@ public void blockIdCK(String blockId) {
       return;
     }
 
-    namenode.getNamesystem().readLock();
+    // TODO: Just hold the BM read lock.
+    namenode.getNamesystem().readLock(FSNamesystemLockMode.GLOBAL);
     try {
       //get blockInfo
       Block block = new Block(Block.getBlockId(blockId));
@@ -353,7 +355,7 @@ public void blockIdCK(String blockId) {
       out.print("\n\n" + errMsg);
       LOG.warn("Error in looking up block", e);
     } finally {
-      namenode.getNamesystem().readUnlock("fsck");
+      namenode.getNamesystem().readUnlock(FSNamesystemLockMode.GLOBAL, "fsck");
     }
   }
 
@@ -585,7 +587,7 @@ private LocatedBlocks getBlockLocations(String path, HdfsFileStatus file)
     final String operationName = "fsckGetBlockLocations";
     FSPermissionChecker.setOperationType(operationName);
     FSPermissionChecker pc = fsn.getPermissionChecker();
-    fsn.readLock();
+    fsn.readLock(FSNamesystemLockMode.GLOBAL);
     try {
       blocks = FSDirStatAndListingOp.getBlockLocations(
           fsn.getFSDirectory(), pc,
@@ -594,7 +596,7 @@ private LocatedBlocks getBlockLocations(String path, HdfsFileStatus file)
     } catch (FileNotFoundException fnfe) {
       blocks = null;
     } finally {
-      fsn.readUnlock(operationName);
+      fsn.readUnlock(FSNamesystemLockMode.GLOBAL, operationName);
     }
     return blocks;
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
index 5f83a9c8d50..52d7d541c8c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
@@ -37,6 +37,7 @@
 import org.apache.commons.cli.Options;
 import org.apache.commons.cli.ParseException;
 import org.apache.commons.cli.PosixParser;
+import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -1094,11 +1095,11 @@ void doMerge(
             sig.mostRecentCheckpointTxId + " even though it should have " +
             "just been downloaded");
       }
-      dstNamesystem.writeLock();
+      dstNamesystem.writeLock(FSNamesystemLockMode.GLOBAL);
       try {
         dstImage.reloadFromImageFile(file, dstNamesystem);
       } finally {
-        dstNamesystem.writeUnlock("reloadFromImageFile");
+        dstNamesystem.writeUnlock(FSNamesystemLockMode.GLOBAL, "reloadFromImageFile");
       }
       dstNamesystem.imageLoadComplete();
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java
index 6be34537a05..5e6ab565a3e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLeaseManager.java
@@ -469,6 +469,8 @@ private static FSNamesystem makeMockFsNameSystem() {
     when(fsn.hasWriteLock()).thenReturn(true);
     when(fsn.hasReadLock(FSNamesystemLockMode.FS)).thenReturn(true);
     when(fsn.hasWriteLock(FSNamesystemLockMode.FS)).thenReturn(true);
+    when(fsn.hasReadLock(FSNamesystemLockMode.GLOBAL)).thenReturn(true);
+    when(fsn.hasWriteLock(FSNamesystemLockMode.GLOBAL)).thenReturn(true);
     when(fsn.getFSDirectory()).thenReturn(dir);
     when(fsn.getMaxLockHoldToReleaseLeaseMs()).thenReturn(maxLockHoldToReleaseLeaseMs);
     return fsn;
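
Note on the test change above: once production code asserts mode-aware state
such as fsn.hasWriteLock(FSNamesystemLockMode.GLOBAL), mocks must stub those
overloads too. A minimal sketch of that Mockito pattern, assuming
FSNamesystemLockMode is an enum and FSNamesystem remains mockable as in
TestLeaseManager; the MockFsnSketch class is illustrative, not part of the patch:

    import static org.mockito.Mockito.mock;
    import static org.mockito.Mockito.when;

    import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
    import org.apache.hadoop.hdfs.server.namenode.fgl.FSNamesystemLockMode;

    final class MockFsnSketch {
      static FSNamesystem mockLockedNamesystem() {
        FSNamesystem fsn = mock(FSNamesystem.class);
        // Coarse no-arg checks, assumed still present alongside the
        // mode-aware overloads (the test above stubs hasWriteLock()).
        when(fsn.hasReadLock()).thenReturn(true);
        when(fsn.hasWriteLock()).thenReturn(true);
        // Stub every lock mode so asserts pass whichever scope is checked.
        for (FSNamesystemLockMode mode : FSNamesystemLockMode.values()) {
          when(fsn.hasReadLock(mode)).thenReturn(true);
          when(fsn.hasWriteLock(mode)).thenReturn(true);
        }
        return fsn;
      }
    }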

