This is an automated email from the ASF dual-hosted git repository.

weichiu pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
     new 8bb9807  HDFS-2470. NN should automatically set permissions on 
dfs.namenode.*.dir. Contributed by Siddharth Wagle.
8bb9807 is described below

commit 8bb98076bef835229ef3498dd740921c47ed3e24
Author: Arpit Agarwal <a...@apache.org>
AuthorDate: Mon Aug 26 15:43:52 2019 -0700

    HDFS-2470. NN should automatically set permissions on dfs.namenode.*.dir. 
Contributed by Siddharth Wagle.
    
    (cherry picked from commit a64a43b77fb1032dcb66730a6b6257a24726c256)
    (cherry picked from commit 8b1238171752d03712ae69d8464108ef0803ae10)
    
     Conflicts:
        
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
        
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
---
 .../java/org/apache/hadoop/hdfs/DFSConfigKeys.java |  8 +++++++
 .../hadoop/hdfs/qjournal/server/JNStorage.java     |  6 ++++-
 .../apache/hadoop/hdfs/server/common/Storage.java  | 28 ++++++++++++++++++----
 .../hadoop/hdfs/server/namenode/FSImage.java       |  2 +-
 .../hadoop/hdfs/server/namenode/NNStorage.java     | 24 ++++++++++++++-----
 .../src/main/resources/hdfs-default.xml            | 20 ++++++++++++++++
 .../hadoop/hdfs/server/namenode/TestEditLog.java   | 14 +++++++++++
 .../hadoop/hdfs/server/namenode/TestStartup.java   | 27 ++++++++++++++++++++-
 8 files changed, 115 insertions(+), 14 deletions(-)

diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index d6ed729..02ce2f4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -538,6 +538,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   public static final String  DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT = "0.0.0.0:" 
+ DFS_NAMENODE_HTTPS_PORT_DEFAULT;
   public static final String  DFS_NAMENODE_NAME_DIR_KEY =
       HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_NAME_DIR_KEY;
+  public static final String DFS_NAMENODE_NAME_DIR_PERMISSION_KEY =
+      "dfs.namenode.storage.dir.perm";
+  public static final String DFS_NAMENODE_NAME_DIR_PERMISSION_DEFAULT =
+      "700";
   public static final String  DFS_NAMENODE_EDITS_DIR_KEY =
       HdfsClientConfigKeys.DeprecatedKeys.DFS_NAMENODE_EDITS_DIR_KEY;
   public static final String  DFS_NAMENODE_SHARED_EDITS_DIR_KEY = 
"dfs.namenode.shared.edits.dir";
@@ -1029,6 +1033,10 @@ public class DFSConfigKeys extends 
CommonConfigurationKeys {
   public static final int     DFS_JOURNALNODE_RPC_PORT_DEFAULT = 8485;
   public static final String  DFS_JOURNALNODE_RPC_BIND_HOST_KEY = 
"dfs.journalnode.rpc-bind-host";
   public static final String  DFS_JOURNALNODE_RPC_ADDRESS_DEFAULT = "0.0.0.0:" 
+ DFS_JOURNALNODE_RPC_PORT_DEFAULT;
+  public static final String DFS_JOURNAL_EDITS_DIR_PERMISSION_KEY =
+      "dfs.journalnode.edits.dir.perm";
+  public static final String DFS_JOURNAL_EDITS_DIR_PERMISSION_DEFAULT =
+      "700";
 
   public static final String  DFS_JOURNALNODE_HTTP_ADDRESS_KEY = 
"dfs.journalnode.http-address";
   public static final int     DFS_JOURNALNODE_HTTP_PORT_DEFAULT = 8480;
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java
index e886432..ee99e2b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java
@@ -26,6 +26,8 @@ import java.util.regex.Pattern;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
@@ -65,7 +67,9 @@ class JNStorage extends Storage {
       StorageErrorReporter errorReporter) throws IOException {
     super(NodeType.JOURNAL_NODE);
     
-    sd = new StorageDirectory(logDir);
+    sd = new StorageDirectory(logDir, null, false, new FsPermission(conf.get(
+        DFSConfigKeys.DFS_JOURNAL_EDITS_DIR_PERMISSION_KEY,
+        DFSConfigKeys.DFS_JOURNAL_EDITS_DIR_PERMISSION_DEFAULT)));
     this.addStorageDir(sd);
     this.fjm = new FileJournalManager(conf, sd, errorReporter);
 
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
index 3dd43c7..2ba943a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
@@ -27,11 +27,14 @@ import java.nio.channels.FileLock;
 import java.nio.channels.OverlappingFileLockException;
 import java.nio.file.DirectoryStream;
 import java.nio.file.Files;
+import java.nio.file.attribute.PosixFilePermission;
+import java.nio.file.attribute.PosixFilePermissions;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Properties;
+import java.util.Set;
 import java.util.concurrent.CopyOnWriteArrayList;
 
 import org.apache.commons.io.FileUtils;
@@ -39,6 +42,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
@@ -276,6 +280,7 @@ public abstract class Storage extends StorageInfo {
     final boolean isShared;
     final StorageDirType dirType; // storage dir type
     FileLock lock;                // storage lock
+    private final FsPermission permission;
 
     private String storageUuid = null;      // Storage directory identifier.
     
@@ -311,6 +316,11 @@ public abstract class Storage extends StorageInfo {
       this(dir, dirType, isShared, null);
     }
 
+    public StorageDirectory(File dir, StorageDirType dirType,
+                            boolean isShared, FsPermission permission) {
+      this(dir, dirType, isShared, null, permission);
+    }
+
     /**
      * Constructor
      * @param dirType storage directory type
@@ -320,7 +330,7 @@ public abstract class Storage extends StorageInfo {
      */
     public StorageDirectory(StorageDirType dirType, boolean isShared,
         StorageLocation location) {
-      this(getStorageLocationFile(location), dirType, isShared, location);
+      this(getStorageLocationFile(location), dirType, isShared, location, 
null);
     }
 
     /**
@@ -334,7 +344,7 @@ public abstract class Storage extends StorageInfo {
     public StorageDirectory(String bpid, StorageDirType dirType,
         boolean isShared, StorageLocation location) {
       this(getBlockPoolCurrentDir(bpid, location), dirType,
-          isShared, location);
+          isShared, location, null);
     }
 
     private static File getBlockPoolCurrentDir(String bpid,
@@ -348,13 +358,14 @@ public abstract class Storage extends StorageInfo {
     }
 
     private StorageDirectory(File dir, StorageDirType dirType,
-        boolean isShared, StorageLocation location) {
+        boolean isShared, StorageLocation location, FsPermission permission) {
       this.root = dir;
       this.lock = null;
       // default dirType is UNDEFINED
       this.dirType = (dirType == null ? NameNodeDirType.UNDEFINED : dirType);
       this.isShared = isShared;
       this.location = location;
+      this.permission = permission;
       assert location == null || dir == null ||
           dir.getAbsolutePath().startsWith(
               new File(location.getUri()).getAbsolutePath()):
@@ -432,8 +443,14 @@ public abstract class Storage extends StorageInfo {
         if (!(FileUtil.fullyDelete(curDir)))
           throw new IOException("Cannot remove current directory: " + curDir);
       }
-      if (!curDir.mkdirs())
+      if (!curDir.mkdirs()) {
         throw new IOException("Cannot create directory " + curDir);
+      }
+      if (permission != null) {
+        Set<PosixFilePermission> permissions =
+            PosixFilePermissions.fromString(permission.toString());
+        Files.setPosixFilePermissions(curDir.toPath(), permissions);
+      }
     }
 
     /**
@@ -655,8 +672,9 @@ public abstract class Storage extends StorageInfo {
             return StorageState.NON_EXISTENT;
           }
           LOG.info("{} does not exist. Creating ...", rootPath);
-          if (!root.mkdirs())
+          if (!root.mkdirs()) {
             throw new IOException("Cannot create directory " + rootPath);
+          }
           hadMkdirs = true;
         }
         // or is inaccessible
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
index 469e7ec..71a7053 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
@@ -298,7 +298,7 @@ public class FSImage implements Closeable {
         // triggered.
         LOG.info("Storage directory " + sd.getRoot() + " is not formatted.");
         LOG.info("Formatting ...");
-        sd.clearDirectory(); // create empty currrent dir
+        sd.clearDirectory(); // create empty current dir
         // For non-HA, no further action is needed here, as saveNamespace will
         // take care of the rest.
         if (!target.isHaEnabled()) {
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
index ca0d410..98ae44e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
@@ -38,6 +38,8 @@ import java.util.concurrent.ThreadLocalRandom;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
@@ -128,7 +130,7 @@ public class NNStorage extends Storage implements Closeable,
   private boolean restoreFailedStorage = false;
   private final Object restorationLock = new Object();
   private boolean disablePreUpgradableLayoutCheck = false;
-
+  private final Configuration conf;
 
   /**
    * TxId of the last transaction that was included in the most
@@ -171,6 +173,7 @@ public class NNStorage extends Storage implements Closeable,
                    Collection<URI> imageDirs, Collection<URI> editsDirs) 
       throws IOException {
     super(NodeType.NAME_NODE);
+    this.conf = conf;
 
     // this may modify the editsDirs, so copy before passing in
     setStorageDirectories(imageDirs, 
@@ -312,10 +315,16 @@ public class NNStorage extends Storage implements 
Closeable,
                           NameNodeDirType.IMAGE;
       // Add to the list of storage directories, only if the
       // URI is of type file://
-      if(dirName.getScheme().compareTo("file") == 0) {
-        this.addStorageDir(new StorageDirectory(new File(dirName.getPath()),
+      if (dirName.getScheme().compareTo("file") == 0) {
+        // Don't lock the dir if it's shared.
+        StorageDirectory sd = new StorageDirectory(new File(dirName.getPath()),
             dirType,
-            sharedEditsDirs.contains(dirName))); // Don't lock the dir if it's 
shared.
+            sharedEditsDirs.contains(dirName),
+            new FsPermission(conf.get(
+                DFSConfigKeys.DFS_NAMENODE_NAME_DIR_PERMISSION_KEY,
+                DFSConfigKeys.DFS_NAMENODE_NAME_DIR_PERMISSION_DEFAULT)));
+
+        this.addStorageDir(sd);
       }
     }
 
@@ -324,9 +333,12 @@ public class NNStorage extends Storage implements 
Closeable,
       checkSchemeConsistency(dirName);
       // Add to the list of storage directories, only if the
       // URI is of type file://
-      if(dirName.getScheme().compareTo("file") == 0) {
+      if (dirName.getScheme().compareTo("file") == 0) {
         this.addStorageDir(new StorageDirectory(new File(dirName.getPath()),
-            NameNodeDirType.EDITS, sharedEditsDirs.contains(dirName)));
+            NameNodeDirType.EDITS, sharedEditsDirs.contains(dirName),
+            new FsPermission(conf.get(
+                DFSConfigKeys.DFS_NAMENODE_NAME_DIR_PERMISSION_KEY,
+                DFSConfigKeys.DFS_NAMENODE_NAME_DIR_PERMISSION_DEFAULT))));
       }
     }
   }
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 97aaec2..ada4de0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -5131,4 +5131,24 @@
       The queue size of BlockReportProcessingThread in BlockManager.
     </description>
   </property>
+
+  <property>
+    <name>dfs.namenode.storage.dir.perm</name>
+    <value>700</value>
+    <description>
+      Permissions for the directories on the local filesystem where
+      the DFS namenode stores the fsImage. The permissions can either be
+      octal or symbolic.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.journalnode.edits.dir.perm</name>
+    <value>700</value>
+    <description>
+      Permissions for the directories on the local filesystem where
+      the DFS journal node stores the edits. The permissions can either be
+      octal or symbolic.
+    </description>
+  </property>
 </configuration>
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
index 1611fe2..c5dd875 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
@@ -56,6 +56,9 @@ import java.util.regex.Pattern;
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.LocalFileSystem;
+import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.FileSystem;
@@ -1255,6 +1258,17 @@ public class TestEditLog {
                                       Collections.<URI>emptyList(),
                                       editUris);
     storage.format(new NamespaceInfo());
+    // Verify permissions
+    LocalFileSystem fs = LocalFileSystem.getLocal(getConf());
+    for (URI uri : editUris) {
+      String currDir = uri.getPath() + Path.SEPARATOR +
+          Storage.STORAGE_DIR_CURRENT;
+      FileStatus fileStatus = fs.getFileLinkStatus(new Path(currDir));
+      FsPermission permission = fileStatus.getPermission();
+      assertEquals(getConf().getInt(
+          DFSConfigKeys.DFS_JOURNAL_EDITS_DIR_PERMISSION_KEY, 700),
+          permission.toOctal());
+    }
     FSEditLog editlog = getFSEditLog(storage);    
     // open the edit log and add two transactions
     // logGenerationStamp is used, simply because it doesn't 
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
index 2401608..68bc66b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
@@ -37,9 +37,11 @@ import java.nio.file.Paths;
 import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
+import java.util.stream.Collectors;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.LocalFileSystem;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
@@ -104,7 +106,7 @@ public class TestStartup {
     config = new HdfsConfiguration();
     hdfsDir = new File(MiniDFSCluster.getBaseDirectory());
 
-    if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
+    if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
       throw new IOException("Could not delete hdfs directory '" + hdfsDir + 
"'");
     }
     LOG.info("--hdfsdir is " + hdfsDir.getAbsolutePath());
@@ -790,4 +792,27 @@ public class TestStartup {
     return;
   }
 
+  @Test(timeout = 60000)
+  public void testDirectoryPermissions() throws Exception {
+    Configuration conf = new Configuration();
+    try (MiniDFSCluster dfsCluster
+             = new MiniDFSCluster.Builder(conf).build()) {
+      dfsCluster.waitActive();
+      // name and edits
+      List<StorageDirectory> nameDirs =
+          dfsCluster.getNameNode().getFSImage().getStorage().getStorageDirs();
+      Collection<URI> nameDirUris = nameDirs.stream().map(d -> d
+          .getCurrentDir().toURI()).collect(Collectors.toList());
+      assertNotNull(nameDirUris);
+      LocalFileSystem fs = LocalFileSystem.getLocal(config);
+      FsPermission permission = new FsPermission(conf.get(
+          DFSConfigKeys.DFS_NAMENODE_NAME_DIR_PERMISSION_KEY,
+          DFSConfigKeys.DFS_NAMENODE_NAME_DIR_PERMISSION_DEFAULT));
+      for (URI uri : nameDirUris) {
+        FileStatus fileStatus = fs.getFileLinkStatus(new Path(uri));
+        assertEquals(permission.toOctal(),
+            fileStatus.getPermission().toOctal());
+      }
+    }
+  }
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org

Reply via email to