This is an automated email from the ASF dual-hosted git repository.

sumitagrawal pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 8e800a31356 HDDS-14574. Enforce 700 permissions on Ozone Metadata and 
Data(hdds) directories (#9735)
8e800a31356 is described below

commit 8e800a313562499a97ecf578e0e1b111496464b5
Author: Gargi Jaiswal <[email protected]>
AuthorDate: Tue Mar 3 14:26:53 2026 +0530

    HDDS-14574. Enforce 700 permissions on Ozone Metadata and Data(hdds) 
directories (#9735)
---
 .../org/apache/hadoop/hdds/scm/ScmConfigKeys.java  |   2 +
 .../common/src/main/resources/ozone-default.xml    |  22 ++--
 .../container/common/volume/MutableVolumeSet.java  |  11 ++
 .../container/common/volume/StorageVolume.java     |  33 ++++++
 .../container/common/volume/TestHddsVolume.java    |  39 +++++++
 .../org/apache/hadoop/hdds/server/ServerUtils.java |  39 +++++++
 .../apache/hadoop/hdds/server/TestServerUtils.java | 124 +++++++++++++++++++++
 7 files changed, 263 insertions(+), 7 deletions(-)

diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index 23400b1a06b..69a4d0411f2 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -221,6 +221,8 @@ public final class ScmConfigKeys {
   public static final int OZONE_SCM_HTTP_BIND_PORT_DEFAULT = 9876;
   public static final int OZONE_SCM_HTTPS_BIND_PORT_DEFAULT = 9877;
   public static final String HDDS_DATANODE_DIR_KEY = "hdds.datanode.dir";
+  public static final String HDDS_DATANODE_DATA_DIR_PERMISSIONS =
+      "hdds.datanode.data.dir.permissions";
   public static final String HDDS_DATANODE_DIR_DU_RESERVED =
       "hdds.datanode.dir.du.reserved";
   public static final String HDDS_DATANODE_DIR_DU_RESERVED_PERCENT =
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml 
b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 8e83a4da5ed..3bbefe3bb68 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -178,6 +178,14 @@
       tagged explicitly.
     </description>
   </property>
+  <property>
+    <name>hdds.datanode.data.dir.permissions</name>
+    <value>700</value>
+    <description>Permissions for the datanode data directories where actual file blocks are stored.
+      The permissions can be either octal or symbolic. If this property is not set,
+      the default value of 700 will be used.
+    </description>
+  </property>
   <property>
     <name>hdds.datanode.container.db.dir</name>
     <value/>
@@ -674,11 +682,11 @@
   </property>
   <property>
     <name>ozone.om.db.dirs.permissions</name>
-    <value>750</value>
+    <value>700</value>
     <description>
       Permissions for the metadata directories for Ozone Manager. The
       permissions have to be octal or symbolic. If the default permissions are 
not set
-      then the default value of 750 will be used.
+      then the default value of 700 will be used.
     </description>
   </property>
   <property>
@@ -705,7 +713,7 @@
   </property>
   <property>
     <name>ozone.metadata.dirs.permissions</name>
-    <value>750</value>
+    <value>700</value>
     <description>
       Permissions for the metadata directories for fallback location for SCM, 
OM, Recon and DataNodes
       to store their metadata. The permissions have to be octal or symbolic. 
This is the fallback used in case the
@@ -750,11 +758,11 @@
   </property>
   <property>
     <name>ozone.scm.db.dirs.permissions</name>
-    <value>750</value>
+    <value>700</value>
     <description>
       Permissions for the metadata directories for Storage Container Manager. 
The
       permissions can either be octal or symbolic. If the default permissions 
are not set
-      then the default value of 750 will be used.
+      then the default value of 700 will be used.
     </description>
   </property>
   <property>
@@ -3369,11 +3377,11 @@
   </property>
   <property>
     <name>ozone.recon.db.dirs.permissions</name>
-    <value>750</value>
+    <value>700</value>
     <description>
       Permissions for the metadata directories for Recon. The
       permissions can either be octal or symbolic. If the default permissions 
are not set
-      then the default value of 750 will be used.
+      then the default value of 700 will be used.
     </description>
   </property>
   <property>
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
index bef9c257178..9ce69fa14bd 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
@@ -30,6 +30,8 @@
 import java.util.function.Function;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.server.ServerUtils;
 import org.apache.hadoop.hdds.utils.HddsServerUtil;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport;
@@ -165,6 +167,15 @@ private void initializeVolumeSet() throws IOException {
           throw new IOException("Failed to create storage dir " +
               volume.getStorageDir());
         }
+
+        // Ensure permissions are set on the storage directory
+        // (permissions are also set in StorageVolume.initializeImpl(),
+        // but this ensures they're set even if directory already existed
+        // from a previous run with incorrect permissions)
+        if (volumeType == StorageVolume.VolumeType.DATA_VOLUME) {
+          ServerUtils.setDataDirectoryPermissions(volume.getStorageDir(), conf,
+              ScmConfigKeys.HDDS_DATANODE_DATA_DIR_PERMISSIONS);
+        }
         volumeMap.put(volume.getStorageDir().getPath(), volume);
         volumeHealthMetrics.incrementHealthyVolumes();
       } catch (IOException e) {
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java
index b3946831831..5260f846893 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.ozone.container.common.volume;
 
 import static 
org.apache.hadoop.ozone.container.common.HDDSVolumeLayoutVersion.getLatestVersion;
+import static 
org.apache.hadoop.ozone.container.common.volume.HddsVolume.HDDS_VOLUME_DIR;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
@@ -40,6 +41,8 @@
 import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory;
 import org.apache.hadoop.hdds.fs.SpaceUsageCheckParams;
 import org.apache.hadoop.hdds.fs.SpaceUsageSource;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.server.ServerUtils;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.datanode.checker.Checkable;
 import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
@@ -226,16 +229,22 @@ protected void initializeImpl() throws IOException {
       if (!getStorageDir().mkdirs()) {
         throw new IOException("Cannot create directory " + getStorageDir());
       }
+      // Set permissions on storage directory (e.g., hdds subdirectory)
+      setStorageDirPermissions();
       setState(VolumeState.NOT_FORMATTED);
       createVersionFile();
       break;
     case NOT_FORMATTED:
       // Version File does not exist. Create it.
+      // Ensure permissions are correct even if directory already existed
+      setStorageDirPermissions();
       createVersionFile();
       break;
     case NOT_INITIALIZED:
       // Version File exists.
       // Verify its correctness and update property fields.
+      // Ensure permissions are correct even if directory already existed
+      setStorageDirPermissions();
       readVersionFile();
       setState(VolumeState.NORMAL);
       break;
@@ -461,6 +470,10 @@ public boolean getFailedVolume() {
     public StorageType getStorageType() {
       return this.storageType;
     }
+
+    public String getStorageDirStr() {
+      return this.storageDirStr;
+    }
   }
 
   public String getVolumeRootDir() {
@@ -768,6 +781,15 @@ private static SpaceUsageCheckParams 
getSpaceUsageCheckParams(Builder b, Supplie
       throw new IOException("Unable to create the volume root dir at " + root);
     }
 
+    // Set permissions on volume root directory immediately after 
creation/check
+    // (for data volumes, we want to ensure the root has secure permissions,
+    // even if the directory already existed from a previous run)
+    // This follows the same pattern as metadata directories in 
getDirectoryFromConfig()
+    if (b.conf != null && root.exists() && 
HDDS_VOLUME_DIR.equals(b.getStorageDirStr())) {
+      ServerUtils.setDataDirectoryPermissions(root, b.conf,
+          ScmConfigKeys.HDDS_DATANODE_DATA_DIR_PERMISSIONS);
+    }
+
     SpaceUsageCheckFactory usageCheckFactory = b.usageCheckFactory;
     if (usageCheckFactory == null) {
       usageCheckFactory = SpaceUsageCheckFactory.create(b.conf);
@@ -775,4 +797,15 @@ private static SpaceUsageCheckParams 
getSpaceUsageCheckParams(Builder b, Supplie
 
     return usageCheckFactory.paramsFor(root, exclusionProvider);
   }
+
+  /**
+   * Sets permissions on the storage directory (e.g., hdds subdirectory).
+   */
+  private void setStorageDirPermissions() {
+    if (conf != null && getStorageDir().exists()
+        && HDDS_VOLUME_DIR.equals(getStorageDir().getName())) {
+      ServerUtils.setDataDirectoryPermissions(getStorageDir(), conf,
+          ScmConfigKeys.HDDS_DATANODE_DATA_DIR_PERMISSIONS);
+    }
+  }
 }
diff --git 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java
 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java
index 0bd576732a0..1dd927b6f48 100644
--- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java
+++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java
@@ -31,10 +31,14 @@
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.Paths;
+import java.nio.file.attribute.PosixFilePermission;
+import java.nio.file.attribute.PosixFilePermissions;
 import java.time.Duration;
 import java.util.Properties;
+import java.util.Set;
 import java.util.UUID;
 import java.util.concurrent.atomic.AtomicLong;
+import java.util.stream.Stream;
 import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -56,6 +60,9 @@
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.io.TempDir;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
 
 /**
  * Unit tests for {@link HddsVolume}.
@@ -605,4 +612,36 @@ private MutableVolumeSet createDbVolumeSet() throws 
IOException {
     dbVolumeSet.getVolumesList().get(0).createWorkingDir(CLUSTER_ID, null);
     return dbVolumeSet;
   }
+
+  private static Stream<Arguments> dataDirectoryPermissionsProvider() {
+    return Stream.of(
+        Arguments.of("700", "rwx------"),
+        Arguments.of("750", "rwxr-x---"),
+        Arguments.of("755", "rwxr-xr-x")
+    );
+  }
+
+  @ParameterizedTest
+  @MethodSource("dataDirectoryPermissionsProvider")
+  public void testDataDirectoryPermissions(String permissionValue,
+      String expectedPermissionString) throws Exception {
+    // Set permission configuration
+    CONF.set(ScmConfigKeys.HDDS_DATANODE_DATA_DIR_PERMISSIONS, 
permissionValue);
+
+    // Create a new volume
+    StorageVolume volume = volumeBuilder.build();
+    volume.format(CLUSTER_ID);
+
+    // Verify storage directory (hdds subdirectory) has correct permissions
+    File storageDir = volume.getStorageDir();
+    assertTrue(storageDir.exists());
+    Path storageDirPath = storageDir.toPath();
+    Set<PosixFilePermission> expectedPermissions =
+        PosixFilePermissions.fromString(expectedPermissionString);
+    Set<PosixFilePermission> actualPermissions =
+        Files.getPosixFilePermissions(storageDirPath);
+
+    assertEquals(expectedPermissions, actualPermissions,
+        "Storage directory should have " + permissionValue + " permissions");
+  }
 }
diff --git 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java
 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java
index d6864d4f15d..481df512496 100644
--- 
a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java
+++ 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java
@@ -254,6 +254,45 @@ public static String getPermissions(String key, 
ConfigurationSource conf) {
         "Invalid configuration value for key: " + key);
   }
 
+  /**
+   * Sets directory permissions based on configuration.
+   *
+   * @param dir The directory to set permissions on
+   * @param conf Configuration source
+   * @param permissionConfigKey The configuration key for permissions
+   */
+  public static void setDataDirectoryPermissions(File dir, ConfigurationSource 
conf,
+      String permissionConfigKey) {
+    if (dir == null || !dir.exists()) {
+      return;
+    }
+    // Don't attempt to set permissions on non-writable directories
+    // This allows volume initialization to fail naturally for read-only 
volumes
+    if (!dir.canWrite()) {
+      LOG.debug("Skipping permission setting for non-writable directory {}", 
dir);
+      return;
+    }
+
+    try {
+      String permissionValue = conf.get(permissionConfigKey);
+      if (permissionValue == null) {
+        LOG.warn("Permission configuration key {} not found, skipping 
permission " +
+            "setting for directory {}", permissionConfigKey, dir);
+        return;
+      }
+      String symbolicPermission = getSymbolicPermission(permissionValue);
+      Path path = dir.toPath();
+      Files.setPosixFilePermissions(path,
+          PosixFilePermissions.fromString(symbolicPermission));
+      LOG.debug("Set permissions {} on directory {} using config key {}",
+          symbolicPermission, dir, permissionConfigKey);
+    } catch (Exception e) {
+      LOG.warn("Failed to set directory permissions for {}: {}", dir,
+          e.getMessage());
+      // Don't throw exception to avoid failing startup, just log warning
+    }
+  }
+
   /**
    * Checks and creates Ozone Metadir Path if it does not exist.
    *
diff --git 
a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestServerUtils.java
 
b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestServerUtils.java
index 1eb633f455e..7a50a09cb21 100644
--- 
a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestServerUtils.java
+++ 
b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestServerUtils.java
@@ -80,6 +80,27 @@ public void testGetPermissions() {
 
   }
 
+  @Test
+  public void testGetPermissionsWithDefaults() {
+    // Create an OzoneConfiguration without explicitly setting permissions
+    // Should fall back to default values from ozone-default.xml (700)
+    OzoneConfiguration conf = new OzoneConfiguration();
+
+    // Test getPermissions for different config names and verify they use 
defaults
+    assertEquals("700",
+        ServerUtils.getPermissions(ReconConfigKeys.OZONE_RECON_DB_DIR, conf),
+        "Should use default 700 for Recon DB dirs");
+    assertEquals("700",
+        ServerUtils.getPermissions(ScmConfigKeys.OZONE_SCM_DB_DIRS, conf),
+        "Should use default 700 for SCM DB dirs");
+    assertEquals("700",
+        ServerUtils.getPermissions(OzoneConfigKeys.OZONE_METADATA_DIRS, conf),
+        "Should use default 700 for metadata dirs");
+    assertEquals("700",
+        ServerUtils.getPermissions(OzoneConfigKeys.OZONE_OM_DB_DIRS, conf),
+        "Should use default 700 for OM DB dirs");
+  }
+
   @Test
   public void testGetDirectoryFromConfigWithOctalPermissions()
       throws IOException {
@@ -439,4 +460,107 @@ public void 
testColocatedComponentsWithSharedMetadataDirForSnapshots() {
       FileUtils.deleteQuietly(metaDir);
     }
   }
+
+  @Test
+  public void testSetDataDirectoryPermissionsWithOctal() throws IOException {
+    File testDir = new File(folder.toFile(), "testDir");
+    assertTrue(testDir.mkdirs());
+
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.set(ScmConfigKeys.HDDS_DATANODE_DATA_DIR_PERMISSIONS, "700");
+
+    ServerUtils.setDataDirectoryPermissions(testDir, conf,
+        ScmConfigKeys.HDDS_DATANODE_DATA_DIR_PERMISSIONS);
+
+    Path dirPath = testDir.toPath();
+    Set<PosixFilePermission> expectedPermissions =
+        PosixFilePermissions.fromString("rwx------");
+    Set<PosixFilePermission> actualPermissions =
+        Files.getPosixFilePermissions(dirPath);
+
+    assertEquals(expectedPermissions, actualPermissions);
+  }
+
+  @Test
+  public void testSetDataDirectoryPermissionsWithSymbolic() throws IOException 
{
+    File testDir = new File(folder.toFile(), "testDir2");
+    assertTrue(testDir.mkdirs());
+
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.set(ScmConfigKeys.HDDS_DATANODE_DATA_DIR_PERMISSIONS, "rwx------");
+
+    ServerUtils.setDataDirectoryPermissions(testDir, conf,
+        ScmConfigKeys.HDDS_DATANODE_DATA_DIR_PERMISSIONS);
+
+    Path dirPath = testDir.toPath();
+    Set<PosixFilePermission> expectedPermissions =
+        PosixFilePermissions.fromString("rwx------");
+    Set<PosixFilePermission> actualPermissions =
+        Files.getPosixFilePermissions(dirPath);
+
+    assertEquals(expectedPermissions, actualPermissions);
+  }
+
+  @Test
+  public void testSetDataDirectoryPermissionsWithDefaultValue() throws 
IOException {
+    File testDir = new File(folder.toFile(), "testDir3");
+    assertTrue(testDir.mkdirs());
+
+    OzoneConfiguration conf = new OzoneConfiguration();
+    // Don't explicitly set the permission config key - should use default 
value (700)
+
+    // Should use default value from ozone-default.xml (700)
+    ServerUtils.setDataDirectoryPermissions(testDir, conf,
+        ScmConfigKeys.HDDS_DATANODE_DATA_DIR_PERMISSIONS);
+
+    // Permissions should be set to default value (700 = rwx------)
+    Path dirPath = testDir.toPath();
+    Set<PosixFilePermission> expectedPermissions =
+        PosixFilePermissions.fromString("rwx------");
+    Set<PosixFilePermission> actualPermissions =
+        Files.getPosixFilePermissions(dirPath);
+    assertEquals(expectedPermissions, actualPermissions);
+  }
+
+  @Test
+  public void testSetDataDirectoryPermissionsWithNonExistentDir() {
+    File nonExistentDir = new File(folder.toFile(), "nonExistent");
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.set(ScmConfigKeys.HDDS_DATANODE_DATA_DIR_PERMISSIONS, "700");
+
+    // Should not throw exception for non-existent directory
+    ServerUtils.setDataDirectoryPermissions(nonExistentDir, conf,
+        ScmConfigKeys.HDDS_DATANODE_DATA_DIR_PERMISSIONS);
+  }
+
+  @Test
+  public void testSetDataDirectoryPermissionsSkipsReadOnlyDir() throws 
IOException {
+    // Create a directory and set it to read-only
+    File readOnlyDir = new File(folder.toFile(), "readOnlyDir");
+    assertTrue(readOnlyDir.mkdirs());
+
+    // Set initial permissions and make it read-only
+    Path dirPath = readOnlyDir.toPath();
+    Set<PosixFilePermission> readOnlyPermissions =
+        PosixFilePermissions.fromString("r-xr-xr-x");
+    Files.setPosixFilePermissions(dirPath, readOnlyPermissions);
+
+    // Verify directory is read-only
+    assertFalse(readOnlyDir.canWrite());
+
+    // Configure system to use 700 permissions
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.set(ScmConfigKeys.HDDS_DATANODE_DATA_DIR_PERMISSIONS, "700");
+
+    // Call setDataDirectoryPermissions on read-only directory
+    // Should skip permission setting and not throw exception
+    ServerUtils.setDataDirectoryPermissions(readOnlyDir, conf,
+        ScmConfigKeys.HDDS_DATANODE_DATA_DIR_PERMISSIONS);
+
+    // Verify permissions were NOT changed (still read-only)
+    Set<PosixFilePermission> actualPermissions =
+        Files.getPosixFilePermissions(dirPath);
+    assertEquals(readOnlyPermissions, actualPermissions);
+    assertFalse(readOnlyDir.canWrite());
+  }
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to