HDFS-9439. Support reconfiguring fs.protected.directories without NN restart. (Contributed by Xiaobing Zhou)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ed15cb9a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ed15cb9a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ed15cb9a

Branch: refs/heads/branch-2
Commit: ed15cb9a307e8084682c6b248d1528e7d9dd7978
Parents: 8ee2140
Author: Arpit Agarwal <[email protected]>
Authored: Tue Mar 29 13:55:00 2016 -0700
Committer: Arpit Agarwal <[email protected]>
Committed: Tue Mar 29 13:56:02 2016 -0700

----------------------------------------------------------------------
 .../hdfs/server/namenode/FSDirectory.java      | 48 ++++++++++++++++++--
 .../hadoop/hdfs/server/namenode/NameNode.java  |  9 +++-
 .../namenode/TestProtectedDirectories.java     | 44 ++++++++++++++++++
 .../apache/hadoop/hdfs/tools/TestDFSAdmin.java |  2 +-
 4 files changed, 96 insertions(+), 7 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed15cb9a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index f0031c5..f140020 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -17,7 +17,10 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import org.apache.hadoop.util.StringUtils;
+
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.protobuf.InvalidProtocolBufferException;
 
@@ -146,7 +149,7 @@ public class FSDirectory implements Closeable {
   // be deleted unless they are empty.
   //
   // Each entry in this set must be a normalized path.
-  private final SortedSet<String> protectedDirectories;
+  private volatile SortedSet<String> protectedDirectories;
 
   // lock to protect the directory and BlockMap
   private final ReentrantReadWriteLock dirLock;
@@ -370,16 +373,53 @@ public class FSDirectory implements Closeable {
    */
   @VisibleForTesting
   static SortedSet<String> parseProtectedDirectories(Configuration conf) {
+    return parseProtectedDirectories(conf
+        .getTrimmedStringCollection(FS_PROTECTED_DIRECTORIES));
+  }
+
+  /**
+   * Parse configuration setting dfs.namenode.protected.directories to retrieve
+   * the set of protected directories.
+   *
+   * @param protectedDirsString
+   *          a comma separated String representing a bunch of paths.
+   * @return a TreeSet
+   */
+  @VisibleForTesting
+  static SortedSet<String> parseProtectedDirectories(
+      final String protectedDirsString) {
+    return parseProtectedDirectories(StringUtils
+        .getTrimmedStringCollection(protectedDirsString));
+  }
+
+  private static SortedSet<String> parseProtectedDirectories(
+      final Collection<String> protectedDirs) {
     // Normalize each input path to guard against administrator error.
-    return new TreeSet<>(normalizePaths(
-        conf.getTrimmedStringCollection(FS_PROTECTED_DIRECTORIES),
-        FS_PROTECTED_DIRECTORIES));
+    return new TreeSet<>(
+        normalizePaths(protectedDirs, FS_PROTECTED_DIRECTORIES));
   }
 
   SortedSet<String> getProtectedDirectories() {
     return protectedDirectories;
   }
 
+  /**
+   * Set directories that cannot be removed unless empty, even by an
+   * administrator.
+   *
+   * @param protectedDirsString
+   *          comma separated list of protected directories
+   */
+  String setProtectedDirectories(String protectedDirsString) {
+    if (protectedDirsString == null) {
+      protectedDirectories = new TreeSet<>();
+    } else {
+      protectedDirectories = parseProtectedDirectories(protectedDirsString);
+    }
+
+    return Joiner.on(",").skipNulls().join(protectedDirectories);
+  }
+
   BlockManager getBlockManager() {
     return getFSNamesystem().getBlockManager();
   }


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed15cb9a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 8a87a1d..0e6cb90 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -149,6 +149,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.FS_PROTECTED_DIRECTORIES;
 import static org.apache.hadoop.util.ExitUtil.terminate;
 import static org.apache.hadoop.util.ToolRunner.confirmPrompt;
 
@@ -272,8 +273,10 @@ public class NameNode extends ReconfigurableBase implements
   /** A list of property that are reconfigurable at runtime.
    */
   static final List<String> RECONFIGURABLE_PROPERTIES = Collections
-      .unmodifiableList(Arrays.asList(DFS_HEARTBEAT_INTERVAL_KEY,
-          DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY));
+      .unmodifiableList(Arrays
+          .asList(DFS_HEARTBEAT_INTERVAL_KEY,
+              DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
+              FS_PROTECTED_DIRECTORIES));
 
   private static final String USAGE = "Usage: hdfs namenode ["
       + StartupOption.BACKUP.getName() + "] | \n\t["
@@ -2011,6 +2014,8 @@ public class NameNode extends ReconfigurableBase implements
       LOG.info("RECONFIGURE* changed heartbeatRecheckInterval to "
           + datanodeManager.getHeartbeatRecheckInterval());
     }
+    case FS_PROTECTED_DIRECTORIES:
+      return getNamesystem().getFSDirectory().setProtectedDirectories(newVal);
     default:
       break;
     }


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed15cb9a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProtectedDirectories.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProtectedDirectories.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProtectedDirectories.java
index be7b686..e7d2d6e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProtectedDirectories.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProtectedDirectories.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 import com.google.common.base.Joiner;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Maps;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FileSystem;
@@ -28,6 +29,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.Timeout;
@@ -38,8 +40,10 @@ import java.io.IOException;
 import java.util.*;
 
 import static org.hamcrest.core.Is.is;
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertThat;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_PROTECTED_DIRECTORIES;
 
 /**
  * Verify that the dfs.namenode.protected.directories setting is respected.
@@ -190,6 +194,46 @@ public class TestProtectedDirectories {
   }
 
   @Test
+  public void testReconfigureProtectedPaths() throws Throwable {
+    Configuration conf = new HdfsConfiguration();
+    Collection<Path> protectedPaths = Arrays.asList(new Path("/a"), new Path(
+        "/b"), new Path("/c"));
+    Collection<Path> unprotectedPaths = Arrays.asList();
+
+    MiniDFSCluster cluster = setupTestCase(conf, protectedPaths,
+        unprotectedPaths);
+
+    SortedSet<String> protectedPathsNew = new TreeSet<>(
+        FSDirectory.normalizePaths(Arrays.asList("/aa", "/bb", "/cc"),
+            FS_PROTECTED_DIRECTORIES));
+
+    String protectedPathsStrNew = "/aa,/bb,/cc";
+
+    NameNode nn = cluster.getNameNode();
+
+    // change properties
+    nn.reconfigureProperty(FS_PROTECTED_DIRECTORIES, protectedPathsStrNew);
+
+    FSDirectory fsDirectory = nn.getNamesystem().getFSDirectory();
+    // verify change
+    assertEquals(String.format("%s has wrong value", FS_PROTECTED_DIRECTORIES),
+        protectedPathsNew, fsDirectory.getProtectedDirectories());
+
+    assertEquals(String.format("%s has wrong value", FS_PROTECTED_DIRECTORIES),
+        protectedPathsStrNew, nn.getConf().get(FS_PROTECTED_DIRECTORIES));
+
+    // revert to default
+    nn.reconfigureProperty(FS_PROTECTED_DIRECTORIES, null);
+
+    // verify default
+    assertEquals(String.format("%s has wrong value", FS_PROTECTED_DIRECTORIES),
+        new TreeSet<String>(), fsDirectory.getProtectedDirectories());
+
+    assertEquals(String.format("%s has wrong value", FS_PROTECTED_DIRECTORIES),
+        null, nn.getConf().get(FS_PROTECTED_DIRECTORIES));
+  }
+
+  @Test
   public void testAll() throws Throwable {
     for (TestMatrixEntry testMatrixEntry : createTestMatrix()) {
       Configuration conf = new HdfsConfiguration();


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed15cb9a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
index 81f93aa..3ca7fec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSAdmin.java
@@ -234,7 +234,7 @@ public class TestDFSAdmin {
     final List<String> outs = Lists.newArrayList();
     final List<String> errs = Lists.newArrayList();
     getReconfigurableProperties("namenode", address, outs, errs);
-    assertEquals(3, outs.size());
+    assertEquals(4, outs.size());
     assertEquals(DFS_HEARTBEAT_INTERVAL_KEY, outs.get(1));
     assertEquals(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, outs.get(2));
     assertEquals(errs.size(), 0);
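
For operators, a minimal sketch of how the newly reconfigurable key might be
exercised once this change is deployed, assuming the existing
"hdfs dfsadmin -reconfig" workflow applies to the namenode target on this
branch; the host name, port and paths below are placeholders, not part of
this commit:

    # 1) Edit hdfs-site.xml on the NameNode host:
    #      <property>
    #        <name>fs.protected.directories</name>
    #        <value>/aa,/bb,/cc</value>
    #      </property>
    # 2) Ask the NameNode to pick up the new value without a restart:
    hdfs dfsadmin -reconfig namenode nn-host.example.com:8020 start
    # 3) Check that the reconfiguration task has finished:
    hdfs dfsadmin -reconfig namenode nn-host.example.com:8020 status
    # 4) List the properties the NameNode accepts at runtime; after this patch
    #    the list includes fs.protected.directories (TestDFSAdmin above now
    #    expects four entries):
    hdfs dfsadmin -reconfig namenode nn-host.example.com:8020 properties

As the new setProtectedDirectories() shows, reconfiguring the key to a null
value clears the protected set, which is what the revert-to-default step in
testReconfigureProtectedPaths verifies.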
