svn commit: r1810621 - in /hadoop/common/site/main: author/src/documentation/content/xdocs/ publish/
Author: sidharta
Date: Tue Oct 3 03:21:15 2017
New Revision: 1810621

URL: http://svn.apache.org/viewvc?rev=1810621&view=rev
Log:
Added sidharta to the list of Apache Hadoop committers

Modified:
    hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml
    hadoop/common/site/main/publish/bylaws.pdf
    hadoop/common/site/main/publish/committer_criteria.pdf
    hadoop/common/site/main/publish/index.pdf
    hadoop/common/site/main/publish/issue_tracking.pdf
    hadoop/common/site/main/publish/linkmap.pdf
    hadoop/common/site/main/publish/mailing_lists.pdf
    hadoop/common/site/main/publish/privacy_policy.pdf
    hadoop/common/site/main/publish/releases.pdf
    hadoop/common/site/main/publish/version_control.pdf
    hadoop/common/site/main/publish/versioning.pdf
    hadoop/common/site/main/publish/who.html
    hadoop/common/site/main/publish/who.pdf

Modified: hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml
URL: http://svn.apache.org/viewvc/hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml?rev=1810621&r1=1810620&r2=1810621&view=diff
==============================================================================
--- hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml (original)
+++ hadoop/common/site/main/author/src/documentation/content/xdocs/who.xml Tue Oct 3 03:21:15 2017
@@ -1404,6 +1404,14 @@
+     sidharta
+     Sidharta Seethana
+     Hortonworks
+
+     -8
+
+
      sseth
      Siddharth Seth
      Hortonworks

Modified: hadoop/common/site/main/publish/bylaws.pdf
URL: http://svn.apache.org/viewvc/hadoop/common/site/main/publish/bylaws.pdf?rev=1810621&r1=1810620&r2=1810621&view=diff
==============================================================================
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/committer_criteria.pdf
URL: http://svn.apache.org/viewvc/hadoop/common/site/main/publish/committer_criteria.pdf?rev=1810621&r1=1810620&r2=1810621&view=diff
==============================================================================
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/index.pdf
URL: http://svn.apache.org/viewvc/hadoop/common/site/main/publish/index.pdf?rev=1810621&r1=1810620&r2=1810621&view=diff
==============================================================================
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/issue_tracking.pdf
URL: http://svn.apache.org/viewvc/hadoop/common/site/main/publish/issue_tracking.pdf?rev=1810621&r1=1810620&r2=1810621&view=diff
==============================================================================
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/linkmap.pdf
URL: http://svn.apache.org/viewvc/hadoop/common/site/main/publish/linkmap.pdf?rev=1810621&r1=1810620&r2=1810621&view=diff
==============================================================================
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/mailing_lists.pdf
URL: http://svn.apache.org/viewvc/hadoop/common/site/main/publish/mailing_lists.pdf?rev=1810621&r1=1810620&r2=1810621&view=diff
==============================================================================
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/privacy_policy.pdf
URL: http://svn.apache.org/viewvc/hadoop/common/site/main/publish/privacy_policy.pdf?rev=1810621&r1=1810620&r2=1810621&view=diff
==============================================================================
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/releases.pdf
URL: http://svn.apache.org/viewvc/hadoop/common/site/main/publish/releases.pdf?rev=1810621&r1=1810620&r2=1810621&view=diff
==============================================================================
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/version_control.pdf
URL: http://svn.apache.org/viewvc/hadoop/common/site/main/publish/version_control.pdf?rev=1810621&r1=1810620&r2=1810621&view=diff
==============================================================================
Binary files - no diff available.

Modified: hadoop/common/site/main/publish/versioning.pdf
URL: http://svn.apache.org/viewvc/hadoop/common/site/main/publish/versioning.pdf?rev=1810621&r1=1810620&r2=1810621&view=diff
==============================================================================
Binary files - no diff available.
Modified: hadoop/common/site/main/publish/who.html
URL: http://svn.apache.org/viewvc/hadoop/common/site/main/publish/who.html?rev=1810621&r1=1810620&r2=1810621&view=diff
==============================================================================
--- hadoop/common/site/main/publish/who.html (original)
+++ hadoop/common/site/main/publish/who.html Tue Oct 3 03:21:15 2017
@@ -2170,6 +2170,17 @@ document.write("Last Published: " + docu
+sidharta
+Sidharta Seethana
+Hortonworks
+
+-8
hadoop git commit: MAPREDUCE-6971. Moving logging APIs over to slf4j in hadoop-mapreduce-client-app. Contributed by Jinjiang Ling.
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 dcc454b4a -> 47aa044fc


MAPREDUCE-6971. Moving logging APIs over to slf4j in hadoop-mapreduce-client-app. Contributed by Jinjiang Ling.

(cherry picked from commit 453d48bdfbb67ed3e66c33c4aef239c3d7bdd3bc)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/47aa044f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/47aa044f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/47aa044f

Branch: refs/heads/branch-3.0
Commit: 47aa044fccbcbb7249a0c396eca9451c09faf72d
Parents: dcc454b
Author: Akira Ajisaka
Authored: Tue Oct 3 12:14:54 2017 +0900
Committer: Akira Ajisaka
Committed: Tue Oct 3 12:15:43 2017 +0900

----------------------------------------------------------------------
 .../hadoop/mapred/LocalContainerLauncher.java      | 13 +++--
 .../hadoop/mapred/TaskAttemptListenerImpl.java     | 11 ++-
 .../java/org/apache/hadoop/mapred/YarnChild.java   | 19 ---
 .../apache/hadoop/mapred/YarnOutputFiles.java      |  2 --
 .../jobhistory/JobHistoryCopyService.java          |  7 ---
 .../jobhistory/JobHistoryEventHandler.java         |  6 +++---
 .../hadoop/mapreduce/v2/app/MRAppMaster.java       | 15 ++-
 .../mapreduce/v2/app/TaskHeartbeatHandler.java     |  7 ---
 .../mapreduce/v2/app/client/MRClientService.java   |  6 +++---
 .../v2/app/commit/CommitterEventHandler.java       |  8
 .../mapreduce/v2/app/job/impl/JobImpl.java         |  6 +++---
 .../v2/app/job/impl/TaskAttemptImpl.java           |  7 ---
 .../mapreduce/v2/app/job/impl/TaskImpl.java        |  6 +++---
 .../v2/app/launcher/ContainerLauncherImpl.java     |  7 ---
 .../v2/app/local/LocalContainerAllocator.java      |  8
 .../mapreduce/v2/app/rm/RMCommunicator.java        |  7 ---
 .../v2/app/rm/RMContainerAllocator.java            |  6 +++---
 .../v2/app/rm/RMContainerRequestor.java            |  7 ---
 .../preemption/CheckpointAMPreemptionPolicy.java   |  8
 .../rm/preemption/KillAMPreemptionPolicy.java      |  8
 .../v2/app/speculate/DefaultSpeculator.java        |  7 ---
 .../mapreduce/v2/app/webapp/AppController.java     |  7 ---
 .../mapred/TestLocalContainerLauncher.java         |  8
 .../jobhistory/TestJobHistoryEventHandler.java     |  8
 .../mapreduce/jobhistory/TestJobSummary.java       |  7 ---
 .../apache/hadoop/mapreduce/v2/app/MRApp.java      |  6 +++---
 .../hadoop/mapreduce/v2/app/MRAppBenchmark.java    |  8 +++-
 .../hadoop/mapreduce/v2/app/TestMRAppMaster.java   | 15 +++
 .../hadoop/mapreduce/v2/app/TestRecovery.java      |  6 +++---
 .../mapreduce/v2/app/TestRuntimeEstimators.java    |  7 ---
 .../v2/app/job/impl/TestMapReduceChildJVM.java     |  7 ---
 .../mapreduce/v2/app/job/impl/TestTaskImpl.java    |  6 +++---
 .../v2/app/launcher/TestContainerLauncher.java     |  9 +
 .../app/launcher/TestContainerLauncherImpl.java    |  7 ---
 .../v2/app/rm/TestRMContainerAllocator.java        |  8
 35 files changed, 142 insertions(+), 138 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/47aa044f/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
index 0b942b0..6f9cc34 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
@@ -37,8 +37,6 @@ import java.util.concurrent.LinkedBlockingQueue;

 import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FSError;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileStatus;
@@ -70,6 +68,8 @@ import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;

 import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

 /**
  * Runs the container task locally in a thread.
@@ -80,7 +80,8 @@ public class LocalContainerLauncher extends AbstractService implements
     ContainerLauncher {

   private static final File curDir = new File(".");
-
hadoop git commit: MAPREDUCE-6971. Moving logging APIs over to slf4j in hadoop-mapreduce-client-app. Contributed by Jinjiang Ling.
Repository: hadoop
Updated Branches:
  refs/heads/trunk 7eb8499 -> 453d48bdf


MAPREDUCE-6971. Moving logging APIs over to slf4j in hadoop-mapreduce-client-app. Contributed by Jinjiang Ling.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/453d48bd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/453d48bd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/453d48bd

Branch: refs/heads/trunk
Commit: 453d48bdfbb67ed3e66c33c4aef239c3d7bdd3bc
Parents: 7eb8499
Author: Akira Ajisaka
Authored: Tue Oct 3 12:14:54 2017 +0900
Committer: Akira Ajisaka
Committed: Tue Oct 3 12:14:54 2017 +0900

----------------------------------------------------------------------
 .../hadoop/mapred/LocalContainerLauncher.java      | 13 +++--
 .../hadoop/mapred/TaskAttemptListenerImpl.java     | 11 ++-
 .../java/org/apache/hadoop/mapred/YarnChild.java   | 19 ---
 .../apache/hadoop/mapred/YarnOutputFiles.java      |  2 --
 .../jobhistory/JobHistoryCopyService.java          |  7 ---
 .../jobhistory/JobHistoryEventHandler.java         |  6 +++---
 .../hadoop/mapreduce/v2/app/MRAppMaster.java       | 15 ++-
 .../mapreduce/v2/app/TaskHeartbeatHandler.java     |  7 ---
 .../mapreduce/v2/app/client/MRClientService.java   |  6 +++---
 .../v2/app/commit/CommitterEventHandler.java       |  8
 .../mapreduce/v2/app/job/impl/JobImpl.java         |  6 +++---
 .../v2/app/job/impl/TaskAttemptImpl.java           |  7 ---
 .../mapreduce/v2/app/job/impl/TaskImpl.java        |  6 +++---
 .../v2/app/launcher/ContainerLauncherImpl.java     |  7 ---
 .../v2/app/local/LocalContainerAllocator.java      |  8
 .../mapreduce/v2/app/rm/RMCommunicator.java        |  7 ---
 .../v2/app/rm/RMContainerAllocator.java            |  6 +++---
 .../v2/app/rm/RMContainerRequestor.java            |  7 ---
 .../preemption/CheckpointAMPreemptionPolicy.java   |  8
 .../rm/preemption/KillAMPreemptionPolicy.java      |  8
 .../v2/app/speculate/DefaultSpeculator.java        |  7 ---
 .../mapreduce/v2/app/webapp/AppController.java     |  7 ---
 .../mapred/TestLocalContainerLauncher.java         |  8
 .../jobhistory/TestJobHistoryEventHandler.java     |  8
 .../mapreduce/jobhistory/TestJobSummary.java       |  7 ---
 .../apache/hadoop/mapreduce/v2/app/MRApp.java      |  6 +++---
 .../hadoop/mapreduce/v2/app/MRAppBenchmark.java    |  8 +++-
 .../hadoop/mapreduce/v2/app/TestMRAppMaster.java   | 15 +++
 .../hadoop/mapreduce/v2/app/TestRecovery.java      |  6 +++---
 .../mapreduce/v2/app/TestRuntimeEstimators.java    |  7 ---
 .../v2/app/job/impl/TestMapReduceChildJVM.java     |  7 ---
 .../mapreduce/v2/app/job/impl/TestTaskImpl.java    |  6 +++---
 .../v2/app/launcher/TestContainerLauncher.java     |  9 +
 .../app/launcher/TestContainerLauncherImpl.java    |  7 ---
 .../v2/app/rm/TestRMContainerAllocator.java        |  8
 35 files changed, 142 insertions(+), 138 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/453d48bd/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
----------------------------------------------------------------------
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
index 0b942b0..6f9cc34 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/LocalContainerLauncher.java
@@ -37,8 +37,6 @@ import java.util.concurrent.LinkedBlockingQueue;

 import com.google.common.annotations.VisibleForTesting;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FSError;
 import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileStatus;
@@ -70,6 +68,8 @@ import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;

 import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

 /**
  * Runs the container task locally in a thread.
@@ -80,7 +80,8 @@ public class LocalContainerLauncher extends AbstractService implements
     ContainerLauncher {

   private static final File curDir = new File(".");
-  private static final Log LOG = LogFactory.getLog(LocalContainerLauncher.class);
+
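The migration applied across all 35 files above follows one mechanical pattern. A minimal before/after sketch of that pattern in Java (the class name MyService is illustrative, not taken from the patch):

// Before (commons-logging, the API being removed by this patch):
//   import org.apache.commons.logging.Log;
//   import org.apache.commons.logging.LogFactory;
//   private static final Log LOG =
//       LogFactory.getLog(MyService.class);

// After (slf4j): call sites stay the same, but {} placeholders defer
// message construction until the level is known to be enabled.
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class MyService {
  private static final Logger LOG =
      LoggerFactory.getLogger(MyService.class);

  public void start(int port) {
    LOG.info("Starting service on port {}", port);
  }
}

Unlike LogFactory.getLog, the slf4j logger is a pure facade: the binding on the classpath (log4j, logback, etc.) decides where the output goes.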
hadoop git commit: YARN-7226. Whitelisted variables do not support delayed variable expansion. Contributed by Jason Lowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 c2f751cb0 -> dcc454b4a


YARN-7226. Whitelisted variables do not support delayed variable expansion. Contributed by Jason Lowe

(cherry picked from commit 7eb846869cdb63743f1c9eca2ba91d57ad08)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dcc454b4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dcc454b4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dcc454b4

Branch: refs/heads/branch-3.0
Commit: dcc454b4a05b3b7f3276143cf7693729c3256d5f
Parents: c2f751c
Author: Sidharta S
Authored: Mon Oct 2 19:04:49 2017 -0700
Committer: Sidharta S
Committed: Mon Oct 2 19:32:19 2017 -0700

----------------------------------------------------------------------
 .../server/nodemanager/ContainerExecutor.java      | 44
 .../nodemanager/LinuxContainerExecutor.java        |  8 +++
 .../launcher/ContainerLaunch.java                  | 25 ---
 .../runtime/DefaultLinuxContainerRuntime.java      |  6 ++
 .../DelegatingLinuxContainerRuntime.java           | 11 +++
 .../runtime/DockerLinuxContainerRuntime.java       |  7 ++
 .../runtime/ContainerRuntime.java                  | 11 +++
 .../launcher/TestContainerLaunch.java              | 73 ++--
 8 files changed, 141 insertions(+), 44 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dcc454b4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
index 5fd059d..0b4dbc9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
@@ -26,10 +26,8 @@ import java.net.InetAddress;
 import java.net.UnknownHostException;
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
-import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
@@ -95,10 +93,15 @@ public abstract class ContainerExecutor implements Configurable {
   private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
   private final ReadLock readLock = lock.readLock();
   private final WriteLock writeLock = lock.writeLock();
+  private String[] whitelistVars;

   @Override
   public void setConf(Configuration conf) {
     this.conf = conf;
+    if (conf != null) {
+      whitelistVars = conf.get(YarnConfiguration.NM_ENV_WHITELIST,
+          YarnConfiguration.DEFAULT_NM_ENV_WHITELIST).split(",");
+    }
   }

   @Override
@@ -331,6 +334,8 @@ public abstract class ContainerExecutor implements Configurable {
   public void writeLaunchEnv(OutputStream out, Map<String, String> environment,
       Map<Path, List<String>> resources, List<String> command, Path logDir,
       String user, String outFilename) throws IOException {
+    updateEnvForWhitelistVars(environment);
+
     ContainerLaunch.ShellScriptBuilder sb =
         ContainerLaunch.ShellScriptBuilder.create();
@@ -341,22 +346,11 @@ public abstract class ContainerExecutor implements Configurable {
     sb.stdout(logDir, CONTAINER_PRE_LAUNCH_STDOUT);
     sb.stderr(logDir, CONTAINER_PRE_LAUNCH_STDERR);

-    Set<String> whitelist = new HashSet<>();
-
-    String[] nmWhiteList = conf.get(YarnConfiguration.NM_ENV_WHITELIST,
-        YarnConfiguration.DEFAULT_NM_ENV_WHITELIST).split(",");
-    for (String param : nmWhiteList) {
-      whitelist.add(param);
-    }

     if (environment != null) {
       sb.echo("Setting up env variables");
       for (Map.Entry<String, String> env : environment.entrySet()) {
-        if (!whitelist.contains(env.getKey())) {
-          sb.env(env.getKey(), env.getValue());
-        } else {
-          sb.whitelistedEnv(env.getKey(), env.getValue());
-        }
+        sb.env(env.getKey(), env.getValue());
       }
     }

@@ -658,6 +652,28 @@ public abstract class ContainerExecutor implements Configurable {
   }

   /**
+   * Propagate variables from the nodemanager's environment into the
+   * container's environment if unspecified by the container.
+   * @param env the
hadoop git commit: YARN-7226. Whitelisted variables do not support delayed variable expansion. Contributed by Jason Lowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk d4d2fd1ac -> 7eb84999


YARN-7226. Whitelisted variables do not support delayed variable expansion. Contributed by Jason Lowe


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7eb84999
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7eb84999
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7eb84999

Branch: refs/heads/trunk
Commit: 7eb846869cdb63743f1c9eca2ba91d57ad08
Parents: d4d2fd1
Author: Sidharta S
Authored: Mon Oct 2 19:04:49 2017 -0700
Committer: Sidharta S
Committed: Mon Oct 2 19:04:49 2017 -0700

----------------------------------------------------------------------
 .../server/nodemanager/ContainerExecutor.java      | 44
 .../nodemanager/LinuxContainerExecutor.java        |  8 +++
 .../launcher/ContainerLaunch.java                  | 25 ---
 .../runtime/DefaultLinuxContainerRuntime.java      |  6 ++
 .../DelegatingLinuxContainerRuntime.java           | 11 +++
 .../runtime/DockerLinuxContainerRuntime.java       |  7 ++
 .../runtime/ContainerRuntime.java                  | 11 +++
 .../launcher/TestContainerLaunch.java              | 73 ++--
 8 files changed, 141 insertions(+), 44 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7eb84999/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
index 5fd059d..0b4dbc9 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
@@ -26,10 +26,8 @@ import java.net.InetAddress;
 import java.net.UnknownHostException;
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
-import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
@@ -95,10 +93,15 @@ public abstract class ContainerExecutor implements Configurable {
   private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
   private final ReadLock readLock = lock.readLock();
   private final WriteLock writeLock = lock.writeLock();
+  private String[] whitelistVars;

   @Override
   public void setConf(Configuration conf) {
     this.conf = conf;
+    if (conf != null) {
+      whitelistVars = conf.get(YarnConfiguration.NM_ENV_WHITELIST,
+          YarnConfiguration.DEFAULT_NM_ENV_WHITELIST).split(",");
+    }
   }

   @Override
@@ -331,6 +334,8 @@ public abstract class ContainerExecutor implements Configurable {
   public void writeLaunchEnv(OutputStream out, Map<String, String> environment,
       Map<Path, List<String>> resources, List<String> command, Path logDir,
       String user, String outFilename) throws IOException {
+    updateEnvForWhitelistVars(environment);
+
     ContainerLaunch.ShellScriptBuilder sb =
         ContainerLaunch.ShellScriptBuilder.create();
@@ -341,22 +346,11 @@ public abstract class ContainerExecutor implements Configurable {
     sb.stdout(logDir, CONTAINER_PRE_LAUNCH_STDOUT);
     sb.stderr(logDir, CONTAINER_PRE_LAUNCH_STDERR);

-    Set<String> whitelist = new HashSet<>();
-
-    String[] nmWhiteList = conf.get(YarnConfiguration.NM_ENV_WHITELIST,
-        YarnConfiguration.DEFAULT_NM_ENV_WHITELIST).split(",");
-    for (String param : nmWhiteList) {
-      whitelist.add(param);
-    }

     if (environment != null) {
       sb.echo("Setting up env variables");
       for (Map.Entry<String, String> env : environment.entrySet()) {
-        if (!whitelist.contains(env.getKey())) {
-          sb.env(env.getKey(), env.getValue());
-        } else {
-          sb.whitelistedEnv(env.getKey(), env.getValue());
-        }
+        sb.env(env.getKey(), env.getValue());
       }
     }

@@ -658,6 +652,28 @@ public abstract class ContainerExecutor implements Configurable {
   }

   /**
+   * Propagate variables from the nodemanager's environment into the
+   * container's environment if unspecified by the container.
+   * @param env the environment to update
+   * @see
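The body of updateEnvForWhitelistVars is cut off in the excerpt above. A minimal sketch of the behavior its javadoc describes, copying a whitelisted variable from the NodeManager's own environment only when the container did not set it, assuming a getNMEnvVar helper that wraps System.getenv (the committed body may differ):

// Sketch only; whitelistVars is the field added to ContainerExecutor
// by this patch, and getNMEnvVar is an assumed helper around
// System.getenv(varname).
protected void updateEnvForWhitelistVars(Map<String, String> env) {
  for (String var : whitelistVars) {
    // Leave container-supplied values alone; only fill in gaps from
    // the NodeManager's environment.
    if (!env.containsKey(var)) {
      String val = getNMEnvVar(var);
      if (val != null) {
        env.put(var, val);
      }
    }
  }
}

Doing this once, before the launch script is written, is what removes the per-variable whitelist branching from writeLaunchEnv above and lets the runtimes decide how (and when) the values expand.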
hadoop git commit: HDFS-12580. Rebasing HDFS-10467 after HDFS-12447. Contributed by Inigo Goiri.
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-10467 34d5dc3a4 -> 39305aff6


HDFS-12580. Rebasing HDFS-10467 after HDFS-12447. Contributed by Inigo Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/39305aff
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/39305aff
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/39305aff

Branch: refs/heads/HDFS-10467
Commit: 39305aff644dba51e7e5d4274317b9c80330a209
Parents: 34d5dc3
Author: Inigo Goiri
Authored: Mon Oct 2 18:45:06 2017 -0700
Committer: Inigo Goiri
Committed: Mon Oct 2 18:45:06 2017 -0700

----------------------------------------------------------------------
 .../hadoop/hdfs/server/federation/router/RouterRpcServer.java | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/39305aff/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index 1fa1720..650c6ab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -64,7 +64,7 @@ import org.apache.hadoop.hdfs.AddBlockFlag;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.inotify.EventBatchList;
-import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
+import org.apache.hadoop.hdfs.protocol.AddErasureCodingPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
@@ -1857,8 +1857,8 @@ public class RouterRpcServer extends AbstractService implements ClientProtocol {
   }

   @Override
-  public AddECPolicyResponse[] addErasureCodingPolicies(
-      ErasureCodingPolicy[] arg0) throws IOException {
+  public AddErasureCodingPolicyResponse[] addErasureCodingPolicies(
+      ErasureCodingPolicy[] policies) throws IOException {
     checkOperation(OperationCategory.WRITE, false);
     return null;
   }
[02/29] hadoop git commit: HDFS-10687. Federation Membership State Store internal API. Contributed by Jason Kace and Inigo Goiri.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a67299a6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestNamenodeResolver.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestNamenodeResolver.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestNamenodeResolver.java
new file mode 100644
index 0000000..2d74505
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestNamenodeResolver.java
@@ -0,0 +1,284 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.resolver;
+
+import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.NAMENODES;
+import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.NAMESERVICES;
+import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.ROUTERS;
+import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.createNamenodeReport;
+import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.verifyException;
+import static org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.clearRecords;
+import static org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.getStateStoreConfiguration;
+import static org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.newStateStore;
+import static org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.waitStateStore;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
+import org.apache.hadoop.hdfs.server.federation.store.StateStoreUnavailableException;
+import org.apache.hadoop.hdfs.server.federation.store.records.MembershipState;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * Test the basic {@link ActiveNamenodeResolver} functionality.
+ */
+public class TestNamenodeResolver {
+
+  private static StateStoreService stateStore;
+  private static ActiveNamenodeResolver namenodeResolver;
+
+  @BeforeClass
+  public static void create() throws Exception {
+
+    Configuration conf = getStateStoreConfiguration();
+
+    // Reduce expirations to 5 seconds
+    conf.setLong(
+        DFSConfigKeys.FEDERATION_STORE_MEMBERSHIP_EXPIRATION_MS,
+        TimeUnit.SECONDS.toMillis(5));
+
+    stateStore = newStateStore(conf);
+    assertNotNull(stateStore);
+
+    namenodeResolver = new MembershipNamenodeResolver(conf, stateStore);
+    namenodeResolver.setRouterId(ROUTERS[0]);
+  }
+
+  @AfterClass
+  public static void destroy() throws Exception {
+    stateStore.stop();
+    stateStore.close();
+  }
+
+  @Before
+  public void setup() throws IOException, InterruptedException {
+    // Wait for state store to connect
+    stateStore.loadDriver();
+    waitStateStore(stateStore, 1);
+
+    // Clear NN registrations
+    boolean cleared = clearRecords(stateStore, MembershipState.class);
+    assertTrue(cleared);
+  }
+
+  @Test
+  public void testStateStoreDisconnected() throws Exception {
+
+    // Add an entry to the store
+    NamenodeStatusReport report = createNamenodeReport(
+        NAMESERVICES[0], NAMENODES[0], HAServiceState.ACTIVE);
+    assertTrue(namenodeResolver.registerNamenode(report));
+
+    // Close the data store driver
+    stateStore.closeDriver();
+    assertFalse(stateStore.isDriverReady());
+
+    // Flush the caches
+    stateStore.refreshCaches(true);
+
+    // Verify commands
[20/29] hadoop git commit: HDFS-10882. Federation State Store Interface API. Contributed by Jason Kace and Inigo Goiri.
HDFS-10882. Federation State Store Interface API. Contributed by Jason Kace and Inigo Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a9dddf2e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a9dddf2e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a9dddf2e

Branch: refs/heads/HDFS-10467
Commit: a9dddf2ec34fe82c8317c97f7ffeed84f1a71275
Parents: fdd1fe9
Author: Inigo
Authored: Thu Apr 6 19:18:52 2017 -0700
Committer: Inigo Goiri
Committed: Mon Oct 2 18:33:14 2017 -0700

----------------------------------------------------------------------
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java      |  11 ++
 .../server/federation/store/RecordStore.java       | 100
 .../store/driver/StateStoreSerializer.java         | 119 +++
 .../driver/impl/StateStoreSerializerPBImpl.java    | 115 ++
 .../store/records/impl/pb/PBRecord.java            |  47
 .../store/records/impl/pb/package-info.java        |  29 +
 .../src/main/resources/hdfs-default.xml            |   8 ++
 7 files changed, 429 insertions(+)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9dddf2e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 136665b..b8f13f3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolerant;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaLruTracker;
+import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreSerializerPBImpl;
 import org.apache.hadoop.http.HttpConfig;

 /**
@@ -1126,6 +1127,16 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String FEDERATION_NAMENODE_RESOLVER_CLIENT_CLASS_DEFAULT =
       "org.apache.hadoop.hdfs.server.federation.MockResolver";

+  // HDFS Router-based federation State Store
+  public static final String FEDERATION_STORE_PREFIX =
+      FEDERATION_ROUTER_PREFIX + "store.";
+
+  public static final String FEDERATION_STORE_SERIALIZER_CLASS =
+      DFSConfigKeys.FEDERATION_STORE_PREFIX + "serializer";
+  public static final Class
+      FEDERATION_STORE_SERIALIZER_CLASS_DEFAULT =
+      StateStoreSerializerPBImpl.class;
+
   // dfs.client.retry confs are moved to HdfsClientConfigKeys.Retry
   @Deprecated
   public static final String DFS_CLIENT_RETRY_POLICY_ENABLED_KEY

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a9dddf2e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/RecordStore.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/RecordStore.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/RecordStore.java
new file mode 100644
index 0000000..524f432
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/RecordStore.java
@@ -0,0 +1,100 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.store;
+
+import java.lang.reflect.Constructor;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import
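The body of RecordStore is truncated above. The visible imports (java.lang.reflect.Constructor plus commons-logging) suggest a reflection-based factory over typed record stores. A hedged sketch of that shape follows; StateStoreDriver and BaseRecord are the patch's own types, but every signature here is an assumption, not the committed API:

import java.lang.reflect.Constructor;

// Sketch of a reflective factory in the style the imports suggest;
// not the committed implementation.
public abstract class RecordStore<R extends BaseRecord> {

  private final StateStoreDriver driver;

  protected RecordStore(StateStoreDriver driver) {
    this.driver = driver;
  }

  protected StateStoreDriver getDriver() {
    return driver;
  }

  /** Instantiate a concrete store via its (StateStoreDriver) constructor. */
  public static <T extends RecordStore<?>> T newInstance(
      Class<T> clazz, StateStoreDriver driver) {
    try {
      Constructor<T> ctor = clazz.getConstructor(StateStoreDriver.class);
      return ctor.newInstance(driver);
    } catch (ReflectiveOperationException e) {
      return null;  // the real class presumably logs this failure
    }
  }
}

A factory like this lets the serializer and driver classes configured through the new dfs.federation.router.store.* keys above be swapped without changing the callers.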
[29/29] hadoop git commit: HDFS-10631. Federation State Store ZooKeeper implementation. Contributed by Jason Kace and Inigo Goiri.
HDFS-10631. Federation State Store ZooKeeper implementation. Contributed by Jason Kace and Inigo Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0f3daed9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0f3daed9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0f3daed9

Branch: refs/heads/HDFS-10467
Commit: 0f3daed9850d591fc88d4baeb7246a690d848425
Parents: 41ac038
Author: Inigo Goiri
Authored: Mon Aug 21 11:40:41 2017 -0700
Committer: Inigo Goiri
Committed: Mon Oct 2 18:33:15 2017 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/pom.xml            |   9 +
 .../driver/impl/StateStoreSerializableImpl.java    |  19 ++
 .../driver/impl/StateStoreZooKeeperImpl.java       | 298 +++
 .../store/driver/TestStateStoreDriverBase.java     |   2 +-
 .../store/driver/TestStateStoreZK.java             | 105 +++
 5 files changed, 432 insertions(+), 1 deletion(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f3daed9/hadoop-hdfs-project/hadoop-hdfs/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 93216db..d22d6ee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -203,6 +203,15 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <dependency>
       <groupId>com.fasterxml.jackson.core</groupId>
       <artifactId>jackson-databind</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.apache.curator</groupId>
+      <artifactId>curator-framework</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.curator</groupId>
+      <artifactId>curator-test</artifactId>
+      <scope>test</scope>
+    </dependency>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f3daed9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreSerializableImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreSerializableImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreSerializableImpl.java
index e9b3fdf..e2038fa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreSerializableImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreSerializableImpl.java
@@ -30,6 +30,11 @@ import org.apache.hadoop.hdfs.server.federation.store.records.BaseRecord;
  */
 public abstract class StateStoreSerializableImpl extends StateStoreBaseImpl {

+  /** Mark for slashes in path names. */
+  protected static final String SLASH_MARK = "0SLASH0";
+  /** Mark for colon in path names. */
+  protected static final String COLON_MARK = "_";
+
   /** Default serializer for this driver. */
   private StateStoreSerializer serializer;

@@ -74,4 +79,18 @@ public abstract class StateStoreSerializableImpl extends StateStoreBaseImpl {
       String data, Class clazz, boolean includeDates) throws IOException {
     return serializer.deserialize(data, clazz);
   }
+
+  /**
+   * Get the primary key for a record. If we don't want to store in folders, we
+   * need to remove / from the name.
+   *
+   * @param record Record to get the primary key for.
+   * @return Primary key for the record.
+   */
+  protected static String getPrimaryKey(BaseRecord record) {
+    String primaryKey = record.getPrimaryKey();
+    primaryKey = primaryKey.replaceAll("/", SLASH_MARK);
+    primaryKey = primaryKey.replaceAll(":", COLON_MARK);
+    return primaryKey;
+  }
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0f3daed9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreZooKeeperImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreZooKeeperImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreZooKeeperImpl.java
new file mode 100644
index 0000000..ddcd537
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreZooKeeperImpl.java
@@ -0,0 +1,298 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you
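The SLASH_MARK/COLON_MARK escaping in getPrimaryKey above exists because these primary keys become flat ZooKeeper znode names, which cannot contain '/'. A small self-contained illustration of the transformation (the sample key is made up):

public class KeyEscapeDemo {
  public static void main(String[] args) {
    String primaryKey = "hdfs://ns0:8020/data";
    String znodeName = primaryKey
        .replaceAll("/", "0SLASH0")   // SLASH_MARK
        .replaceAll(":", "_");        // COLON_MARK
    // Prints: hdfs_0SLASH00SLASH0ns0_80200SLASH0data
    System.out.println(znodeName);
  }
}

The distinctive "0SLASH0" marker makes the mapping reversible in practice, since a bare "0SLASH0" is unlikely to occur in a real path.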
[01/29] hadoop git commit: YARN-2037. Add work preserving restart support for Unmanaged AMs. (Botong Huang via Subru). [Forced Update!]
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-10467 1ec7e00e8 -> 34d5dc3a4 (forced update)


YARN-2037. Add work preserving restart support for Unmanaged AMs. (Botong Huang via Subru).


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d4d2fd1a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d4d2fd1a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d4d2fd1a

Branch: refs/heads/HDFS-10467
Commit: d4d2fd1acd2fdddf04f45e67897804eea30d79a1
Parents: 015abcd
Author: Subru Krishnan
Authored: Mon Oct 2 18:14:44 2017 -0700
Committer: Subru Krishnan
Committed: Mon Oct 2 18:14:44 2017 -0700

----------------------------------------------------------------------
 .../yarn/api/ApplicationMasterProtocol.java        |  21 ++-
 .../records/ApplicationSubmissionContext.java      |  17 +-
 .../ApplicationMasterService.java                  |  26 +--
 .../resourcemanager/DefaultAMSProcessor.java       |   5 +
 .../rmapp/attempt/RMAppAttemptImpl.java            |   6 +-
 .../scheduler/AbstractYarnScheduler.java           |  13 +-
 .../TestWorkPreservingUnmanagedAM.java             | 159 +++
 7 files changed, 214 insertions(+), 33 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4d2fd1a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationMasterProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationMasterProtocol.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationMasterProtocol.java
index 4d78961..eb40fc7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationMasterProtocol.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationMasterProtocol.java
@@ -55,27 +55,32 @@ public interface ApplicationMasterProtocol {
    * The interface used by a new ApplicationMaster to register with
    * the ResourceManager.
    *
-   *
+   *
    *
    * The ApplicationMaster needs to provide details such as RPC
    * Port, HTTP tracking url etc. as specified in
    * {@link RegisterApplicationMasterRequest}.
    *
-   *
+   *
    *
    * The ResourceManager responds with critical details such as
    * maximum resource capabilities in the cluster as specified in
    * {@link RegisterApplicationMasterResponse}.
    *
-   *
-   * @param request
-   *          registration request
+   *
+   *
+   * Re-register is only allowed for Unmanaged Application Master
+   * (UAM) HA, with
+   * {@link org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext#getKeepContainersAcrossApplicationAttempts()}
+   * set to true.
+   *
+   *
+   * @param request registration request
    * @return registration respose
    * @throws YarnException
    * @throws IOException
-   * @throws InvalidApplicationMasterRequestException
-   *           The exception is thrown when an ApplicationMaster tries to
-   *           register more then once.
+   * @throws InvalidApplicationMasterRequestException The exception is thrown
+   *           when an ApplicationMaster tries to register more then once.
    * @see RegisterApplicationMasterRequest
    * @see RegisterApplicationMasterResponse
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4d2fd1a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java
----------------------------------------------------------------------
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java
index 4f1d147..a6bbca7 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java
@@ -395,15 +395,18 @@ public abstract class ApplicationSubmissionContext {
    * Set the flag which indicates whether to keep containers across application
    * attempts.
    *
-   * If the flag is true, running containers will not be killed when application
-   * attempt fails and these containers will be retrieved by the new application
-   * attempt on registration via
+   * For managed AM, if the flag is true, running containers will not be killed
+   * when application attempt fails and these containers will be retrieved by
+   * the new application
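Per the updated javadoc, re-registration is only permitted for an unmanaged AM whose submission context keeps containers across attempts. A minimal sketch of requesting that behavior when submitting the application (client setup and the actual submission call are omitted; both setters shown are existing YARN API):

import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.util.Records;

// Request work-preserving restart for an unmanaged AM: with this flag
// set, running containers survive an attempt failure and the new
// attempt may call registerApplicationMaster again.
ApplicationSubmissionContext context =
    Records.newRecord(ApplicationSubmissionContext.class);
context.setUnmanagedAM(true);
context.setKeepContainersAcrossApplicationAttempts(true);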
[07/29] hadoop git commit: HDFS-11546. Federation Router RPC server. Contributed by Jason Kace and Inigo Goiri.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d135a61d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
new file mode 100644
index 0000000..3a32be1
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java
@@ -0,0 +1,856 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.federation.router;
+
+import java.io.IOException;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.net.InetSocketAddress;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.LinkedHashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Set;
+import java.util.TreeMap;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.ThreadFactory;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
+import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeContext;
+import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation;
+import org.apache.hadoop.io.retry.RetryPolicies;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.io.retry.RetryPolicy.RetryAction.RetryDecision;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.ipc.StandbyException;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+
+/**
+ * A client proxy for Router -> NN communication using the NN ClientProtocol.
+ *
+ * Provides routers to invoke remote ClientProtocol methods and handle
+ * retries/failover.
+ *
+ * invokeSingle Make a single request to a single namespace
+ * invokeSequential Make a sequential series of requests to multiple
+ * ordered namespaces until a condition is met.
+ * invokeConcurrent Make concurrent requests to multiple namespaces and
+ * return all of the results.
+ *
+ * Also maintains a cached pool of connections to NNs. Connections are managed
+ * by the ConnectionManager and are unique to each user + NN. The size of the
+ * connection pool can be configured. Larger pools allow for more simultaneous
+ * requests to a single NN from a single user.
+ */
+public class RouterRpcClient {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(RouterRpcClient.class);
+
+
+  /** Router identifier. */
+  private final String routerId;
+
+  /** Interface to identify the active NN for a nameservice or blockpool ID. */
+  private final ActiveNamenodeResolver namenodeResolver;
+
+  /** Connection pool to the Namenodes per user for performance. */
+  private final ConnectionManager connectionManager;
+  /** Service to run asynchronous calls. */
+  private final ExecutorService executorService;
+  /** Retry policy for router -> NN communication. */
+  private final RetryPolicy retryPolicy;
+
+  /** Pattern to parse a stack trace line. */
+  private static final Pattern STACK_TRACE_PATTERN =
+      Pattern.compile("\\tat (.*)\\.(.*)\\((.*):(\\d*)\\)");
+
+
+  /**
+   * Create a router RPC
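The class comment names three invocation modes, but the method bodies are cut off in this excerpt. A generic sketch of the "sequential until a condition is met" mode, with invoke() standing in for the class's real per-namespace dispatch (the names and signatures here are assumptions, not the committed API):

// Sketch: try each namespace in priority order and return the first
// successful result; surface the last failure if none succeeds.
public Object invokeSequential(List<RemoteLocation> locations,
    Method method, Object... params) throws IOException {
  IOException lastFailure = null;
  for (RemoteLocation loc : locations) {
    try {
      // The real client resolves the active NN for this namespace,
      // borrows a pooled connection, and applies the retry policy here.
      return invoke(loc, method, params);
    } catch (IOException e) {
      lastFailure = e;  // condition not met; fall through to next namespace
    }
  }
  throw lastFailure != null
      ? lastFailure : new IOException("No locations to invoke");
}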
[18/29] hadoop git commit: HDFS-10646. Federation admin tool. Contributed by Inigo Goiri.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb6986ac/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java
new file mode 100644
index 0000000..170247f
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java
@@ -0,0 +1,261 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.router;
+
+import static org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.synchronizeRecords;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
+import org.apache.hadoop.hdfs.server.federation.RouterDFSCluster.RouterContext;
+import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster;
+import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
+import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation;
+import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
+import org.apache.hadoop.hdfs.server.federation.store.impl.MountTableStoreImpl;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryRequest;
+import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
+import org.apache.hadoop.util.Time;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * The administrator interface of the {@link Router} implemented by
+ * {@link RouterAdminServer}.
+ */
+public class TestRouterAdmin {
+
+  private static StateStoreDFSCluster cluster;
+  private static RouterContext routerContext;
+  public static final String RPC_BEAN =
+      "Hadoop:service=Router,name=FederationRPC";
+  private static List<MountTable> mockMountTable;
+  private static StateStoreService stateStore;
+
+  @BeforeClass
+  public static void globalSetUp() throws Exception {
+    cluster = new StateStoreDFSCluster(false, 1);
+    // Build and start a router with State Store + admin + RPC
+    Configuration conf = new RouterConfigBuilder()
+        .stateStore()
+        .admin()
+        .rpc()
+        .build();
+    cluster.addRouterOverrides(conf);
+    cluster.startRouters();
+    routerContext = cluster.getRandomRouter();
+    mockMountTable = cluster.generateMockMountTable();
+    Router router = routerContext.getRouter();
+    stateStore = router.getStateStore();
+  }
+
+  @AfterClass
+  public static void tearDown() {
+    cluster.stopRouter(routerContext);
+  }
+
+  @Before
+  public void testSetup() throws Exception {
+    assertTrue(
+        synchronizeRecords(stateStore, mockMountTable, MountTable.class));
+  }
+
+  @Test
+  public void testAddMountTable() throws IOException {
+    MountTable newEntry = MountTable.newInstance(
+        "/testpath", Collections.singletonMap("ns0", "/testdir"),
+        Time.now(), Time.now());
+
+    RouterClient client = routerContext.getAdminClient();
+    MountTableManager mountTable = client.getMountTableManager();
+
+    // Existing mount table size
+    List<MountTable> records =
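For readability, the add-entry sequence the test performs (continuing past the truncation) boils down to the calls below, using the request/response classes from the test's import list; the final assertion is paraphrased rather than copied:

// Create and register a mount point /testpath -> ns0:/testdir through
// the router's admin interface, as in testAddMountTable above.
MountTable newEntry = MountTable.newInstance(
    "/testpath", Collections.singletonMap("ns0", "/testdir"),
    Time.now(), Time.now());

RouterClient client = routerContext.getAdminClient();
MountTableManager mountTable = client.getMountTableManager();

AddMountTableEntryRequest request =
    AddMountTableEntryRequest.newInstance(newEntry);
AddMountTableEntryResponse response =
    mountTable.addMountTableEntry(request);
assertTrue(response.getStatus());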
[27/29] hadoop git commit: HDFS-12384. Fixing compilation issue with BanDuplicateClasses. Contributed by Inigo Goiri.
HDFS-12384. Fixing compilation issue with BanDuplicateClasses. Contributed by Inigo Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/06d9cea0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/06d9cea0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/06d9cea0

Branch: refs/heads/HDFS-10467
Commit: 06d9cea05b26175e01e8fa63508bade8051db083
Parents: 0f3daed
Author: Inigo Goiri
Authored: Thu Sep 7 13:53:08 2017 -0700
Committer: Inigo Goiri
Committed: Mon Oct 2 18:33:15 2017 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/pom.xml            |  4
 .../server/federation/router/RouterRpcServer.java  | 15 +++
 2 files changed, 15 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/06d9cea0/hadoop-hdfs-project/hadoop-hdfs/pom.xml
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index d22d6ee..0fe491b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -205,10 +205,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <dependency>
       <groupId>org.apache.curator</groupId>
-      <artifactId>curator-framework</artifactId>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.curator</groupId>
       <artifactId>curator-test</artifactId>
       <scope>test</scope>
     </dependency>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06d9cea0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index c77d255..f9b4a5d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -81,6 +81,7 @@ import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -91,6 +92,7 @@ import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
 import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
+import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol;
 import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB;
 import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB;
@@ -1607,6 +1609,19 @@ public class RouterRpcServer extends AbstractService implements ClientProtocol {
   }

   @Override // ClientProtocol
+  public void reencryptEncryptionZone(String zone, ReencryptAction action)
+      throws IOException {
+    checkOperation(OperationCategory.WRITE, false);
+  }
+
+  @Override // ClientProtocol
+  public BatchedEntries<ZoneReencryptionStatus> listReencryptionStatus(
+      long prevId) throws IOException {
+    checkOperation(OperationCategory.READ, false);
+    return null;
+  }
+
+  @Override // ClientProtocol
   public void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> flag)
       throws IOException {
     checkOperation(OperationCategory.WRITE);
[19/29] hadoop git commit: HDFS-10646. Federation admin tool. Contributed by Inigo Goiri.
HDFS-10646. Federation admin tool. Contributed by Inigo Goiri. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cb6986ac Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cb6986ac Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cb6986ac Branch: refs/heads/HDFS-10467 Commit: cb6986ac4c57b8ee5403d95b06f9623579002a3a Parents: a93a765 Author: Inigo GoiriAuthored: Tue Aug 8 14:44:43 2017 -0700 Committer: Inigo Goiri Committed: Mon Oct 2 18:33:14 2017 -0700 -- hadoop-hdfs-project/hadoop-hdfs/pom.xml | 1 + .../hadoop-hdfs/src/main/bin/hdfs | 5 + .../hadoop-hdfs/src/main/bin/hdfs.cmd | 7 +- .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 19 ++ .../hdfs/protocolPB/RouterAdminProtocolPB.java | 44 +++ ...uterAdminProtocolServerSideTranslatorPB.java | 151 .../RouterAdminProtocolTranslatorPB.java| 150 .../resolver/MembershipNamenodeResolver.java| 34 +- .../hdfs/server/federation/router/Router.java | 52 +++ .../federation/router/RouterAdminServer.java| 183 ++ .../server/federation/router/RouterClient.java | 76 + .../hdfs/tools/federation/RouterAdmin.java | 341 +++ .../hdfs/tools/federation/package-info.java | 28 ++ .../src/main/proto/RouterProtocol.proto | 47 +++ .../src/main/resources/hdfs-default.xml | 46 +++ .../server/federation/RouterConfigBuilder.java | 26 ++ .../server/federation/RouterDFSCluster.java | 43 ++- .../server/federation/StateStoreDFSCluster.java | 148 .../federation/router/TestRouterAdmin.java | 261 ++ 19 files changed, 1644 insertions(+), 18 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb6986ac/hadoop-hdfs-project/hadoop-hdfs/pom.xml -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml index cc7a975..93216db 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml @@ -332,6 +332,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;> editlog.proto fsimage.proto FederationProtocol.proto + RouterProtocol.proto http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb6986ac/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs index b1f44a4..d51a8e2 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs @@ -31,6 +31,7 @@ function hadoop_usage hadoop_add_option "--hosts filename" "list of hosts to use in worker mode" hadoop_add_option "--workers" "turn on worker mode" +<<< HEAD hadoop_add_subcommand "balancer" daemon "run a cluster balancing utility" hadoop_add_subcommand "cacheadmin" admin "configure the HDFS cache" hadoop_add_subcommand "classpath" client "prints the class path needed to get the hadoop jar and the required libraries" @@ -42,6 +43,7 @@ function hadoop_usage hadoop_add_subcommand "diskbalancer" daemon "Distributes data evenly among disks on a given node" hadoop_add_subcommand "envvars" client "display computed Hadoop environment variables" hadoop_add_subcommand "ec" admin "run a HDFS ErasureCoding CLI" + hadoop_add_subcommand "federation" admin "manage Router-based federation" hadoop_add_subcommand "fetchdt" client "fetch a delegation token from the NameNode" hadoop_add_subcommand "fsck" admin "run a DFS filesystem checking utility" hadoop_add_subcommand "getconf" client "get config values from configuration" @@ -181,6 +183,9 @@ function hdfscmd_case HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true" 
HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.federation.router.Router' ;; +federation) + HADOOP_CLASSNAME='org.apache.hadoop.hdfs.tools.federation.RouterAdmin' +;; secondarynamenode) HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true" HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode' http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb6986ac/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd index b9853d6..53bdf70 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd +++
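For context on how the new "federation" subcommand reaches Java code: bin/hdfs maps it to org.apache.hadoop.hdfs.tools.federation.RouterAdmin, which, like other Hadoop CLIs, is launched through the Tool/ToolRunner convention. A minimal sketch of that dispatch pattern follows; the stub class and its echo behavior are illustrative assumptions, not the actual RouterAdmin option set.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

/** Hedged sketch of a Tool-based admin CLI in the mold of RouterAdmin. */
public class FederationAdminSketch extends Configured implements Tool {
  @Override
  public int run(String[] args) throws Exception {
    // A real admin tool would parse args here and call the router's
    // admin RPC server; this stub only echoes what it was given.
    System.out.println("federation admin invoked with: " + String.join(" ", args));
    return 0;
  }

  public static void main(String[] args) throws Exception {
    // ToolRunner strips generic options (-D, -conf, ...) before run()
    // sees the arguments, which is how bin/hdfs hands off the command line.
    System.exit(ToolRunner.run(new Configuration(), new FederationAdminSketch(), args));
  }
}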
[16/29] hadoop git commit: HDFS-10880. Federation Mount Table State Store internal API. Contributed by Jason Kace and Inigo Goiri.
HDFS-10880. Federation Mount Table State Store internal API. Contributed by Jason Kace and Inigo Goiri. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a93a7655 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a93a7655 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a93a7655 Branch: refs/heads/HDFS-10467 Commit: a93a7655d98d53a5a3fd5477a40275d79346f462 Parents: a3c44f7 Author: Inigo Goiri Authored: Fri Aug 4 18:00:12 2017 -0700 Committer: Inigo Goiri Committed: Mon Oct 2 18:33:14 2017 -0700 -- .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 7 +- .../federation/resolver/MountTableManager.java | 80 +++ .../federation/resolver/MountTableResolver.java | 544 +++ .../federation/resolver/PathLocation.java | 124 - .../resolver/order/DestinationOrder.java| 29 + .../federation/resolver/order/package-info.java | 29 + .../federation/router/FederationUtil.java | 56 +- .../hdfs/server/federation/router/Router.java | 3 +- .../federation/store/MountTableStore.java | 49 ++ .../federation/store/StateStoreService.java | 2 + .../store/impl/MountTableStoreImpl.java | 116 .../protocol/AddMountTableEntryRequest.java | 47 ++ .../protocol/AddMountTableEntryResponse.java| 42 ++ .../protocol/GetMountTableEntriesRequest.java | 49 ++ .../protocol/GetMountTableEntriesResponse.java | 53 ++ .../protocol/RemoveMountTableEntryRequest.java | 49 ++ .../protocol/RemoveMountTableEntryResponse.java | 42 ++ .../protocol/UpdateMountTableEntryRequest.java | 51 ++ .../protocol/UpdateMountTableEntryResponse.java | 43 ++ .../pb/AddMountTableEntryRequestPBImpl.java | 84 +++ .../pb/AddMountTableEntryResponsePBImpl.java| 76 +++ .../pb/GetMountTableEntriesRequestPBImpl.java | 76 +++ .../pb/GetMountTableEntriesResponsePBImpl.java | 104 .../pb/RemoveMountTableEntryRequestPBImpl.java | 76 +++ .../pb/RemoveMountTableEntryResponsePBImpl.java | 76 +++ .../pb/UpdateMountTableEntryRequestPBImpl.java | 96 .../pb/UpdateMountTableEntryResponsePBImpl.java | 76 +++ .../federation/store/records/MountTable.java| 301 ++ .../store/records/impl/pb/MountTablePBImpl.java | 213 .../src/main/proto/FederationProtocol.proto | 61 ++- .../hdfs/server/federation/MockResolver.java| 9 +- .../resolver/TestMountTableResolver.java| 396 ++ .../store/FederationStateStoreTestUtils.java| 16 + .../store/TestStateStoreMountTable.java | 250 + .../store/driver/TestStateStoreDriverBase.java | 12 + .../store/records/TestMountTable.java | 176 ++ 36 files changed, 3437 insertions(+), 76 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/a93a7655/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index f0b0c63..4a8ddfc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -27,6 +27,8 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault; import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolerant; import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaLruTracker; +import org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver; +import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver; import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver; import org.apache.hadoop.hdfs.server.federation.resolver.MembershipNamenodeResolver; import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver; @@ -1178,8 +1180,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys { // HDFS Router State Store connection public static final String FEDERATION_FILE_RESOLVER_CLIENT_CLASS = FEDERATION_ROUTER_PREFIX + "file.resolver.client.class"; - public static final String FEDERATION_FILE_RESOLVER_CLIENT_CLASS_DEFAULT = - "org.apache.hadoop.hdfs.server.federation.MockResolver"; + public static final Class<? extends FileSubclusterResolver> + FEDERATION_FILE_RESOLVER_CLIENT_CLASS_DEFAULT = + MountTableResolver.class; public static final String FEDERATION_NAMENODE_RESOLVER_CLIENT_CLASS = FEDERATION_ROUTER_PREFIX
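The mount table added by this commit maps paths in the global namespace to locations in subclusters, and resolution picks the most specific (longest) registered mount point. A self-contained sketch of that longest-prefix-match idea, with illustrative class and method names rather than the real MountTableResolver API:

import java.util.TreeMap;

public class MountTableSketch {
  // Sorted map keeps mount points in path order for easy inspection.
  private final TreeMap<String, String> mounts = new TreeMap<>();

  public void addEntry(String mountPoint, String target) {
    mounts.put(mountPoint, target);
  }

  /** Return the target of the longest mount point prefixing the path. */
  public String resolve(String path) {
    String candidate = path;
    while (!candidate.isEmpty()) {
      String target = mounts.get(candidate);
      if (target != null) {
        return target;
      }
      int slash = candidate.lastIndexOf('/');
      candidate = slash <= 0 ? "" : candidate.substring(0, slash);
    }
    return mounts.get("/"); // fall back to the root mount, if any
  }

  public static void main(String[] args) {
    MountTableSketch table = new MountTableSketch();
    table.addEntry("/", "ns0 -> /");
    table.addEntry("/data", "ns1 -> /data");
    System.out.println(table.resolve("/data/app1/file")); // ns1 -> /data
    System.out.println(table.resolve("/user/alice"));     // ns0 -> /
  }
}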
[03/29] hadoop git commit: HDFS-10687. Federation Membership State Store internal API. Contributed by Jason Kace and Inigo Goiri.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a67299a6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/UpdateNamenodeRegistrationResponse.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/UpdateNamenodeRegistrationResponse.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/UpdateNamenodeRegistrationResponse.java new file mode 100644 index 000..1f0d556 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/UpdateNamenodeRegistrationResponse.java @@ -0,0 +1,51 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation.store.protocol; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreSerializer; + +/** + * API response for overriding an existing namenode registration in the state + * store. + */ +public abstract class UpdateNamenodeRegistrationResponse { + + public static UpdateNamenodeRegistrationResponse newInstance() { +return StateStoreSerializer.newRecord( +UpdateNamenodeRegistrationResponse.class); + } + + public static UpdateNamenodeRegistrationResponse newInstance(boolean status) + throws IOException { +UpdateNamenodeRegistrationResponse response = newInstance(); +response.setResult(status); +return response; + } + + @Private + @Unstable + public abstract boolean getResult(); + + @Private + @Unstable + public abstract void setResult(boolean result); +} \ No newline at end of file http://git-wip-us.apache.org/repos/asf/hadoop/blob/a67299a6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/FederationProtocolPBTranslator.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/FederationProtocolPBTranslator.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/FederationProtocolPBTranslator.java new file mode 100644 index 000..baad113 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/FederationProtocolPBTranslator.java @@ -0,0 +1,145 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb; + +import java.io.IOException; +import java.lang.reflect.Method; + +import org.apache.commons.codec.binary.Base64; + +import com.google.protobuf.GeneratedMessage; +import com.google.protobuf.Message; +import com.google.protobuf.Message.Builder; +import com.google.protobuf.MessageOrBuilder; + +/** + * Helper class for setting/getting data elements in an object backed by a + * protobuf implementation. + */ +public class FederationProtocolPBTranslator<P extends GeneratedMessage, B extends Builder, T extends MessageOrBuilder> { + + /** Optional proto byte stream used to create this object. */ + private P proto; /** The class of the proto handler for this
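FederationProtocolPBTranslator bridges the abstract store records to their generated protobuf implementations, which it can only do reflectively because the concrete message type is a type parameter. A minimal sketch of that reflective-factory pattern, using a stand-in class so the example runs without protobuf on the classpath:

import java.lang.reflect.Method;

public class ReflectiveBuilderSketch {
  /** Stand-in for a protobuf-generated message type with a builder factory. */
  public static class FakeMessage {
    public static StringBuilder newBuilder() {
      return new StringBuilder("fake-proto:");
    }
  }

  /** Resolve the generated static factory at runtime, as the translator does. */
  public static Object newBuilderFor(Class<?> messageClass) throws Exception {
    Method factory = messageClass.getMethod("newBuilder");
    return factory.invoke(null); // static method, no receiver
  }

  public static void main(String[] args) throws Exception {
    StringBuilder builder = (StringBuilder) newBuilderFor(FakeMessage.class);
    System.out.println(builder.append("ok")); // fake-proto:ok
  }
}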
[10/29] hadoop git commit: HDFS-10630. Federation State Store FS Implementation. Contributed by Jason Kace and Inigo Goiri.
HDFS-10630. Federation State Store FS Implementation. Contributed by Jason Kace and Inigo Goiri. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ea78e866 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ea78e866 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ea78e866 Branch: refs/heads/HDFS-10467 Commit: ea78e86615912d120cb0b48324ce6a42ef902adc Parents: a9dddf2 Author: Inigo Goiri Authored: Tue May 2 15:49:53 2017 -0700 Committer: Inigo Goiri Committed: Mon Oct 2 18:33:14 2017 -0700 -- .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 14 + .../federation/router/PeriodicService.java | 198 .../StateStoreConnectionMonitorService.java | 67 +++ .../federation/store/StateStoreService.java | 152 +- .../federation/store/StateStoreUtils.java | 51 +- .../store/driver/StateStoreDriver.java | 31 +- .../driver/StateStoreRecordOperations.java | 17 +- .../store/driver/impl/StateStoreBaseImpl.java | 31 +- .../driver/impl/StateStoreFileBaseImpl.java | 429 .../store/driver/impl/StateStoreFileImpl.java | 161 +++ .../driver/impl/StateStoreFileSystemImpl.java | 178 +++ .../driver/impl/StateStoreSerializableImpl.java | 77 +++ .../federation/store/records/BaseRecord.java| 20 +- .../server/federation/store/records/Query.java | 66 +++ .../src/main/resources/hdfs-default.xml | 16 + .../store/FederationStateStoreTestUtils.java| 232 + .../store/driver/TestStateStoreDriverBase.java | 483 +++ .../store/driver/TestStateStoreFile.java| 64 +++ .../store/driver/TestStateStoreFileSystem.java | 88 19 files changed, 2329 insertions(+), 46 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea78e866/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index b8f13f3..10074ce 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hdfs; +import java.util.concurrent.TimeUnit; + import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; @@ -25,6 +27,8 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault; import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolerant; import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaLruTracker; +import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver; +import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileImpl; import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreSerializerPBImpl; import org.apache.hadoop.http.HttpConfig; @@ -1137,6 +1141,16 @@ public class DFSConfigKeys extends CommonConfigurationKeys { FEDERATION_STORE_SERIALIZER_CLASS_DEFAULT = StateStoreSerializerPBImpl.class; + public static final String FEDERATION_STORE_DRIVER_CLASS = + FEDERATION_STORE_PREFIX + "driver.class"; + public static final Class<? extends StateStoreDriver> + FEDERATION_STORE_DRIVER_CLASS_DEFAULT = StateStoreFileImpl.class; + + public static final String FEDERATION_STORE_CONNECTION_TEST_MS = + FEDERATION_STORE_PREFIX +
"connection.test"; + public static final long FEDERATION_STORE_CONNECTION_TEST_MS_DEFAULT = + TimeUnit.MINUTES.toMillis(1); + // dfs.client.retry confs are moved to HdfsClientConfigKeys.Retry @Deprecated public static final String DFS_CLIENT_RETRY_POLICY_ENABLED_KEY http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea78e866/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/PeriodicService.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/PeriodicService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/PeriodicService.java new file mode 100644 index 000..5e1 --- /dev/null +++
[04/29] hadoop git commit: HDFS-10687. Federation Membership State Store internal API. Contributed by Jason Kace and Inigo Goiri.
HDFS-10687. Federation Membership State Store internal API. Contributed by Jason Kace and Inigo Goiri. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a67299a6 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a67299a6 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a67299a6 Branch: refs/heads/HDFS-10467 Commit: a67299a6ab90bb16a521d08753b4f2942b777cff Parents: 317af56 Author: Inigo Goiri Authored: Mon Jul 31 10:55:21 2017 -0700 Committer: Inigo Goiri Committed: Mon Oct 2 18:33:14 2017 -0700 -- .../dev-support/findbugsExcludeFile.xml | 3 + hadoop-hdfs-project/hadoop-hdfs/pom.xml | 1 + .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 17 +- .../resolver/MembershipNamenodeResolver.java| 290 .../federation/router/FederationUtil.java | 42 +- .../federation/store/CachedRecordStore.java | 237 ++ .../federation/store/MembershipStore.java | 126 + .../federation/store/StateStoreCache.java | 36 ++ .../store/StateStoreCacheUpdateService.java | 67 +++ .../federation/store/StateStoreService.java | 202 +++- .../store/impl/MembershipStoreImpl.java | 311 + .../federation/store/impl/package-info.java | 31 ++ .../GetNamenodeRegistrationsRequest.java| 52 +++ .../GetNamenodeRegistrationsResponse.java | 55 +++ .../store/protocol/GetNamespaceInfoRequest.java | 30 ++ .../protocol/GetNamespaceInfoResponse.java | 52 +++ .../protocol/NamenodeHeartbeatRequest.java | 52 +++ .../protocol/NamenodeHeartbeatResponse.java | 49 ++ .../UpdateNamenodeRegistrationRequest.java | 72 +++ .../UpdateNamenodeRegistrationResponse.java | 51 ++ .../impl/pb/FederationProtocolPBTranslator.java | 145 ++ .../GetNamenodeRegistrationsRequestPBImpl.java | 87 .../GetNamenodeRegistrationsResponsePBImpl.java | 99 .../impl/pb/GetNamespaceInfoRequestPBImpl.java | 60 +++ .../impl/pb/GetNamespaceInfoResponsePBImpl.java | 95 .../impl/pb/NamenodeHeartbeatRequestPBImpl.java | 93 .../pb/NamenodeHeartbeatResponsePBImpl.java | 71 +++ ...UpdateNamenodeRegistrationRequestPBImpl.java | 95 ...pdateNamenodeRegistrationResponsePBImpl.java | 73 +++ .../store/protocol/impl/pb/package-info.java| 29 ++ .../store/records/MembershipState.java | 329 + .../store/records/MembershipStats.java | 126 + .../records/impl/pb/MembershipStatePBImpl.java | 334 + .../records/impl/pb/MembershipStatsPBImpl.java | 191 .../src/main/proto/FederationProtocol.proto | 107 + .../src/main/resources/hdfs-default.xml | 18 +- .../resolver/TestNamenodeResolver.java | 284 .../store/FederationStateStoreTestUtils.java| 23 +- .../federation/store/TestStateStoreBase.java| 81 .../store/TestStateStoreMembershipState.java| 463 +++ .../store/driver/TestStateStoreDriverBase.java | 69 ++- .../store/records/TestMembershipState.java | 129 ++ 42 files changed, 4745 insertions(+), 32 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/a67299a6/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml index 9582fcb..4b958b5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml @@ -15,6 +15,9 @@ + + + http://git-wip-us.apache.org/repos/asf/hadoop/blob/a67299a6/hadoop-hdfs-project/hadoop-hdfs/pom.xml -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml index 425572f..cc7a975 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml @@ -331,6 +331,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> QJournalProtocol.proto editlog.proto fsimage.proto + FederationProtocol.proto http://git-wip-us.apache.org/repos/asf/hadoop/blob/a67299a6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
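All of the request/response records in this commit share one shape: an abstract class whose static newInstance() factories delegate to a pluggable serializer, as in the UpdateNamenodeRegistrationResponse file shown earlier. A compact sketch of that pattern, with a hand-rolled registry standing in for StateStoreSerializer and a hypothetical HeartbeatResponse record:

import java.util.HashMap;
import java.util.Map;
import java.util.function.Supplier;

public class RecordFactorySketch {
  // Stand-in for StateStoreSerializer: maps abstract record types to
  // suppliers of their concrete (e.g. protobuf-backed) implementations.
  private static final Map<Class<?>, Supplier<?>> IMPLS = new HashMap<>();

  static <T> void register(Class<T> api, Supplier<? extends T> impl) {
    IMPLS.put(api, impl);
  }

  @SuppressWarnings("unchecked")
  static <T> T newRecord(Class<T> api) {
    return (T) IMPLS.get(api).get(); // assumes the type was registered
  }

  /** Hypothetical record mirroring NamenodeHeartbeatResponse's shape. */
  public abstract static class HeartbeatResponse {
    public static HeartbeatResponse newInstance(boolean status) {
      HeartbeatResponse response = newRecord(HeartbeatResponse.class);
      response.setResult(status);
      return response;
    }
    public abstract boolean getResult();
    public abstract void setResult(boolean result);
  }

  public static void main(String[] args) {
    register(HeartbeatResponse.class, () -> new HeartbeatResponse() {
      private boolean result;
      public boolean getResult() { return result; }
      public void setResult(boolean r) { this.result = r; }
    });
    System.out.println(HeartbeatResponse.newInstance(true).getResult()); // true
  }
}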
[05/29] hadoop git commit: HDFS-11546. Federation Router RPC server. Contributed by Jason Kace and Inigo Goiri.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d135a61d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java index ee6f57d..2875750 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java @@ -43,7 +43,7 @@ import org.apache.hadoop.util.Time; /** * In-memory cache/mock of a namenode and file resolver. Stores the most - * recently updated NN information for each nameservice and block pool. Also + * recently updated NN information for each nameservice and block pool. It also * stores a virtual mount table for resolving global namespace paths to local NN * paths. */ @@ -51,82 +51,93 @@ public class MockResolver implements ActiveNamenodeResolver, FileSubclusterResolver { private Map<String, List<? extends FederationNamenodeContext>> resolver = - new HashMap<String, List<? extends FederationNamenodeContext>>(); - private Map<String, List<RemoteLocation>> locations = - new HashMap<String, List<RemoteLocation>>(); - private Set<FederationNamespaceInfo> namespaces = - new HashSet<FederationNamespaceInfo>(); + new HashMap<>(); + private Map<String, List<RemoteLocation>> locations = new HashMap<>(); + private Set<FederationNamespaceInfo> namespaces = new HashSet<>(); private String defaultNamespace = null; + public MockResolver(Configuration conf, StateStoreService store) { this.cleanRegistrations(); } - public void addLocation(String mount, String nameservice, String location) { -RemoteLocation remoteLocation = new RemoteLocation(nameservice, location); -List<RemoteLocation> locationsList = locations.get(mount); + public void addLocation(String mount, String nsId, String location) { +List<RemoteLocation> locationsList = this.locations.get(mount); if (locationsList == null) { - locationsList = new LinkedList<RemoteLocation>(); - locations.put(mount, locationsList); + locationsList = new LinkedList<>(); + this.locations.put(mount, locationsList); } + +final RemoteLocation remoteLocation = new RemoteLocation(nsId, location); if (!locationsList.contains(remoteLocation)) { locationsList.add(remoteLocation); } if (this.defaultNamespace == null) { - this.defaultNamespace = nameservice; + this.defaultNamespace = nsId; } } public synchronized void cleanRegistrations() { -this.resolver = -new HashMap<String, List<? extends FederationNamenodeContext>>(); -this.namespaces = new HashSet<FederationNamespaceInfo>(); +this.resolver = new HashMap<>(); +this.namespaces = new HashSet<>(); } @Override public void updateActiveNamenode( - String ns, InetSocketAddress successfulAddress) { + String nsId, InetSocketAddress successfulAddress) { String address = successfulAddress.getHostName() + ":" + successfulAddress.getPort(); -String key = ns; +String key = nsId; if (key != null) { // Update the active entry @SuppressWarnings("unchecked") - List<FederationNamenodeContext> iterator = - (List<FederationNamenodeContext>) resolver.get(key); - for (FederationNamenodeContext namenode : iterator) { + List<FederationNamenodeContext> namenodes = + (List<FederationNamenodeContext>) this.resolver.get(key); + for (FederationNamenodeContext namenode : namenodes) { if (namenode.getRpcAddress().equals(address)) { MockNamenodeContext nn = (MockNamenodeContext) namenode; nn.setState(FederationNamenodeServiceState.ACTIVE); break; } } - Collections.sort(iterator, new NamenodePriorityComparator()); + // This operation modifies the list so we need to be careful + synchronized(namenodes) { +Collections.sort(namenodes, new NamenodePriorityComparator()); + } } } @Override public List<? extends FederationNamenodeContext> getNamenodesForNameserviceId(String nameserviceId) { -return resolver.get(nameserviceId); +// Return a copy of the list because it is updated periodically +List<? extends FederationNamenodeContext> namenodes = +this.resolver.get(nameserviceId); +return Collections.unmodifiableList(new ArrayList<>(namenodes)); } @Override public List<? extends FederationNamenodeContext> getNamenodesForBlockPoolId( String blockPoolId) { -return resolver.get(blockPoolId); +// Return a copy of the list because it is updated periodically +List<? extends FederationNamenodeContext> namenodes = +this.resolver.get(blockPoolId); +return Collections.unmodifiableList(new ArrayList<>(namenodes)); } private static class MockNamenodeContext implements FederationNamenodeContext { + +private String namenodeId; +private String nameserviceId; + private String webAddress; private String rpcAddress; private String serviceAddress; private String lifelineAddress; -private String namenodeId; -private String
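The MockResolver change above swaps direct returns of internal lists for unmodifiable copies, because a background refresh mutates those lists while callers iterate. The idiom in isolation (names illustrative):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class DefensiveCopySketch {
  private final List<String> live = new ArrayList<>(Arrays.asList("nn1", "nn2"));

  /** Copy first, then wrap: callers get a stable, read-only view. */
  public List<String> getSnapshot() {
    return Collections.unmodifiableList(new ArrayList<>(live));
  }

  public static void main(String[] args) {
    DefensiveCopySketch resolver = new DefensiveCopySketch();
    List<String> snapshot = resolver.getSnapshot();
    resolver.live.add("nn3");                   // simulated background update
    System.out.println(snapshot);               // [nn1, nn2], unaffected
    System.out.println(resolver.getSnapshot()); // [nn1, nn2, nn3]
  }
}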
[13/29] hadoop git commit: HDFS-12223. Rebasing HDFS-10467. Contributed by Inigo Goiri.
HDFS-12223. Rebasing HDFS-10467. Contributed by Inigo Goiri. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/317af56f Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/317af56f Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/317af56f Branch: refs/heads/HDFS-10467 Commit: 317af56ff5cd4bfbf6f17afea7e10494064b36d1 Parents: d135a61 Author: Inigo Goiri Authored: Fri Jul 28 15:55:10 2017 -0700 Committer: Inigo Goiri Committed: Mon Oct 2 18:33:14 2017 -0700 -- .../federation/router/RouterRpcServer.java | 59 +--- 1 file changed, 51 insertions(+), 8 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/317af56f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java index 4bae71e..eaaab39 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java @@ -64,8 +64,9 @@ import org.apache.hadoop.hdfs.AddBlockFlag; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.inotify.EventBatchList; -import org.apache.hadoop.hdfs.protocol.AddingECPolicyResponse; +import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse; import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; +import org.apache.hadoop.hdfs.protocol.BlocksStats; import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry; import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; import org.apache.hadoop.hdfs.protocol.CachePoolEntry; @@ -75,6 +76,7 @@ import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DirectoryListing; +import org.apache.hadoop.hdfs.protocol.ECBlockGroupsStats; import org.apache.hadoop.hdfs.protocol.EncryptionZone; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; @@ -85,6 +87,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; +import org.apache.hadoop.hdfs.protocol.OpenFileEntry; import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; @@ -1736,13 +1739,6 @@ public class RouterRpcServer extends AbstractService implements ClientProtocol { } @Override // ClientProtocol - public AddingECPolicyResponse[] addErasureCodingPolicies( - ErasureCodingPolicy[] policies) throws IOException { -checkOperation(OperationCategory.WRITE, false); -return null; - } - - @Override // ClientProtocol public void unsetErasureCodingPolicy(String src) throws IOException { checkOperation(OperationCategory.WRITE, false); } @@ -1808,6 +1804,53 @@ public class RouterRpcServer extends AbstractService implements ClientProtocol { return null; } + @Override + public AddECPolicyResponse[] addErasureCodingPolicies( + ErasureCodingPolicy[] arg0) throws IOException { +checkOperation(OperationCategory.WRITE, false); +return null; + } + + @Override + public void removeErasureCodingPolicy(String arg0) throws IOException { +checkOperation(OperationCategory.WRITE, false); + } + + @Override + public void disableErasureCodingPolicy(String arg0) throws IOException { +checkOperation(OperationCategory.WRITE, false); + } + + @Override + public void enableErasureCodingPolicy(String arg0) throws IOException { +checkOperation(OperationCategory.WRITE, false); + } + + @Override + public ECBlockGroupsStats getECBlockGroupsStats() throws IOException { +checkOperation(OperationCategory.READ, false); +return null; + } + + @Override + public HashMap<String, String> getErasureCodingCodecs() throws IOException { +checkOperation(OperationCategory.READ, false); +return null; + } + + @Override + public BlocksStats getBlocksStats() throws IOException { +checkOperation(OperationCategory.READ, false); +return null; + } + + @Override + public
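Every stub above calls checkOperation(...) before its return, so even unimplemented RPCs pass through the router's operation-category gate first. One plausible reading, used in this sketch, is that an unsupported operation raises UnsupportedOperationException, making the trailing return null unreachable. All names below are illustrative:

public class OperationGuardSketch {
  enum OperationCategory { READ, WRITE }

  private volatile boolean safeMode = true;

  void checkOperation(OperationCategory op, boolean supported) {
    if (safeMode && op == OperationCategory.WRITE) {
      throw new IllegalStateException("router in safe mode, rejecting writes");
    }
    if (!supported) {
      // The "return null" after this call in the stubs is never reached.
      throw new UnsupportedOperationException("operation not implemented");
    }
  }

  public static void main(String[] args) {
    OperationGuardSketch server = new OperationGuardSketch();
    try {
      server.checkOperation(OperationCategory.WRITE, true);
    } catch (IllegalStateException e) {
      System.out.println(e.getMessage()); // safe mode blocks writes
    }
    try {
      server.checkOperation(OperationCategory.READ, false);
    } catch (UnsupportedOperationException e) {
      System.out.println(e.getMessage()); // stubbed method
    }
  }
}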
[17/29] hadoop git commit: HDFS-11826. Federation Namenode Heartbeat. Contributed by Inigo Goiri.
HDFS-11826. Federation Namenode Heartbeat. Contributed by Inigo Goiri. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a3c44f75 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a3c44f75 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a3c44f75 Branch: refs/heads/HDFS-10467 Commit: a3c44f759d12f2691b78a23c3f60a6032b2ffcc5 Parents: a67299a Author: Inigo Goiri Authored: Tue Aug 1 14:40:27 2017 -0700 Committer: Inigo Goiri Committed: Mon Oct 2 18:33:14 2017 -0700 -- .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 14 + .../java/org/apache/hadoop/hdfs/DFSUtil.java| 38 ++ .../resolver/NamenodeStatusReport.java | 193 ++ .../federation/router/FederationUtil.java | 66 .../router/NamenodeHeartbeatService.java| 350 +++ .../hdfs/server/federation/router/Router.java | 112 ++ .../src/main/resources/hdfs-default.xml | 32 ++ .../org/apache/hadoop/hdfs/MiniDFSCluster.java | 8 + .../hdfs/server/federation/MockResolver.java| 9 +- .../server/federation/RouterConfigBuilder.java | 22 ++ .../server/federation/RouterDFSCluster.java | 43 +++ .../router/TestNamenodeHeartbeat.java | 168 + .../server/federation/router/TestRouter.java| 3 + 13 files changed, 1057 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3c44f75/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index b50c538..f0b0c63 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -1147,6 +1147,20 @@ public class DFSConfigKeys extends CommonConfigurationKeys { FEDERATION_ROUTER_PREFIX + "rpc.enable"; public static final boolean DFS_ROUTER_RPC_ENABLE_DEFAULT = true; + // HDFS Router heartbeat + public static final String DFS_ROUTER_HEARTBEAT_ENABLE = + FEDERATION_ROUTER_PREFIX + "heartbeat.enable"; + public static final boolean DFS_ROUTER_HEARTBEAT_ENABLE_DEFAULT = true; + public static final String DFS_ROUTER_HEARTBEAT_INTERVAL_MS = + FEDERATION_ROUTER_PREFIX + "heartbeat.interval"; + public static final long DFS_ROUTER_HEARTBEAT_INTERVAL_MS_DEFAULT = + TimeUnit.SECONDS.toMillis(5); + public static final String DFS_ROUTER_MONITOR_NAMENODE = + FEDERATION_ROUTER_PREFIX + "monitor.namenode"; + public static final String DFS_ROUTER_MONITOR_LOCAL_NAMENODE = + FEDERATION_ROUTER_PREFIX + "monitor.localnamenode.enable"; + public static final boolean DFS_ROUTER_MONITOR_LOCAL_NAMENODE_DEFAULT = true; + // HDFS Router NN client public static final String DFS_ROUTER_NAMENODE_CONNECTION_POOL_SIZE = FEDERATION_ROUTER_PREFIX + "connection.pool-size"; http://git-wip-us.apache.org/repos/asf/hadoop/blob/a3c44f75/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java index 32a1cae..2f9781a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java @@ -1324,6 +1324,44 @@ public class DFSUtil { } /** + * Map a logical namenode ID to its web address. 
Use the given nameservice if + * specified, or the configured one if none is given. + * + * @param conf Configuration + * @param nsId which nameservice nnId is a part of, optional + * @param nnId the namenode ID to get the service addr for + * @return the service addr, null if it could not be determined + */ + public static String getNamenodeWebAddr(final Configuration conf, String nsId, + String nnId) { + +if (nsId == null) { + nsId = getOnlyNameServiceIdOrNull(conf); +} + +String webAddrKey = DFSUtilClient.concatSuffixes( +DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, nsId, nnId); + +String webAddr = +conf.get(webAddrKey, DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_DEFAULT); +return webAddr; + } + + /** + * Get all of the Web addresses of the individual NNs in a given nameservice. + * + * @param conf Configuration + * @param nsId the
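getNamenodeWebAddr works by suffixing the nameservice and namenode IDs onto the base dfs.namenode.http-address key and looking the result up in the configuration. A small self-contained sketch of that key-construction scheme; concatSuffixes below mimics DFSUtilClient's helper, and the map stands in for a Configuration:

import java.util.HashMap;
import java.util.Map;

public class SuffixKeySketch {
  /** Append non-empty suffixes to a key, dot-separated. */
  static String concatSuffixes(String key, String... suffixes) {
    StringBuilder sb = new StringBuilder(key);
    for (String suffix : suffixes) {
      if (suffix != null && !suffix.isEmpty()) {
        sb.append('.').append(suffix);
      }
    }
    return sb.toString();
  }

  public static void main(String[] args) {
    Map<String, String> conf = new HashMap<>();
    conf.put("dfs.namenode.http-address.ns1.nn1", "machine1.example.com:9870");

    String key = concatSuffixes("dfs.namenode.http-address", "ns1", "nn1");
    System.out.println(key + " = " + conf.get(key));
  }
}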
[21/29] hadoop git commit: HDFS-10881. Federation State Store Driver API. Contributed by Jason Kace and Inigo Goiri.
HDFS-10881. Federation State Store Driver API. Contributed by Jason Kace and Inigo Goiri. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fdd1fe9d Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fdd1fe9d Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fdd1fe9d Branch: refs/heads/HDFS-10467 Commit: fdd1fe9d5d862eb809b6bcf0ab430bb609902187 Parents: f6918e9 Author: Inigo Authored: Wed Mar 29 19:35:06 2017 -0700 Committer: Inigo Goiri Committed: Mon Oct 2 18:33:14 2017 -0700 -- .../store/StateStoreUnavailableException.java | 33 .../federation/store/StateStoreUtils.java | 72 +++ .../store/driver/StateStoreDriver.java | 172 + .../driver/StateStoreRecordOperations.java | 164 .../store/driver/impl/StateStoreBaseImpl.java | 69 +++ .../store/driver/impl/package-info.java | 39 .../federation/store/driver/package-info.java | 37 .../federation/store/protocol/package-info.java | 31 +++ .../federation/store/records/BaseRecord.java| 189 +++ .../federation/store/records/QueryResult.java | 56 ++ .../federation/store/records/package-info.java | 36 11 files changed, 898 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/fdd1fe9d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUnavailableException.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUnavailableException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUnavailableException.java new file mode 100644 index 000..4e6f8c8 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUnavailableException.java @@ -0,0 +1,33 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation.store; + +import java.io.IOException; + +/** + * Thrown when the state store is not reachable or available. Cached APIs and + * queries may succeed. Client should retry again later. 
+ */ +public class StateStoreUnavailableException extends IOException { + + private static final long serialVersionUID = 1L; + + public StateStoreUnavailableException(String msg) { +super(msg); + } +} \ No newline at end of file http://git-wip-us.apache.org/repos/asf/hadoop/blob/fdd1fe9d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUtils.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUtils.java new file mode 100644 index 000..8c681df --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUtils.java @@ -0,0 +1,72 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing
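The driver API defined in this commit boils down to typed put/get/remove operations over BaseRecord subclasses, with QueryResult pairing the returned records with a read timestamp so callers can judge cache freshness. A much-simplified in-memory sketch of that contract, with illustrative names (real drivers persist records in a file, a filesystem, or ZooKeeper):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class StateStoreDriverSketch {
  /** Records plus the moment they were read, for cache-freshness checks. */
  public static class QueryResult<T> {
    private final List<T> records;
    private final long timestamp;
    public QueryResult(List<T> records, long timestamp) {
      this.records = records;
      this.timestamp = timestamp;
    }
    public List<T> getRecords() { return records; }
    public long getTimestamp() { return timestamp; }
  }

  private final Map<Class<?>, List<Object>> store = new HashMap<>();

  public <T> boolean put(T record) {
    store.computeIfAbsent(record.getClass(), k -> new ArrayList<>()).add(record);
    return true;
  }

  @SuppressWarnings("unchecked")
  public <T> QueryResult<T> get(Class<T> clazz) {
    List<T> records = (List<T>) store.getOrDefault(clazz, new ArrayList<>());
    return new QueryResult<>(new ArrayList<>(records), System.currentTimeMillis());
  }

  public static void main(String[] args) {
    StateStoreDriverSketch driver = new StateStoreDriverSketch();
    driver.put("membership-record");
    QueryResult<String> result = driver.get(String.class);
    System.out.println(result.getRecords() + " @ " + result.getTimestamp());
  }
}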
[26/29] hadoop git commit: HDFS-12335. Federation Metrics. Contributed by Inigo Goiri.
HDFS-12335. Federation Metrics. Contributed by Inigo Goiri. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/887d424f Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/887d424f Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/887d424f Branch: refs/heads/HDFS-10467 Commit: 887d424fc519048e49dcbd30fa2f38f9ad97ecf9 Parents: 06d9cea Author: Inigo Goiri Authored: Fri Sep 8 09:37:10 2017 -0700 Committer: Inigo Goiri Committed: Mon Oct 2 18:33:15 2017 -0700 -- .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 11 + .../federation/metrics/FederationMBean.java | 204 ++ .../federation/metrics/FederationMetrics.java | 673 +++ .../federation/metrics/FederationRPCMBean.java | 90 +++ .../metrics/FederationRPCMetrics.java | 239 +++ .../FederationRPCPerformanceMonitor.java| 211 ++ .../federation/metrics/NamenodeBeanMetrics.java | 624 + .../federation/metrics/StateStoreMBean.java | 45 ++ .../federation/metrics/StateStoreMetrics.java | 144 .../server/federation/metrics/package-info.java | 27 + .../federation/router/ConnectionManager.java| 23 + .../federation/router/ConnectionPool.java | 23 + .../hdfs/server/federation/router/Router.java | 62 ++ .../server/federation/router/RouterMetrics.java | 73 ++ .../federation/router/RouterMetricsService.java | 108 +++ .../federation/router/RouterRpcClient.java | 39 +- .../federation/router/RouterRpcMonitor.java | 95 +++ .../federation/router/RouterRpcServer.java | 63 +- .../federation/store/CachedRecordStore.java | 8 + .../federation/store/StateStoreService.java | 42 +- .../store/driver/StateStoreDriver.java | 17 +- .../driver/impl/StateStoreSerializableImpl.java | 6 +- .../driver/impl/StateStoreZooKeeperImpl.java| 26 + .../store/records/MembershipState.java | 2 +- .../federation/store/records/MountTable.java| 23 + .../records/impl/pb/MembershipStatePBImpl.java | 5 +- .../src/main/resources/hdfs-default.xml | 19 +- .../server/federation/FederationTestUtils.java | 13 + .../server/federation/RouterConfigBuilder.java | 13 + .../metrics/TestFederationMetrics.java | 237 +++ .../federation/metrics/TestMetricsBase.java | 150 + .../server/federation/router/TestRouter.java| 23 +- .../store/driver/TestStateStoreDriverBase.java | 69 ++ 33 files changed, 3383 insertions(+), 24 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/887d424f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index 7bb08af..57251a7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -29,6 +29,8 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFau import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaLruTracker; import org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver; import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver; +import org.apache.hadoop.hdfs.server.federation.router.RouterRpcMonitor; +import org.apache.hadoop.hdfs.server.federation.metrics.FederationRPCPerformanceMonitor; import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver; import org.apache.hadoop.hdfs.server.federation.resolver.MembershipNamenodeResolver; import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver; @@ -1149,6 +1151,15 @@ public class DFSConfigKeys extends CommonConfigurationKeys { FEDERATION_ROUTER_PREFIX + "rpc.enable"; public static final boolean DFS_ROUTER_RPC_ENABLE_DEFAULT = true; + public static final String DFS_ROUTER_METRICS_ENABLE = + FEDERATION_ROUTER_PREFIX + "metrics.enable"; + public static final boolean DFS_ROUTER_METRICS_ENABLE_DEFAULT = true; + public static final String DFS_ROUTER_METRICS_CLASS = + FEDERATION_ROUTER_PREFIX + "metrics.class"; + public static final Class<? extends RouterRpcMonitor> + DFS_ROUTER_METRICS_CLASS_DEFAULT = + FederationRPCPerformanceMonitor.class; + // HDFS Router heartbeat public static final String DFS_ROUTER_HEARTBEAT_ENABLE = FEDERATION_ROUTER_PREFIX + "heartbeat.enable";
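The new keys are toggled like any other router configuration. A short sketch of enabling metrics and selecting the monitor class; the key strings are taken from the constants above, and the surrounding boilerplate is assumed:

import org.apache.hadoop.conf.Configuration;

public class RouterMetricsConfSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.setBoolean("dfs.federation.router.metrics.enable", true);
    // Default per the diff above: FederationRPCPerformanceMonitor.
    conf.set("dfs.federation.router.metrics.class",
        "org.apache.hadoop.hdfs.server.federation.metrics."
            + "FederationRPCPerformanceMonitor");
    System.out.println(
        conf.getBoolean("dfs.federation.router.metrics.enable", false));
  }
}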
[09/29] hadoop git commit: HDFS-10630. Federation State Store FS Implementation. Contributed by Jason Kace and Inigo Goiri.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea78e866/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java new file mode 100644 index 000..7f0b36a --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java @@ -0,0 +1,483 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation.store.driver; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.lang.reflect.Method; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils; +import org.apache.hadoop.hdfs.server.federation.store.StateStoreService; +import org.apache.hadoop.hdfs.server.federation.store.records.BaseRecord; +import org.apache.hadoop.hdfs.server.federation.store.records.Query; +import org.apache.hadoop.hdfs.server.federation.store.records.QueryResult; +import org.junit.AfterClass; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Base tests for the driver. The particular implementations will use this to + * test their functionality. + */ +public class TestStateStoreDriverBase { + + private static final Logger LOG = + LoggerFactory.getLogger(TestStateStoreDriverBase.class); + + private static StateStoreService stateStore; + private static Configuration conf; + + + /** + * Get the State Store driver. + * @return State Store driver. + */ + protected StateStoreDriver getStateStoreDriver() { +return stateStore.getDriver(); + } + + @AfterClass + public static void tearDownCluster() { +if (stateStore != null) { + stateStore.stop(); +} + } + + /** + * Get a new State Store using this configuration. + * + * @param config Configuration for the State Store. + * @throws Exception If we cannot get the State Store. 
+ */ + public static void getStateStore(Configuration config) throws Exception { +conf = config; +stateStore = FederationStateStoreTestUtils.getStateStore(conf); + } + + private <T extends BaseRecord> T generateFakeRecord(Class<T> recordClass) + throws IllegalArgumentException, IllegalAccessException, IOException { + +// TODO add record +return null; + } + + /** + * Validate if a record is the same. + * + * @param original + * @param committed + * @param assertEquals Assert if the records are equal or just return. + * @return + * @throws IllegalArgumentException + * @throws IllegalAccessException + */ + private boolean validateRecord( + BaseRecord original, BaseRecord committed, boolean assertEquals) + throws IllegalArgumentException, IllegalAccessException { + +boolean ret = true; + +Map<String, Class<?>> fields = getFields(original); +for (String key : fields.keySet()) { + if (key.equals("dateModified") || + key.equals("dateCreated") || + key.equals("proto")) { +// Fields are updated/set on commit and fetch and may not match +// the fields that are initialized in a non-committed object. +continue; + } + Object data1 = getField(original, key); + Object data2 = getField(committed, key); + if (assertEquals) { +assertEquals("Field " + key + " does not match", data1, data2); + } else if (!data1.equals(data2)) { +ret = false; + } +} + +long now =
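The validateRecord helper above compares two records field by field via reflection, deliberately skipping dateModified, dateCreated, and proto because those are rewritten on commit and fetch. The same technique in a runnable miniature (the record class is illustrative):

import java.lang.reflect.Field;
import java.util.Arrays;
import java.util.List;

public class RecordCompareSketch {
  static final List<String> SKIP =
      Arrays.asList("dateModified", "dateCreated", "proto");

  static boolean sameRecord(Object original, Object committed)
      throws IllegalAccessException {
    for (Field field : original.getClass().getDeclaredFields()) {
      if (SKIP.contains(field.getName())) {
        continue; // set on commit/fetch, allowed to differ
      }
      field.setAccessible(true);
      Object a = field.get(original);
      Object b = field.get(committed);
      if (a == null ? b != null : !a.equals(b)) {
        return false;
      }
    }
    return true;
  }

  static class Rec { String name = "ns0"; long dateModified = 1; }

  public static void main(String[] args) throws Exception {
    Rec x = new Rec();
    Rec y = new Rec();
    y.dateModified = 99;                  // differs, but skipped
    System.out.println(sameRecord(x, y)); // true
  }
}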
[12/29] hadoop git commit: HDFS-10629. Federation Router. Contributed by Jason Kace and Inigo Goiri.
HDFS-10629. Federation Router. Contributed by Jason Kace and Inigo Goiri. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f6918e94 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f6918e94 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f6918e94 Branch: refs/heads/HDFS-10467 Commit: f6918e942866a382b43e50981eec6ba04fcc968e Parents: d4d2fd1 Author: Inigo Authored: Tue Mar 28 14:30:59 2017 -0700 Committer: Inigo Goiri Committed: Mon Oct 2 18:33:14 2017 -0700 -- .../hadoop-hdfs/src/main/bin/hdfs | 5 + .../hadoop-hdfs/src/main/bin/hdfs.cmd | 8 +- .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 17 + .../resolver/ActiveNamenodeResolver.java| 117 +++ .../resolver/FederationNamenodeContext.java | 87 +++ .../FederationNamenodeServiceState.java | 46 ++ .../resolver/FederationNamespaceInfo.java | 99 +++ .../resolver/FileSubclusterResolver.java| 75 ++ .../resolver/NamenodePriorityComparator.java| 63 ++ .../resolver/NamenodeStatusReport.java | 195 + .../federation/resolver/PathLocation.java | 122 +++ .../federation/resolver/RemoteLocation.java | 74 ++ .../federation/resolver/package-info.java | 41 + .../federation/router/FederationUtil.java | 117 +++ .../router/RemoteLocationContext.java | 38 + .../hdfs/server/federation/router/Router.java | 263 +++ .../federation/router/RouterRpcServer.java | 102 +++ .../server/federation/router/package-info.java | 31 + .../federation/store/StateStoreService.java | 77 ++ .../server/federation/store/package-info.java | 62 ++ .../src/main/resources/hdfs-default.xml | 16 + .../server/federation/FederationTestUtils.java | 233 ++ .../hdfs/server/federation/MockResolver.java| 290 +++ .../server/federation/RouterConfigBuilder.java | 40 + .../server/federation/RouterDFSCluster.java | 767 +++ .../server/federation/router/TestRouter.java| 96 +++ 26 files changed, 3080 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6918e94/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs index e6405b5..b1f44a4 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs @@ -57,6 +57,7 @@ function hadoop_usage hadoop_add_subcommand "oiv" admin "apply the offline fsimage viewer to an fsimage" hadoop_add_subcommand "oiv_legacy" admin "apply the offline fsimage viewer to a legacy fsimage" hadoop_add_subcommand "portmap" daemon "run a portmap service" + hadoop_add_subcommand "router" daemon "run the DFS router" hadoop_add_subcommand "secondarynamenode" daemon "run the DFS secondary namenode" hadoop_add_subcommand "snapshotDiff" client "diff two snapshots of a directory or diff the current directory contents with a snapshot" hadoop_add_subcommand "storagepolicies" admin "list/get/set block storage policies" @@ -176,6 +177,10 @@ function hdfscmd_case HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true" HADOOP_CLASSNAME=org.apache.hadoop.portmap.Portmap ;; +router) + HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true" + HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.federation.router.Router' +;; secondarynamenode) HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true" HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode' http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6918e94/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd index 2181e47..b9853d6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd @@ -59,7 +59,7 @@ if "%1" == "--loglevel" ( ) ) - set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies classpath crypto debug + set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies classpath crypto router debug for %%i in ( %hdfscommands% ) do ( if %hdfs-command% == %%i set hdfscommand=true
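Both scripts now map "router" to org.apache.hadoop.hdfs.server.federation.router.Router with daemonization enabled. Launching it programmatically amounts to the standard Hadoop service lifecycle; a sketch assuming only the public no-arg constructor and the Service init/start API, with startup details such as shutdown hooks omitted:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.federation.router.Router;

public class RouterLaunchSketch {
  public static void main(String[] args) {
    Configuration conf = new HdfsConfiguration();
    Router router = new Router();
    router.init(conf);  // wires the RPC server, resolvers, state store
    router.start();     // serves until stopped
  }
}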
[24/29] hadoop git commit: HDFS-12430. Rebasing HDFS-10467 After HDFS-12269 and HDFS-12218. Contributed by Inigo Goiri.
HDFS-12430. Rebasing HDFS-10467 After HDFS-12269 and HDFS-12218. Contributed by Inigo Goiri. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a034972a Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a034972a Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a034972a Branch: refs/heads/HDFS-10467 Commit: a034972a9b45cab618dcedeafa1a6731c92a09bb Parents: 887d424 Author: Inigo Goiri Authored: Wed Sep 13 09:15:13 2017 -0700 Committer: Inigo Goiri Committed: Mon Oct 2 18:33:15 2017 -0700 -- .../hdfs/server/federation/router/RouterRpcServer.java| 10 +- 1 file changed, 5 insertions(+), 5 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/a034972a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java index 6aee1ee..1fa1720 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java @@ -66,7 +66,6 @@ import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.inotify.EventBatchList; import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse; import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; -import org.apache.hadoop.hdfs.protocol.BlocksStats; import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry; import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; import org.apache.hadoop.hdfs.protocol.CachePoolEntry; @@ -76,7 +75,7 @@ import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DirectoryListing; -import org.apache.hadoop.hdfs.protocol.ECBlockGroupsStats; +import org.apache.hadoop.hdfs.protocol.ECBlockGroupStats; import org.apache.hadoop.hdfs.protocol.EncryptionZone; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; @@ -89,6 +88,7 @@ import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.OpenFileEntry; +import org.apache.hadoop.hdfs.protocol.ReplicatedBlockStats; import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; @@ -1879,19 +1879,19 @@ public class RouterRpcServer extends AbstractService implements ClientProtocol { } @Override - public ECBlockGroupsStats getECBlockGroupsStats() throws IOException { + public ECBlockGroupStats getECBlockGroupStats() throws IOException { checkOperation(OperationCategory.READ, false); return null; } @Override - public HashMap<String, String> getErasureCodingCodecs() throws IOException { + public Map<String, String> getErasureCodingCodecs() throws IOException { checkOperation(OperationCategory.READ, false); return null; } @Override - public BlocksStats getBlocksStats() throws IOException { + public ReplicatedBlockStats getReplicatedBlockStats() throws IOException {
checkOperation(OperationCategory.READ, false); return null; }
[08/29] hadoop git commit: HDFS-11546. Federation Router RPC server. Contributed by Jason Kace and Inigo Goiri.
HDFS-11546. Federation Router RPC server. Contributed by Jason Kace and Inigo Goiri. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d135a61d Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d135a61d Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d135a61d Branch: refs/heads/HDFS-10467 Commit: d135a61df59c0500b669cc425423a72301688616 Parents: ea78e86 Author: Inigo Goiri Authored: Thu May 11 09:57:03 2017 -0700 Committer: Inigo Goiri Committed: Mon Oct 2 18:33:14 2017 -0700 -- .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 38 + .../resolver/FederationNamespaceInfo.java | 46 +- .../federation/resolver/RemoteLocation.java | 46 +- .../federation/router/ConnectionContext.java| 104 + .../federation/router/ConnectionManager.java| 408 .../federation/router/ConnectionPool.java | 314 +++ .../federation/router/ConnectionPoolId.java | 117 ++ .../router/RemoteLocationContext.java | 38 +- .../server/federation/router/RemoteMethod.java | 164 ++ .../server/federation/router/RemoteParam.java | 71 + .../hdfs/server/federation/router/Router.java | 58 +- .../federation/router/RouterRpcClient.java | 856 .../federation/router/RouterRpcServer.java | 1867 +- .../src/main/resources/hdfs-default.xml | 95 + .../server/federation/FederationTestUtils.java | 80 +- .../hdfs/server/federation/MockResolver.java| 90 +- .../server/federation/RouterConfigBuilder.java | 20 +- .../server/federation/RouterDFSCluster.java | 535 +++-- .../server/federation/router/TestRouter.java| 31 +- .../server/federation/router/TestRouterRpc.java | 869 .../router/TestRouterRpcMultiDestination.java | 216 ++ 21 files changed, 5675 insertions(+), 388 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/d135a61d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index 10074ce..c7b4c01 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -1120,6 +1120,44 @@ public class DFSConfigKeys extends CommonConfigurationKeys { // HDFS Router-based federation public static final String FEDERATION_ROUTER_PREFIX = "dfs.federation.router."; + public static final String DFS_ROUTER_DEFAULT_NAMESERVICE = + FEDERATION_ROUTER_PREFIX + "default.nameserviceId"; + public static final String DFS_ROUTER_HANDLER_COUNT_KEY = + FEDERATION_ROUTER_PREFIX + "handler.count"; + public static final int DFS_ROUTER_HANDLER_COUNT_DEFAULT = 10; + public static final String DFS_ROUTER_READER_QUEUE_SIZE_KEY = + FEDERATION_ROUTER_PREFIX + "reader.queue.size"; + public static final int DFS_ROUTER_READER_QUEUE_SIZE_DEFAULT = 100; + public static final String DFS_ROUTER_READER_COUNT_KEY = + FEDERATION_ROUTER_PREFIX + "reader.count"; + public static final int DFS_ROUTER_READER_COUNT_DEFAULT = 1; + public static final String DFS_ROUTER_HANDLER_QUEUE_SIZE_KEY = + FEDERATION_ROUTER_PREFIX + "handler.queue.size"; + public static final int DFS_ROUTER_HANDLER_QUEUE_SIZE_DEFAULT = 100; + public static final String DFS_ROUTER_RPC_BIND_HOST_KEY = + FEDERATION_ROUTER_PREFIX + "rpc-bind-host"; + public static final int DFS_ROUTER_RPC_PORT_DEFAULT = 8888; + public static final String DFS_ROUTER_RPC_ADDRESS_KEY = +
FEDERATION_ROUTER_PREFIX + "rpc-address"; + public static final String DFS_ROUTER_RPC_ADDRESS_DEFAULT = + "0.0.0.0:" + DFS_ROUTER_RPC_PORT_DEFAULT; + public static final String DFS_ROUTER_RPC_ENABLE = + FEDERATION_ROUTER_PREFIX + "rpc.enable"; + public static final boolean DFS_ROUTER_RPC_ENABLE_DEFAULT = true; + + // HDFS Router NN client + public static final String DFS_ROUTER_NAMENODE_CONNECTION_POOL_SIZE = + FEDERATION_ROUTER_PREFIX + "connection.pool-size"; + public static final int DFS_ROUTER_NAMENODE_CONNECTION_POOL_SIZE_DEFAULT = + 64; + public static final String DFS_ROUTER_NAMENODE_CONNECTION_POOL_CLEAN = + FEDERATION_ROUTER_PREFIX + "connection.pool.clean.ms"; + public static final long DFS_ROUTER_NAMENODE_CONNECTION_POOL_CLEAN_DEFAULT = + TimeUnit.MINUTES.toMillis(1); + public static final String DFS_ROUTER_NAMENODE_CONNECTION_CLEAN_MS = + FEDERATION_ROUTER_PREFIX + "connection.clean.ms"; + public
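The new keys follow the standard Hadoop pattern of a string key plus a typed default in DFSConfigKeys. A minimal sketch (not part of the patch; the class name is illustrative) of how a Router-side component would read them:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class RouterRpcConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Falls back to the defaults above unless hdfs-site.xml overrides them.
    int handlers = conf.getInt(
        DFSConfigKeys.DFS_ROUTER_HANDLER_COUNT_KEY,
        DFSConfigKeys.DFS_ROUTER_HANDLER_COUNT_DEFAULT);
    String rpcAddress = conf.get(
        DFSConfigKeys.DFS_ROUTER_RPC_ADDRESS_KEY,
        DFSConfigKeys.DFS_ROUTER_RPC_ADDRESS_DEFAULT);
    boolean rpcEnabled = conf.getBoolean(
        DFSConfigKeys.DFS_ROUTER_RPC_ENABLE,
        DFSConfigKeys.DFS_ROUTER_RPC_ENABLE_DEFAULT);
    System.out.println("Router RPC " + rpcAddress
        + " handlers=" + handlers + " enabled=" + rpcEnabled);
  }
}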
[25/29] hadoop git commit: HDFS-12335. Federation Metrics. Contributed by Inigo Goiri.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/887d424f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterMetrics.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterMetrics.java new file mode 100644 index 000..851538a --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterMetrics.java @@ -0,0 +1,73 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation.router; + +import static org.apache.hadoop.metrics2.impl.MsInfo.ProcessName; +import static org.apache.hadoop.metrics2.impl.MsInfo.SessionId; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.metrics2.MetricsSystem; +import org.apache.hadoop.metrics2.annotation.Metric; +import org.apache.hadoop.metrics2.annotation.Metrics; +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; +import org.apache.hadoop.metrics2.lib.MetricsRegistry; +import org.apache.hadoop.metrics2.lib.MutableGaugeInt; +import org.apache.hadoop.metrics2.source.JvmMetrics; + +/** + * This class is for maintaining the various Router activity statistics + * and publishing them through the metrics interfaces. 
+ */ +@Metrics(name="RouterActivity", about="Router metrics", context="dfs") +public class RouterMetrics { + + private final MetricsRegistry registry = new MetricsRegistry("router"); + + @Metric("Duration in SafeMode at startup in msec") + private MutableGaugeInt safeModeTime; + + private JvmMetrics jvmMetrics = null; + + RouterMetrics( + String processName, String sessionId, final JvmMetrics jvmMetrics) { +this.jvmMetrics = jvmMetrics; +registry.tag(ProcessName, processName).tag(SessionId, sessionId); + } + + public static RouterMetrics create(Configuration conf) { +String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY); +String processName = "Router"; +MetricsSystem ms = DefaultMetricsSystem.instance(); +JvmMetrics jm = JvmMetrics.create(processName, sessionId, ms); + +return ms.register(new RouterMetrics(processName, sessionId, jm)); + } + + public JvmMetrics getJvmMetrics() { +return jvmMetrics; + } + + public void shutdown() { +DefaultMetricsSystem.shutdown(); + } + + public void setSafeModeTime(long elapsed) { +safeModeTime.set((int) elapsed); + } +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/887d424f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterMetricsService.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterMetricsService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterMetricsService.java new file mode 100644 index 000..f4debce --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterMetricsService.java @@ -0,0 +1,108 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */
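To make the metrics lifecycle above concrete, here is a hedged sketch of how a caller drives RouterMetrics; only the RouterMetrics calls come from the class shown above, the timing code around them is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.federation.router.RouterMetrics;
import org.apache.hadoop.util.Time;

public class RouterMetricsSketch {
  public static void main(String[] args) {
    // create() registers the source (plus JVM metrics) with the
    // default metrics system under the "Router" process name.
    RouterMetrics metrics = RouterMetrics.create(new Configuration());
    long start = Time.monotonicNow();
    // ... startup work that happens while in safe mode would go here ...
    metrics.setSafeModeTime(Time.monotonicNow() - start);
    metrics.shutdown(); // tears down the DefaultMetricsSystem
  }
}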
[22/29] hadoop git commit: HDFS-12450. Fixing TestNamenodeHeartbeat and support non-HA. Contributed by Inigo Goiri.
HDFS-12450. Fixing TestNamenodeHeartbeat and support non-HA. Contributed by Inigo Goiri.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7e2ceb67
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7e2ceb67
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7e2ceb67
Branch: refs/heads/HDFS-10467
Commit: 7e2ceb67ebe34fbaace46e6d8ef2836f96639f30
Parents: a034972
Author: Inigo Goiri
Authored: Fri Sep 15 16:02:12 2017 -0700
Committer: Inigo Goiri
Committed: Mon Oct 2 18:33:15 2017 -0700
--
 .../router/NamenodeHeartbeatService.java| 47 
 .../server/federation/RouterDFSCluster.java | 23 +-
 2 files changed, 50 insertions(+), 20 deletions(-)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7e2ceb67/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java index fe4f939..38f63e5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java @@ -94,8 +94,9 @@ public class NamenodeHeartbeatService extends PeriodicService { */ public NamenodeHeartbeatService( ActiveNamenodeResolver resolver, String nsId, String nnId) { -super(NamenodeHeartbeatService.class.getSimpleName() + " " + nsId + " " + -nnId); +super(NamenodeHeartbeatService.class.getSimpleName() + +(nsId == null ? "" : " " + nsId) + +(nnId == null ? 
"" : " " + nnId)); this.resolver = resolver; @@ -109,28 +110,28 @@ public class NamenodeHeartbeatService extends PeriodicService { this.conf = configuration; +String nnDesc = nameserviceId; if (this.namenodeId != null && !this.namenodeId.isEmpty()) { this.localTarget = new NNHAServiceTarget( conf, nameserviceId, namenodeId); + nnDesc += "-" + namenodeId; } else { this.localTarget = null; } // Get the RPC address for the clients to connect this.rpcAddress = getRpcAddress(conf, nameserviceId, namenodeId); -LOG.info("{}-{} RPC address: {}", -nameserviceId, namenodeId, rpcAddress); +LOG.info("{} RPC address: {}", nnDesc, rpcAddress); // Get the Service RPC address for monitoring this.serviceAddress = DFSUtil.getNamenodeServiceAddr(conf, nameserviceId, namenodeId); if (this.serviceAddress == null) { - LOG.error("Cannot locate RPC service address for NN {}-{}, " + - "using RPC address {}", nameserviceId, namenodeId, this.rpcAddress); + LOG.error("Cannot locate RPC service address for NN {}, " + + "using RPC address {}", nnDesc, this.rpcAddress); this.serviceAddress = this.rpcAddress; } -LOG.info("{}-{} Service RPC address: {}", -nameserviceId, namenodeId, serviceAddress); +LOG.info("{} Service RPC address: {}", nnDesc, serviceAddress); // Get the Lifeline RPC address for faster monitoring this.lifelineAddress = @@ -138,13 +139,12 @@ public class NamenodeHeartbeatService extends PeriodicService { if (this.lifelineAddress == null) { this.lifelineAddress = this.serviceAddress; } -LOG.info("{}-{} Lifeline RPC address: {}", -nameserviceId, namenodeId, lifelineAddress); +LOG.info("{} Lifeline RPC address: {}", nnDesc, lifelineAddress); // Get the Web address for UI this.webAddress = DFSUtil.getNamenodeWebAddr(conf, nameserviceId, namenodeId); -LOG.info("{}-{} Web address: {}", nameserviceId, namenodeId, webAddress); +LOG.info("{} Web address: {}", nnDesc, webAddress); this.setIntervalMs(conf.getLong( DFS_ROUTER_HEARTBEAT_INTERVAL_MS, @@ -173,7 +173,7 @@ public class NamenodeHeartbeatService extends PeriodicService { String confKey = DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY; String ret = conf.get(confKey); -if (nsId != null && nnId != null) { +if (nsId != null || nnId != null) { // Get if for the proper nameservice and namenode confKey = DFSUtil.addKeySuffixes(confKey, nsId, nnId); ret = conf.get(confKey); @@ -182,10 +182,16 @@ public class NamenodeHeartbeatService extends PeriodicService { if (ret == null) { Map
[06/29] hadoop git commit: HDFS-11546. Federation Router RPC server. Contributed by Jason Kace and Inigo Goiri.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d135a61d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java index 24792bb..4bae71e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java @@ -17,16 +17,109 @@ */ package org.apache.hadoop.hdfs.server.federation.router; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_HANDLER_COUNT_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_HANDLER_COUNT_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_HANDLER_QUEUE_SIZE_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_HANDLER_QUEUE_SIZE_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_READER_COUNT_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_READER_COUNT_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_READER_QUEUE_SIZE_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_READER_QUEUE_SIZE_KEY; + +import java.io.FileNotFoundException; import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.Collection; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; +import java.util.TreeMap; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.crypto.CryptoProtocolVersion; +import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries; +import org.apache.hadoop.fs.CacheFlag; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.ContentSummary; +import org.apache.hadoop.fs.CreateFlag; +import org.apache.hadoop.fs.FileAlreadyExistsException; +import org.apache.hadoop.fs.FsServerDefaults; +import org.apache.hadoop.fs.Options; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.QuotaUsage; +import org.apache.hadoop.fs.StorageType; +import org.apache.hadoop.fs.XAttr; +import org.apache.hadoop.fs.XAttrSetFlag; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.AddBlockFlag; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DFSUtil; +import org.apache.hadoop.hdfs.inotify.EventBatchList; +import org.apache.hadoop.hdfs.protocol.AddingECPolicyResponse; +import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; +import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry; +import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; +import org.apache.hadoop.hdfs.protocol.CachePoolEntry; +import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.ClientProtocol; +import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; +import org.apache.hadoop.hdfs.protocol.DatanodeID; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import 
org.apache.hadoop.hdfs.protocol.DirectoryListing; +import org.apache.hadoop.hdfs.protocol.EncryptionZone; +import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; +import org.apache.hadoop.hdfs.protocol.ExtendedBlock; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; +import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; +import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus; +import org.apache.hadoop.hdfs.protocol.LocatedBlock; +import org.apache.hadoop.hdfs.protocol.LocatedBlocks; +import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo; +import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; +import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol; +import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB; +import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB; +import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey; +import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver; +import
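Beyond the imports, the interesting machinery in this patch is RemoteMethod/RemoteParam plus RouterRpcClient. A hedged sketch of the invocation pattern the class names suggest; the call shapes are inferred from the file list, not verbatim from the patch:

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation;
import org.apache.hadoop.hdfs.server.federation.router.RemoteMethod;
import org.apache.hadoop.hdfs.server.federation.router.RemoteParam;
import org.apache.hadoop.hdfs.server.federation.router.RouterRpcClient;

public class RemoteInvocationSketch {
  // The Router resolves the client path to per-subcluster locations, then
  // describes the ClientProtocol call once; RemoteParam marks the argument
  // to be rewritten with each subcluster's local path.
  static void setPermission(RouterRpcClient rpcClient,
      List<RemoteLocation> locations, FsPermission permission)
      throws IOException {
    RemoteMethod method = new RemoteMethod("setPermission",
        new Class<?>[] {String.class, FsPermission.class},
        new RemoteParam(), permission);
    rpcClient.invokeSequential(locations, method);
  }
}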
[23/29] hadoop git commit: HDFS-12312. Rebasing HDFS-10467 (2). Contributed by Inigo Goiri.
HDFS-12312. Rebasing HDFS-10467 (2). Contributed by Inigo Goiri.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/41ac038b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/41ac038b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/41ac038b
Branch: refs/heads/HDFS-10467
Commit: 41ac038b707a05fbf6ae09dc3cfac2b5a2d156ed
Parents: a4705dd
Author: Inigo Goiri
Authored: Wed Aug 16 17:31:37 2017 -0700
Committer: Inigo Goiri
Committed: Mon Oct 2 18:33:15 2017 -0700
--
 hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs | 1 -
 .../hadoop/hdfs/server/federation/router/RouterRpcServer.java | 1 +
 2 files changed, 1 insertion(+), 1 deletion(-)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/41ac038b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs index d51a8e2..d122ff7 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs @@ -31,7 +31,6 @@ function hadoop_usage hadoop_add_option "--hosts filename" "list of hosts to use in worker mode" hadoop_add_option "--workers" "turn on worker mode" -<<<<<<< HEAD hadoop_add_subcommand "balancer" daemon "run a cluster balancing utility" hadoop_add_subcommand "cacheadmin" admin "configure the HDFS cache" hadoop_add_subcommand "classpath" client "prints the class path needed to get the hadoop jar and the required libraries"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/41ac038b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java index eaaab39..c77d255 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java @@ -1946,6 +1946,7 @@ public class RouterRpcServer extends AbstractService implements ClientProtocol { } long inodeId = 0; return new HdfsFileStatus(0, true, 0, 0, modTime, accessTime, permission, +EnumSet.noneOf(HdfsFileStatus.Flags.class), owner, group, new byte[0], DFSUtil.string2Bytes(name), inodeId, childrenNum, null, (byte) 0, null); }
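The one-line RouterRpcServer fix above adapts to a trunk change that added a flags argument to the HdfsFileStatus constructor. The idiom it uses, in isolation:

import java.util.EnumSet;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

public class FlagsSketch {
  public static void main(String[] args) {
    // An empty, type-safe flag set for the new constructor parameter.
    EnumSet<HdfsFileStatus.Flags> flags =
        EnumSet.noneOf(HdfsFileStatus.Flags.class);
    System.out.println(flags.isEmpty()); // true
  }
}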
[15/29] hadoop git commit: HDFS-10880. Federation Mount Table State Store internal API. Contributed by Jason Kace and Inigo Goiri.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a93a7655/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RemoveMountTableEntryRequestPBImpl.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RemoveMountTableEntryRequestPBImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RemoveMountTableEntryRequestPBImpl.java new file mode 100644 index 000..7f7c998 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RemoveMountTableEntryRequestPBImpl.java @@ -0,0 +1,76 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb; + +import java.io.IOException; + +import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoveMountTableEntryRequestProto; +import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoveMountTableEntryRequestProtoOrBuilder; +import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest; +import org.apache.hadoop.hdfs.server.federation.store.records.impl.pb.PBRecord; + +import com.google.protobuf.Message; + +/** + * Protobuf implementation of the state store API object + * RemoveMountTableEntryRequest. 
+ */ +public class RemoveMountTableEntryRequestPBImpl +extends RemoveMountTableEntryRequest implements PBRecord { + + private FederationProtocolPBTranslator<RemoveMountTableEntryRequestProto, RemoveMountTableEntryRequestProto.Builder, RemoveMountTableEntryRequestProtoOrBuilder> translator = + new FederationProtocolPBTranslator<RemoveMountTableEntryRequestProto, RemoveMountTableEntryRequestProto.Builder, RemoveMountTableEntryRequestProtoOrBuilder>( + RemoveMountTableEntryRequestProto.class); + + public RemoveMountTableEntryRequestPBImpl() { + } + + public RemoveMountTableEntryRequestPBImpl( + RemoveMountTableEntryRequestProto proto) { +this.setProto(proto); + } + + @Override + public RemoveMountTableEntryRequestProto getProto() { +return this.translator.build(); + } + + @Override + public void setProto(Message proto) { +this.translator.setProto(proto); + } + + @Override + public void readInstance(String base64String) throws IOException { +this.translator.readInstance(base64String); + } + + @Override + public String getSrcPath() { +return this.translator.getProtoOrBuilder().getSrcPath(); + } + + @Override + public void setSrcPath(String path) { +this.translator.getBuilder().setSrcPath(path); + } +} \ No newline at end of file http://git-wip-us.apache.org/repos/asf/hadoop/blob/a93a7655/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RemoveMountTableEntryResponsePBImpl.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RemoveMountTableEntryResponsePBImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RemoveMountTableEntryResponsePBImpl.java new file mode 100644 index 000..0c943ac --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RemoveMountTableEntryResponsePBImpl.java @@ -0,0 +1,76 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + *
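A hedged round-trip sketch of the PBRecord pattern implemented above, using only the methods shown (the path value is illustrative):

import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoveMountTableEntryRequestProto;
import org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb.RemoveMountTableEntryRequestPBImpl;

public class PBRecordSketch {
  public static void main(String[] args) {
    RemoveMountTableEntryRequestPBImpl request =
        new RemoveMountTableEntryRequestPBImpl();
    request.setSrcPath("/data");  // mutates the translator's builder
    RemoveMountTableEntryRequestProto proto = request.getProto(); // built snapshot
    System.out.println(proto.getSrcPath()); // "/data"
  }
}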
[14/29] hadoop git commit: HDFS-11554. [Documentation] Router-based federation documentation. Contributed by Inigo Goiri.
HDFS-11554. [Documentation] Router-based federation documentation. Contributed by Inigo Goiri.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a4705dd0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a4705dd0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a4705dd0
Branch: refs/heads/HDFS-10467
Commit: a4705dd036aea33a19a9433d1557e728eafabaf7
Parents: cb6986a
Author: Inigo Goiri
Authored: Wed Aug 16 17:23:29 2017 -0700
Committer: Inigo Goiri
Committed: Mon Oct 2 18:33:14 2017 -0700
--
 .../src/site/markdown/HDFSRouterFederation.md | 170 +++
 .../site/resources/images/routerfederation.png | Bin 0 -> 24961 bytes
 2 files changed, 170 insertions(+)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4705dd0/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md new file mode 100644 index 000..f094238 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md @@ -0,0 +1,170 @@ + +HDFS Router-based Federation + + + + +Introduction + + +NameNodes have scalability limits because of the metadata overhead comprised of inodes (files and directories) and file blocks, the number of Datanode heartbeats, and the number of HDFS RPC client requests. +The common solution is to split the filesystem into smaller subclusters [HDFS Federation](.Federation.html) and provide a federated view [ViewFs](.ViewFs.html). +The problem is how to maintain the split of the subclusters (e.g., namespace partition), which forces users to connect to multiple subclusters and manage the allocation of folders/files to them. + + +Architecture + + +A natural extension to this partitioned federation is to add a layer of software responsible for federating the namespaces. +This extra layer allows users to access any subcluster transparently, lets subclusters manage their own block pools independently, and supports rebalancing of data across subclusters. +To accomplish these goals, the federation layer directs block accesses to the proper subcluster, maintains the state of the namespaces, and provides mechanisms for data rebalancing. +This layer must be scalable, highly available, and fault tolerant. 

+This federation layer comprises multiple components. +The _Router_ component that has the same interface as a NameNode, and forwards the client requests to the correct subcluster, based on ground-truth information from a State Store. +The _State Store_ combines a remote _Mount Table_ (in the flavor of [ViewFs](.ViewFs.html), but shared between clients) and utilization (load/capacity) information about the subclusters. +This approach has the same architecture as [YARN federation](../hadoop-yarn/Federation.html). + +![Router-based Federation Sequence Diagram | width=800](./images/routerfederation.png) + + +### Example flow +The simplest configuration deploys a Router on each NameNode machine. +The Router monitors the local NameNode and heartbeats the state to the State Store. +When a regular DFS client contacts any of the Routers to access a file in the federated filesystem, the Router checks the Mount Table in the State Store (i.e., the local cache) to find out which subcluster contains the file. +Then it checks the Membership table in the State Store (i.e., the local cache) for the NameNode responsible for the subcluster. 
+After it has identified the correct NameNode, the Router proxies the request. +The client accesses Datanodes directly. + + +### Router +There can be multiple Routers in the system with soft state. +Each Router has two roles: + +* Federated interface: expose a single, global NameNode interface to the clients and forward the requests to the active NameNode in the correct subcluster +* NameNode heartbeat: maintain the information about a NameNode in the State Store + + Federated interface +The Router receives a client request, checks the State Store for the correct subcluster, and forwards the request to the active NameNode of that subcluster. +The reply from the NameNode then flows in the opposite direction. +The Routers are stateless and can be behind a load balancer. +For performance, the Router also caches remote mount table entries and the state of the subclusters. +To make sure that changes have been propagated to all Routers, each Router heartbeats its state to the State Store. + +The communications between the Routers and the State Store are cached (with timed
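The Mount Table lookup in the flow above is essentially a longest-prefix match from a federated path to a subcluster. A self-contained sketch of the idea (an assumption for illustration, not the State Store's actual code; the mount points are invented):

import java.util.TreeMap;

public class MountLookupSketch {
  public static void main(String[] args) {
    TreeMap<String, String> mounts = new TreeMap<>();
    mounts.put("/", "ns0");
    mounts.put("/user", "ns1");
    mounts.put("/data", "ns2");

    String path = "/user/alice/file";
    String best = "/";
    for (String mount : mounts.keySet()) {
      boolean isPrefix = path.equals(mount)
          || path.startsWith(mount.equals("/") ? "/" : mount + "/");
      if (isPrefix && mount.length() > best.length()) {
        best = mount; // keep the most specific mount point
      }
    }
    System.out.println(best + " -> " + mounts.get(best)); // /user -> ns1
  }
}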
[11/29] hadoop git commit: HDFS-10629. Federation Router. Contributed by Jason Kace and Inigo Goiri.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6918e94/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java new file mode 100644 index 000..ee6f57d --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java @@ -0,0 +1,290 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver; +import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeContext; +import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeServiceState; +import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamespaceInfo; +import org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver; +import org.apache.hadoop.hdfs.server.federation.resolver.NamenodePriorityComparator; +import org.apache.hadoop.hdfs.server.federation.resolver.NamenodeStatusReport; +import org.apache.hadoop.hdfs.server.federation.resolver.PathLocation; +import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation; +import org.apache.hadoop.hdfs.server.federation.store.StateStoreService; +import org.apache.hadoop.util.Time; + +/** + * In-memory cache/mock of a namenode and file resolver. Stores the most + * recently updated NN information for each nameservice and block pool. Also + * stores a virtual mount table for resolving global namespace paths to local NN + * paths. 
+ */ +public class MockResolver +implements ActiveNamenodeResolver, FileSubclusterResolver { + + private Map<String, List<? extends FederationNamenodeContext>> resolver = + new HashMap<String, List<? extends FederationNamenodeContext>>(); + private Map<String, List<RemoteLocation>> locations = + new HashMap<String, List<RemoteLocation>>(); + private Set<FederationNamespaceInfo> namespaces = + new HashSet<FederationNamespaceInfo>(); + private String defaultNamespace = null; + + public MockResolver(Configuration conf, StateStoreService store) { +this.cleanRegistrations(); + } + + public void addLocation(String mount, String nameservice, String location) { +RemoteLocation remoteLocation = new RemoteLocation(nameservice, location); +List<RemoteLocation> locationsList = locations.get(mount); +if (locationsList == null) { + locationsList = new LinkedList<RemoteLocation>(); + locations.put(mount, locationsList); +} +if (!locationsList.contains(remoteLocation)) { + locationsList.add(remoteLocation); +} + +if (this.defaultNamespace == null) { + this.defaultNamespace = nameservice; +} + } + + public synchronized void cleanRegistrations() { +this.resolver = +new HashMap<String, List<? extends FederationNamenodeContext>>(); +this.namespaces = new HashSet<FederationNamespaceInfo>(); + } + + @Override + public void updateActiveNamenode( + String ns, InetSocketAddress successfulAddress) { + +String address = successfulAddress.getHostName() + ":" +successfulAddress.getPort(); +String key = ns; +if (key != null) { + // Update the active entry + @SuppressWarnings("unchecked") + List<FederationNamenodeContext> iterator = + (List<FederationNamenodeContext>) resolver.get(key); + for (FederationNamenodeContext namenode : iterator) { +if (namenode.getRpcAddress().equals(address)) { + MockNamenodeContext nn = (MockNamenodeContext) namenode; + nn.setState(FederationNamenodeServiceState.ACTIVE); + break; +} + } + Collections.sort(iterator, new NamenodePriorityComparator()); +} + } + + @Override + public List<? extends FederationNamenodeContext> + getNamenodesForNameserviceId(String nameserviceId) { +return resolver.get(nameserviceId); + }
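A short sketch of how a test might seed this mock, using the constructor and addLocation declared above (the mount points are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.federation.MockResolver;

public class MockResolverSketch {
  public static void main(String[] args) {
    // The store argument is unused by the mock, so null is acceptable here.
    MockResolver resolver = new MockResolver(new Configuration(), null);
    resolver.addLocation("/", "ns0", "/");         // first nameservice becomes default
    resolver.addLocation("/data", "ns1", "/data"); // paths under /data go to ns1
  }
}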
[28/29] hadoop git commit: HDFS-12381. [Documentation] Adding configuration keys for the Router. Contributed by Inigo Goiri.
HDFS-12381. [Documentation] Adding configuration keys for the Router. Contributed by Inigo Goiri.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/34d5dc3a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/34d5dc3a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/34d5dc3a
Branch: refs/heads/HDFS-10467
Commit: 34d5dc3a47329430781520e866823cc5bf944a7d
Parents: 7e2ceb6
Author: Inigo Goiri
Authored: Fri Sep 22 13:06:10 2017 -0700
Committer: Inigo Goiri
Committed: Mon Oct 2 18:33:15 2017 -0700
--
 .../src/main/resources/hdfs-default.xml | 11 +-
 .../src/site/markdown/HDFSRouterFederation.md | 159 +--
 2 files changed, 156 insertions(+), 14 deletions(-)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/34d5dc3a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index d58fcae..9a75f7d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -4652,7 +4652,8 @@ dfs.federation.router.rpc.enable true - If the RPC service to handle client requests in the router is enabled. + If true, the RPC service to handle client requests in the router is + enabled. @@ -4756,7 +4757,7 @@ dfs.federation.router.admin.enable true - If the RPC admin service to handle client requests in the router is + If true, the RPC admin service to handle client requests in the router is enabled. @@ -4810,7 +4811,7 @@ dfs.federation.router.store.enable true - If the Router connects to the State Store. + If true, the Router connects to the State Store. @@ -4858,7 +4859,7 @@ dfs.federation.router.heartbeat.enable true - Enables the Router to heartbeat into the State Store. + If true, the Router heartbeats into the State Store. @@ -4882,7 +4883,7 @@ dfs.federation.router.monitor.localnamenode.enable true - If the Router should monitor the namenode in the local machine. + If true, the Router should monitor the namenode in the local machine.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/34d5dc3a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md index f094238..1cea7f6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md @@ -21,7 +21,7 @@ Introduction NameNodes have scalability limits because of the metadata overhead comprised of inodes (files and directories) and file blocks, the number of Datanode heartbeats, and the number of HDFS RPC client requests. -The common solution is to split the filesystem into smaller subclusters [HDFS Federation](.Federation.html) and provide a federated view [ViewFs](.ViewFs.html). +The common solution is to split the filesystem into smaller subclusters [HDFS Federation](./Federation.html) and provide a federated view [ViewFs](./ViewFs.html). The problem is how to maintain the split of the subclusters (e.g., namespace partition), which forces users to connect to multiple subclusters and manage the allocation of folders/files to them. @@ -35,7 +35,7 @@ This layer must be scalable, highly available, and fault tolerant. 
This federation layer comprises multiple components. The _Router_ component that has the same interface as a NameNode, and forwards the client requests to the correct subcluster, based on ground-truth information from a State Store. -The _State Store_ combines a remote _Mount Table_ (in the flavor of [ViewFs](.ViewFs.html), but shared between clients) and utilization (load/capacity) information about the subclusters. +The _State Store_ combines a remote _Mount Table_ (in the flavor of [ViewFs](./ViewFs.html), but shared between clients) and utilization (load/capacity) information about the subclusters. This approach has the same architecture as [YARN federation](../hadoop-yarn/Federation.html). ![Router-based Federation Sequence Diagram | width=800](./images/routerfederation.png) @@ -101,11 +101,11 @@ To interact with the users and the administrators, the
hadoop git commit: YARN-2037. Add work preserving restart support for Unmanaged AMs. (Botong Huang via Subru).
Repository: hadoop
Updated Branches: refs/heads/trunk 015abcd8c -> d4d2fd1ac

YARN-2037. Add work preserving restart support for Unmanaged AMs. (Botong Huang via Subru).

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d4d2fd1a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d4d2fd1a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d4d2fd1a
Branch: refs/heads/trunk
Commit: d4d2fd1acd2fdddf04f45e67897804eea30d79a1
Parents: 015abcd
Author: Subru Krishnan
Authored: Mon Oct 2 18:14:44 2017 -0700
Committer: Subru Krishnan
Committed: Mon Oct 2 18:14:44 2017 -0700
--
 .../yarn/api/ApplicationMasterProtocol.java | 21 ++-
 .../records/ApplicationSubmissionContext.java | 17 +-
 .../ApplicationMasterService.java | 26 +--
 .../resourcemanager/DefaultAMSProcessor.java| 5 +
 .../rmapp/attempt/RMAppAttemptImpl.java | 6 +-
 .../scheduler/AbstractYarnScheduler.java| 13 +-
 .../TestWorkPreservingUnmanagedAM.java | 159 +++
 7 files changed, 214 insertions(+), 33 deletions(-)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4d2fd1a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationMasterProtocol.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationMasterProtocol.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationMasterProtocol.java index 4d78961..eb40fc7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationMasterProtocol.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationMasterProtocol.java @@ -55,27 +55,32 @@ public interface ApplicationMasterProtocol { * The interface used by a new ApplicationMaster to register with * the ResourceManager. * - * + * * * The ApplicationMaster needs to provide details such as RPC * Port, HTTP tracking url etc. as specified in * {@link RegisterApplicationMasterRequest}. * - * + * * * The ResourceManager responds with critical details such as * maximum resource capabilities in the cluster as specified in * {@link RegisterApplicationMasterResponse}. * - * - * @param request - * registration request + * + * + * Re-register is only allowed for Unmanaged Application Master + * (UAM) HA, with + * {@link org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext#getKeepContainersAcrossApplicationAttempts()} + * set to true. + * + * + * @param request registration request * @return registration respose * @throws YarnException * @throws IOException - * @throws InvalidApplicationMasterRequestException - * The exception is thrown when an ApplicationMaster tries to - * register more then once. + * @throws InvalidApplicationMasterRequestException The exception is thrown + * when an ApplicationMaster tries to register more then once. 
* @see RegisterApplicationMasterRequest * @see RegisterApplicationMasterResponse */ http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4d2fd1a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java index 4f1d147..a6bbca7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationSubmissionContext.java @@ -395,15 +395,18 @@ public abstract class ApplicationSubmissionContext { * Set the flag which indicates whether to keep containers across application * attempts. * - * If the flag is true, running containers will not be killed when application - * attempt fails and these containers will be retrieved by the new application - * attempt on registration via + * For managed AM, if the flag is true, running containers will not be killed + * when application attempt fails and these containers will be retrieved by + * the new application attempt on registration via
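Pulling the new contract together, a hedged sketch of a UAM submission that opts into work-preserving restart; only the two setters are the point here, all other submission plumbing is elided:

import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.util.Records;

public class UamRestartSketch {
  public static void main(String[] args) {
    ApplicationSubmissionContext ctx =
        Records.newRecord(ApplicationSubmissionContext.class);
    ctx.setUnmanagedAM(true);
    // With this flag, a re-registering UAM gets its running containers
    // reported back instead of having them killed on attempt failure.
    ctx.setKeepContainersAcrossApplicationAttempts(true);
  }
}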
[16/29] hadoop git commit: HDFS-11826. Federation Namenode Heartbeat. Contributed by Inigo Goiri.
HDFS-11826. Federation Namenode Heartbeat. Contributed by Inigo Goiri.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/14339987
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/14339987
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/14339987
Branch: refs/heads/HDFS-10467
Commit: 14339987987494f1186f70e98d1f5155071c
Parents: 651420b
Author: Inigo Goiri
Authored: Tue Aug 1 14:40:27 2017 -0700
Committer: Inigo Goiri
Committed: Mon Oct 2 16:44:12 2017 -0700
--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 14 +
 .../java/org/apache/hadoop/hdfs/DFSUtil.java| 38 ++
 .../resolver/NamenodeStatusReport.java | 193 ++
 .../federation/router/FederationUtil.java | 66 
 .../router/NamenodeHeartbeatService.java| 350 +++
 .../hdfs/server/federation/router/Router.java | 112 ++
 .../src/main/resources/hdfs-default.xml | 32 ++
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java | 8 +
 .../hdfs/server/federation/MockResolver.java| 9 +-
 .../server/federation/RouterConfigBuilder.java | 22 ++
 .../server/federation/RouterDFSCluster.java | 43 +++
 .../router/TestNamenodeHeartbeat.java | 168 +
 .../server/federation/router/TestRouter.java| 3 +
 13 files changed, 1057 insertions(+), 1 deletion(-)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/14339987/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index b50c538..f0b0c63 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -1147,6 +1147,20 @@ public class DFSConfigKeys extends CommonConfigurationKeys { FEDERATION_ROUTER_PREFIX + "rpc.enable"; public static final boolean DFS_ROUTER_RPC_ENABLE_DEFAULT = true; + // HDFS Router heartbeat + public static final String DFS_ROUTER_HEARTBEAT_ENABLE = + FEDERATION_ROUTER_PREFIX + "heartbeat.enable"; + public static final boolean DFS_ROUTER_HEARTBEAT_ENABLE_DEFAULT = true; + public static final String DFS_ROUTER_HEARTBEAT_INTERVAL_MS = + FEDERATION_ROUTER_PREFIX + "heartbeat.interval"; + public static final long DFS_ROUTER_HEARTBEAT_INTERVAL_MS_DEFAULT = + TimeUnit.SECONDS.toMillis(5); + public static final String DFS_ROUTER_MONITOR_NAMENODE = + FEDERATION_ROUTER_PREFIX + "monitor.namenode"; + public static final String DFS_ROUTER_MONITOR_LOCAL_NAMENODE = + FEDERATION_ROUTER_PREFIX + "monitor.localnamenode.enable"; + public static final boolean DFS_ROUTER_MONITOR_LOCAL_NAMENODE_DEFAULT = true; + // HDFS Router NN client public static final String DFS_ROUTER_NAMENODE_CONNECTION_POOL_SIZE = FEDERATION_ROUTER_PREFIX + "connection.pool-size";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/14339987/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java index 32a1cae..2f9781a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java @@ -1324,6 +1324,44 @@ public class DFSUtil { } /** + * Map a logical namenode ID to its web address. 
Use the given nameservice if + * specified, or the configured one if none is given. + * + * @param conf Configuration + * @param nsId which nameservice nnId is a part of, optional + * @param nnId the namenode ID to get the service addr for + * @return the service addr, null if it could not be determined + */ + public static String getNamenodeWebAddr(final Configuration conf, String nsId, + String nnId) { + +if (nsId == null) { + nsId = getOnlyNameServiceIdOrNull(conf); +} + +String webAddrKey = DFSUtilClient.concatSuffixes( +DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, nsId, nnId); + +String webAddr = +conf.get(webAddrKey, DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_DEFAULT); +return webAddr; + } + + /** + * Get all of the Web addresses of the individual NNs in a given nameservice. + * + * @param conf Configuration + * @param nsId the
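A quick usage sketch of the helper added above; the host and port are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;

public class WebAddrSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set("dfs.namenode.http-address.ns1.nn1", "machine1.example.com:50070");
    // The suffixed key wins; otherwise the configured default is returned.
    System.out.println(DFSUtil.getNamenodeWebAddr(conf, "ns1", "nn1"));
  }
}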
[14/29] hadoop git commit: HDFS-10646. Federation admin tool. Contributed by Inigo Goiri.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/15b15501/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java new file mode 100644 index 000..170247f --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java @@ -0,0 +1,261 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation.router; + +import static org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.synchronizeRecords; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder; +import org.apache.hadoop.hdfs.server.federation.RouterDFSCluster.RouterContext; +import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster; +import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager; +import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation; +import org.apache.hadoop.hdfs.server.federation.store.StateStoreService; +import org.apache.hadoop.hdfs.server.federation.store.impl.MountTableStoreImpl; +import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest; +import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse; +import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest; +import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse; +import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest; +import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryRequest; +import org.apache.hadoop.hdfs.server.federation.store.records.MountTable; +import org.apache.hadoop.util.Time; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +/** + * The administrator interface of the {@link Router} implemented by + * {@link RouterAdminServer}. 
+ */ +public class TestRouterAdmin { + + private static StateStoreDFSCluster cluster; + private static RouterContext routerContext; + public static final String RPC_BEAN = + "Hadoop:service=Router,name=FederationRPC"; + private static List<MountTable> mockMountTable; + private static StateStoreService stateStore; + + @BeforeClass + public static void globalSetUp() throws Exception { +cluster = new StateStoreDFSCluster(false, 1); +// Build and start a router with State Store + admin + RPC +Configuration conf = new RouterConfigBuilder() +.stateStore() +.admin() +.rpc() +.build(); +cluster.addRouterOverrides(conf); +cluster.startRouters(); +routerContext = cluster.getRandomRouter(); +mockMountTable = cluster.generateMockMountTable(); +Router router = routerContext.getRouter(); +stateStore = router.getStateStore(); + } + + @AfterClass + public static void tearDown() { +cluster.stopRouter(routerContext); + } + + @Before + public void testSetup() throws Exception { +assertTrue( +synchronizeRecords(stateStore, mockMountTable, MountTable.class)); + } + + @Test + public void testAddMountTable() throws IOException { +MountTable newEntry = MountTable.newInstance( +"/testpath", Collections.singletonMap("ns0", "/testdir"), +Time.now(), Time.now()); + +RouterClient client = routerContext.getAdminClient(); +MountTableManager mountTable = client.getMountTableManager(); + +// Existing mount table size +List<MountTable> records =
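Following the pattern of the test above, a hedged fragment that adds one mount entry through the admin interface; it reuses the client/mountTable handles and imports from TestRouterAdmin, and the /tmp mapping is illustrative:

// Reuses client, mountTable, and imports from the test class above.
MountTable entry = MountTable.newInstance(
    "/tmp", Collections.singletonMap("ns0", "/tmp"),
    Time.now(), Time.now());
AddMountTableEntryRequest request =
    AddMountTableEntryRequest.newInstance(entry);
AddMountTableEntryResponse response =
    mountTable.addMountTableEntry(request);
assertTrue(response.getStatus()); // entry accepted by the State Store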
[26/29] hadoop git commit: HDFS-10631. Federation State Store ZooKeeper implementation. Contributed by Jason Kace and Inigo Goiri.
HDFS-10631. Federation State Store ZooKeeper implementation. Contributed by Jason Kace and Inigo Goiri.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1416ec5d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1416ec5d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1416ec5d
Branch: refs/heads/HDFS-10467
Commit: 1416ec5d01edacf249c622fa0c9d1c572a7df2ac
Parents: 48a8fee
Author: Inigo Goiri
Authored: Mon Aug 21 11:40:41 2017 -0700
Committer: Inigo Goiri
Committed: Mon Oct 2 16:44:13 2017 -0700
--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml | 9 +
 .../driver/impl/StateStoreSerializableImpl.java | 19 ++
 .../driver/impl/StateStoreZooKeeperImpl.java| 298 +++
 .../store/driver/TestStateStoreDriverBase.java | 2 +-
 .../store/driver/TestStateStoreZK.java | 105 +++
 5 files changed, 432 insertions(+), 1 deletion(-)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1416ec5d/hadoop-hdfs-project/hadoop-hdfs/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml index 93216db..d22d6ee 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml @@ -203,6 +203,15 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;> com.fasterxml.jackson.core jackson-databind + + org.apache.curator + curator-framework + + + org.apache.curator + curator-test + test + 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1416ec5d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreSerializableImpl.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreSerializableImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreSerializableImpl.java index e9b3fdf..e2038fa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreSerializableImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreSerializableImpl.java @@ -30,6 +30,11 @@ import org.apache.hadoop.hdfs.server.federation.store.records.BaseRecord; */ public abstract class StateStoreSerializableImpl extends StateStoreBaseImpl { + /** Mark for slashes in path names. */ + protected static final String SLASH_MARK = "0SLASH0"; + /** Mark for colon in path names. */ + protected static final String COLON_MARK = "_"; + /** Default serializer for this driver. */ private StateStoreSerializer serializer; @@ -74,4 +79,18 @@ public abstract class StateStoreSerializableImpl extends StateStoreBaseImpl { String data, Class<T> clazz, boolean includeDates) throws IOException { return serializer.deserialize(data, clazz); } + + /** + * Get the primary key for a record. If we don't want to store in folders, we + * need to remove / from the name. + * + * @param record Record to get the primary key for. + * @return Primary key for the record. 
+ */ + protected static String getPrimaryKey(BaseRecord record) { +String primaryKey = record.getPrimaryKey(); +primaryKey = primaryKey.replaceAll("/", SLASH_MARK); +primaryKey = primaryKey.replaceAll(":", COLON_MARK); +return primaryKey; + } } \ No newline at end of file http://git-wip-us.apache.org/repos/asf/hadoop/blob/1416ec5d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreZooKeeperImpl.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreZooKeeperImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreZooKeeperImpl.java new file mode 100644 index 000..ddcd537 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/impl/StateStoreZooKeeperImpl.java @@ -0,0 +1,298 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you
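A worked example of the escaping above, with an illustrative record key; SLASH_MARK and COLON_MARK are the constants defined earlier in this patch:

public class ZKKeySketch {
  public static void main(String[] args) {
    String primaryKey = "/user/alice:ns1"; // illustrative primary key
    String znode = primaryKey
        .replaceAll("/", "0SLASH0")  // SLASH_MARK
        .replaceAll(":", "_");       // COLON_MARK
    System.out.println(znode); // 0SLASH0user0SLASH0alice_ns1
  }
}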
[27/29] hadoop git commit: HDFS-12384. Fixing compilation issue with BanDuplicateClasses. Contributed by Inigo Goiri.
HDFS-12384. Fixing compilation issue with BanDuplicateClasses. Contributed by Inigo Goiri.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bec2f399
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bec2f399
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bec2f399
Branch: refs/heads/HDFS-10467
Commit: bec2f399c26d10f558764bc4f4c7b61f63788f31
Parents: 1416ec5
Author: Inigo Goiri
Authored: Thu Sep 7 13:53:08 2017 -0700
Committer: Inigo Goiri
Committed: Mon Oct 2 16:44:13 2017 -0700
--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml | 4 
 .../server/federation/router/RouterRpcServer.java| 15 +++
 2 files changed, 15 insertions(+), 4 deletions(-)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bec2f399/hadoop-hdfs-project/hadoop-hdfs/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml index d22d6ee..0fe491b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml @@ -205,10 +205,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;> org.apache.curator - curator-framework - - - org.apache.curator curator-test test

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bec2f399/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java index c77d255..f9b4a5d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java @@ -81,6 +81,7 @@ import org.apache.hadoop.hdfs.protocol.EncryptionZone; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction; import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; @@ -91,6 +92,7 @@ import org.apache.hadoop.hdfs.protocol.OpenFileEntry; import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; +import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol; import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB; import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB; @@ -1607,6 +1609,19 @@ public class RouterRpcServer extends AbstractService implements ClientProtocol { } @Override // ClientProtocol + public void reencryptEncryptionZone(String zone, ReencryptAction action) + throws IOException { +checkOperation(OperationCategory.WRITE, false); + } + + @Override // ClientProtocol + public BatchedEntries<ZoneReencryptionStatus> listReencryptionStatus( + long prevId) throws IOException { +checkOperation(OperationCategory.READ, false); +return null; + } + + @Override // ClientProtocol + public void setXAttr(String src, XAttr xAttr, EnumSet<XAttrSetFlag> 
flag) throws IOException { checkOperation(OperationCategory.WRITE);
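The two overrides added above follow the Router's standard stub pattern: an operation the Router cannot yet forward first declares its category and whether it is supported, so unsupported calls are rejected before any namenode is contacted. A minimal, self-contained sketch of that pattern (simplified names; not the actual RouterRpcServer code, which also validates the state of the RPC server):

import java.io.IOException;

/** Sketch of the RouterRpcServer stub pattern; names are simplified. */
public class OperationStubSketch {

  enum OperationCategory { READ, WRITE }

  /** Hypothetical stand-in for RouterRpcServer#checkOperation. */
  static void checkOperation(OperationCategory category, boolean supported)
      throws IOException {
    if (!supported) {
      // Reject before any remote namenode is contacted.
      throw new UnsupportedOperationException(
          "Operation " + category + " not supported by the Router");
    }
  }

  /** Unsupported write: validate the call, then reject it. */
  public void reencryptEncryptionZoneStub(String zone) throws IOException {
    checkOperation(OperationCategory.WRITE, false);
  }

  public static void main(String[] args) throws IOException {
    try {
      new OperationStubSketch().reencryptEncryptionZoneStub("/zone1");
    } catch (UnsupportedOperationException e) {
      System.out.println("Rejected as expected: " + e.getMessage());
    }
  }
}

The unreachable-looking return null in the committed stubs fits the same reading: the check presumably throws for unsupported operations, and the return value only satisfies the compiler.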
[15/29] hadoop git commit: HDFS-10646. Federation admin tool. Contributed by Inigo Goiri.
HDFS-10646. Federation admin tool. Contributed by Inigo Goiri. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/15b15501 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/15b15501 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/15b15501 Branch: refs/heads/HDFS-10467 Commit: 15b155017883d34741d24d73186708f1d9a72b4f Parents: 3688769 Author: Inigo Goiri Authored: Tue Aug 8 14:44:43 2017 -0700 Committer: Inigo Goiri Committed: Mon Oct 2 16:44:12 2017 -0700 -- hadoop-hdfs-project/hadoop-hdfs/pom.xml | 1 + .../hadoop-hdfs/src/main/bin/hdfs | 5 + .../hadoop-hdfs/src/main/bin/hdfs.cmd | 7 +- .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 19 ++ .../hdfs/protocolPB/RouterAdminProtocolPB.java | 44 +++ ...uterAdminProtocolServerSideTranslatorPB.java | 151 .../RouterAdminProtocolTranslatorPB.java| 150 .../resolver/MembershipNamenodeResolver.java| 34 +- .../hdfs/server/federation/router/Router.java | 52 +++ .../federation/router/RouterAdminServer.java| 183 ++ .../server/federation/router/RouterClient.java | 76 + .../hdfs/tools/federation/RouterAdmin.java | 341 +++ .../hdfs/tools/federation/package-info.java | 28 ++ .../src/main/proto/RouterProtocol.proto | 47 +++ .../src/main/resources/hdfs-default.xml | 46 +++ .../server/federation/RouterConfigBuilder.java | 26 ++ .../server/federation/RouterDFSCluster.java | 43 ++- .../server/federation/StateStoreDFSCluster.java | 148 .../federation/router/TestRouterAdmin.java | 261 ++ 19 files changed, 1644 insertions(+), 18 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/15b15501/hadoop-hdfs-project/hadoop-hdfs/pom.xml -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml index cc7a975..93216db 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml @@ -332,6 +332,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;> editlog.proto fsimage.proto FederationProtocol.proto + RouterProtocol.proto http://git-wip-us.apache.org/repos/asf/hadoop/blob/15b15501/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs index b1f44a4..d51a8e2 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs @@ -31,6 +31,7 @@ function hadoop_usage hadoop_add_option "--hosts filename" "list of hosts to use in worker mode" hadoop_add_option "--workers" "turn on worker mode" +<<< HEAD hadoop_add_subcommand "balancer" daemon "run a cluster balancing utility" hadoop_add_subcommand "cacheadmin" admin "configure the HDFS cache" hadoop_add_subcommand "classpath" client "prints the class path needed to get the hadoop jar and the required libraries" @@ -42,6 +43,7 @@ function hadoop_usage hadoop_add_subcommand "diskbalancer" daemon "Distributes data evenly among disks on a given node" hadoop_add_subcommand "envvars" client "display computed Hadoop environment variables" hadoop_add_subcommand "ec" admin "run a HDFS ErasureCoding CLI" + hadoop_add_subcommand "federation" admin "manage Router-based federation" hadoop_add_subcommand "fetchdt" client "fetch a delegation token from the NameNode" hadoop_add_subcommand "fsck" admin "run a DFS filesystem checking utility" hadoop_add_subcommand "getconf" client "get config values from configuration" @@ -181,6 +183,9 @@ function hdfscmd_case HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.federation.router.Router' ;; +federation) + HADOOP_CLASSNAME='org.apache.hadoop.hdfs.tools.federation.RouterAdmin' +;; secondarynamenode) HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true" HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode' http://git-wip-us.apache.org/repos/asf/hadoop/blob/15b15501/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd index b9853d6..53bdf70 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd +++
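The bin/hdfs change above maps the new federation subcommand to org.apache.hadoop.hdfs.tools.federation.RouterAdmin. Hadoop admin CLIs of this kind are conventionally launched through ToolRunner, which parses the generic options (-D, -conf, and so on) before handing the remaining arguments to the tool; a generic sketch of that shape (an illustrative stand-in, not RouterAdmin's actual option handling):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

/** Illustrative admin-tool skeleton in the style `hdfs federation` dispatches to. */
public class AdminToolSketch extends Configured implements Tool {

  @Override
  public int run(String[] args) throws Exception {
    if (args.length == 0) {
      System.err.println("Usage: admin <subcommand> [options]");
      return -1; // non-zero exit status is surfaced by the shell wrapper
    }
    System.out.println("Would execute subcommand: " + args[0]);
    return 0;
  }

  public static void main(String[] args) throws Exception {
    // ToolRunner injects the Configuration before invoking run().
    System.exit(ToolRunner.run(new Configuration(), new AdminToolSketch(), args));
  }
}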
[09/29] hadoop git commit: HDFS-10630. Federation State Store FS Implementation. Contributed by Jason Kace and Inigo Goiri.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5add7bc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java new file mode 100644 index 000..7f0b36a --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java @@ -0,0 +1,483 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation.store.driver; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.lang.reflect.Method; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils; +import org.apache.hadoop.hdfs.server.federation.store.StateStoreService; +import org.apache.hadoop.hdfs.server.federation.store.records.BaseRecord; +import org.apache.hadoop.hdfs.server.federation.store.records.Query; +import org.apache.hadoop.hdfs.server.federation.store.records.QueryResult; +import org.junit.AfterClass; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Base tests for the driver. The particular implementations will use this to + * test their functionality. + */ +public class TestStateStoreDriverBase { + + private static final Logger LOG = + LoggerFactory.getLogger(TestStateStoreDriverBase.class); + + private static StateStoreService stateStore; + private static Configuration conf; + + + /** + * Get the State Store driver. + * @return State Store driver. + */ + protected StateStoreDriver getStateStoreDriver() { +return stateStore.getDriver(); + } + + @AfterClass + public static void tearDownCluster() { +if (stateStore != null) { + stateStore.stop(); +} + } + + /** + * Get a new State Store using this configuration. + * + * @param config Configuration for the State Store. + * @throws Exception If we cannot get the State Store. 
+ */ + public static void getStateStore(Configuration config) throws Exception { +conf = config; +stateStore = FederationStateStoreTestUtils.getStateStore(conf); + } + + private <T extends BaseRecord> T generateFakeRecord(Class<T> recordClass) + throws IllegalArgumentException, IllegalAccessException, IOException { + +// TODO add record +return null; + } + + /** + * Validate if a record is the same. + * + * @param original Original record. + * @param committed Committed record to compare against. + * @param assertEquals Assert if the records are equal or just return. + * @return True if the stable fields of both records match. + * @throws IllegalArgumentException + * @throws IllegalAccessException + */ + private boolean validateRecord( + BaseRecord original, BaseRecord committed, boolean assertEquals) + throws IllegalArgumentException, IllegalAccessException { + +boolean ret = true; + +Map<String, Class<?>> fields = getFields(original); +for (String key : fields.keySet()) { + if (key.equals("dateModified") || + key.equals("dateCreated") || + key.equals("proto")) { +// Fields are updated/set on commit and fetch and may not match +// the fields that are initialized in a non-committed object. +continue; + } + Object data1 = getField(original, key); + Object data2 = getField(committed, key); + if (assertEquals) { +assertEquals("Field " + key + " does not match", data1, data2); + } else if (!data1.equals(data2)) { +ret = false; + } +} + +long now =
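validateRecord above compares two BaseRecord instances field by field while skipping fields such as dateModified that the state store legitimately rewrites on commit. The reflection idiom behind it, reduced to a self-contained sketch (the real test resolves values through getters rather than raw fields):

import java.lang.reflect.Field;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Objects;
import java.util.Set;

/** Sketch of reflective record comparison in the spirit of validateRecord. */
public final class RecordDiffSketch {

  /** Fields rewritten on commit/fetch; expected to differ between copies. */
  private static final Set<String> VOLATILE_FIELDS =
      new HashSet<>(Arrays.asList("dateModified", "dateCreated", "proto"));

  static boolean sameRecord(Object original, Object committed)
      throws IllegalAccessException {
    for (Field field : original.getClass().getDeclaredFields()) {
      if (VOLATILE_FIELDS.contains(field.getName())) {
        continue;
      }
      field.setAccessible(true);
      if (!Objects.equals(field.get(original), field.get(committed))) {
        return false; // first mismatching stable field decides
      }
    }
    return true;
  }

  static class Rec {
    String name;
    long dateModified;
    Rec(String name, long dateModified) {
      this.name = name;
      this.dateModified = dateModified;
    }
  }

  public static void main(String[] args) throws IllegalAccessException {
    // Prints true: only the volatile dateModified field differs.
    System.out.println(sameRecord(new Rec("a", 1), new Rec("a", 2)));
  }
}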
[07/29] hadoop git commit: HDFS-11546. Federation Router RPC server. Contributed by Jason Kace and Inigo Goiri.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b417be8f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java new file mode 100644 index 000..3a32be1 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcClient.java @@ -0,0 +1,856 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdfs.server.federation.router; + +import java.io.IOException; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.net.InetSocketAddress; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.LinkedHashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; +import java.util.TreeMap; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; +import java.util.concurrent.ThreadFactory; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo; +import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; +import org.apache.hadoop.hdfs.protocol.ClientProtocol; +import org.apache.hadoop.hdfs.protocol.ExtendedBlock; +import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver; +import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeContext; +import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation; +import org.apache.hadoop.io.retry.RetryPolicies; +import org.apache.hadoop.io.retry.RetryPolicy; +import org.apache.hadoop.io.retry.RetryPolicy.RetryAction.RetryDecision; +import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.ipc.StandbyException; +import org.apache.hadoop.security.UserGroupInformation; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.util.concurrent.ThreadFactoryBuilder; + +/** + * A client proxy for Router -> NN communication using the NN ClientProtocol. + * + * Provides routers to invoke remote ClientProtocol methods and handle + * retries/failover. 
+ * + * invokeSingle Make a single request to a single namespace + * invokeSequential Make a sequential series of requests to multiple + * ordered namespaces until a condition is met. + * invokeConcurrent Make concurrent requests to multiple namespaces and + * return all of the results. + * + * Also maintains a cached pool of connections to NNs. Connections are managed + * by the ConnectionManager and are unique to each user + NN. The size of the + * connection pool can be configured. Larger pools allow for more simultaneous + * requests to a single NN from a single user. + */ +public class RouterRpcClient { + + private static final Logger LOG = + LoggerFactory.getLogger(RouterRpcClient.class); + + + /** Router identifier. */ + private final String routerId; + + /** Interface to identify the active NN for a nameservice or blockpool ID. */ + private final ActiveNamenodeResolver namenodeResolver; + + /** Connection pool to the Namenodes per user for performance. */ + private final ConnectionManager connectionManager; + /** Service to run asynchronous calls. */ + private final ExecutorService executorService; + /** Retry policy for router -> NN communication. */ + private final RetryPolicy retryPolicy; + + /** Pattern to parse a stack trace line. */ + private static final Pattern STACK_TRACE_PATTERN = + Pattern.compile("\\tat (.*)\\.(.*)\\((.*):(\\d*)\\)"); + + + /** + * Create a router RPC
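The class javadoc above distinguishes three invocation strategies. The sequential one, "requests to multiple ordered namespaces until a condition is met", reduces to a loop like the following sketch (simplified stand-in types; the real client drives retry-aware ClientProtocol proxies and failover policies):

import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.function.Function;
import java.util.function.Predicate;

/** Sketch of the invokeSequential strategy from the RouterRpcClient javadoc. */
public class SequentialInvokerSketch {

  static <T> T invokeSequential(List<String> orderedNamespaces,
      Function<String, T> remoteCall, Predicate<T> accept) throws IOException {
    IOException lastFailure = null;
    for (String ns : orderedNamespaces) {
      try {
        T result = remoteCall.apply(ns);
        if (accept.test(result)) {
          return result; // condition met: skip the remaining namespaces
        }
      } catch (RuntimeException e) {
        lastFailure = new IOException("Call to " + ns + " failed", e);
      }
    }
    if (lastFailure != null) {
      throw lastFailure; // nothing acceptable and at least one hard failure
    }
    return null;
  }

  public static void main(String[] args) throws IOException {
    String found = invokeSequential(
        Arrays.asList("ns0", "ns1"),
        ns -> ns.equals("ns1") ? "/file-in-" + ns : null,
        result -> result != null);
    System.out.println(found); // /file-in-ns1
  }
}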
[05/29] hadoop git commit: HDFS-11546. Federation Router RPC server. Contributed by Jason Kace and Inigo Goiri.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b417be8f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java index ee6f57d..2875750 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java @@ -43,7 +43,7 @@ import org.apache.hadoop.util.Time; /** * In-memory cache/mock of a namenode and file resolver. Stores the most - * recently updated NN information for each nameservice and block pool. Also + * recently updated NN information for each nameservice and block pool. It also * stores a virtual mount table for resolving global namespace paths to local NN * paths. */ @@ -51,82 +51,93 @@ public class MockResolver implements ActiveNamenodeResolver, FileSubclusterResolver { private Map<String, List<? extends FederationNamenodeContext>> resolver = - new HashMap<String, List<? extends FederationNamenodeContext>>(); - private Map<String, List<RemoteLocation>> locations = - new HashMap<String, List<RemoteLocation>>(); - private Set<FederationNamespaceInfo> namespaces = - new HashSet<FederationNamespaceInfo>(); + new HashMap<>(); + private Map<String, List<RemoteLocation>> locations = new HashMap<>(); + private Set<FederationNamespaceInfo> namespaces = new HashSet<>(); private String defaultNamespace = null; + public MockResolver(Configuration conf, StateStoreService store) { this.cleanRegistrations(); } - public void addLocation(String mount, String nameservice, String location) { -RemoteLocation remoteLocation = new RemoteLocation(nameservice, location); -List<RemoteLocation> locationsList = locations.get(mount); + public void addLocation(String mount, String nsId, String location) { +List<RemoteLocation> locationsList = this.locations.get(mount); if (locationsList == null) { - locationsList = new LinkedList<RemoteLocation>(); - locations.put(mount, locationsList); + locationsList = new LinkedList<>(); + this.locations.put(mount, locationsList); } + +final RemoteLocation remoteLocation = new RemoteLocation(nsId, location); if (!locationsList.contains(remoteLocation)) { locationsList.add(remoteLocation); } if (this.defaultNamespace == null) { - this.defaultNamespace = nameservice; + this.defaultNamespace = nsId; } } public synchronized void cleanRegistrations() { -this.resolver = -new HashMap<String, List<? extends FederationNamenodeContext>>(); -this.namespaces = new HashSet<FederationNamespaceInfo>(); +this.resolver = new HashMap<>(); +this.namespaces = new HashSet<>(); } @Override public void updateActiveNamenode( - String ns, InetSocketAddress successfulAddress) { + String nsId, InetSocketAddress successfulAddress) { String address = successfulAddress.getHostName() + ":" + successfulAddress.getPort(); -String key = ns; +String key = nsId; if (key != null) { // Update the active entry @SuppressWarnings("unchecked") - List<FederationNamenodeContext> iterator = - (List<FederationNamenodeContext>) resolver.get(key); - for (FederationNamenodeContext namenode : iterator) { + List<FederationNamenodeContext> namenodes = + (List<FederationNamenodeContext>) this.resolver.get(key); + for (FederationNamenodeContext namenode : namenodes) { if (namenode.getRpcAddress().equals(address)) { MockNamenodeContext nn = (MockNamenodeContext) namenode; nn.setState(FederationNamenodeServiceState.ACTIVE); break; } } - Collections.sort(iterator, new NamenodePriorityComparator()); + // This operation modifies the list so we need to be careful + synchronized(namenodes) { +Collections.sort(namenodes, new NamenodePriorityComparator()); + } } } @Override public List<? extends FederationNamenodeContext> getNamenodesForNameserviceId(String nameserviceId) { -return resolver.get(nameserviceId); +// Return a copy of the list because it is updated periodically +List<? extends FederationNamenodeContext> namenodes = +this.resolver.get(nameserviceId); +return Collections.unmodifiableList(new ArrayList<>(namenodes)); } @Override public List<? extends FederationNamenodeContext> getNamenodesForBlockPoolId( String blockPoolId) { -return resolver.get(blockPoolId); +// Return a copy of the list because it is updated periodically +List<? extends FederationNamenodeContext> namenodes = +this.resolver.get(blockPoolId); +return Collections.unmodifiableList(new ArrayList<>(namenodes)); } private static class MockNamenodeContext implements FederationNamenodeContext { + +private String namenodeId; +private String nameserviceId; + private String webAddress; private String rpcAddress; private String serviceAddress; private String lifelineAddress; -private String namenodeId; -private String
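The resolver change above stops returning its internal namenode lists directly: callers now receive an unmodifiable snapshot, so the periodic registration updates can no longer race with iterating clients. The idiom in isolation (an illustrative registry, not the Hadoop class):

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;

/** Sketch of the defensive-copy idiom used in the MockResolver change. */
public class SnapshotRegistrySketch {

  // Backing list; mutated periodically in the real resolver.
  private final List<String> namenodes = new CopyOnWriteArrayList<>();

  public void register(String namenode) {
    namenodes.add(namenode);
  }

  /** Later mutations of the registry stay invisible to the caller. */
  public List<String> getNamenodes() {
    return Collections.unmodifiableList(new ArrayList<>(namenodes));
  }

  public static void main(String[] args) {
    SnapshotRegistrySketch registry = new SnapshotRegistrySketch();
    registry.register("nn0:8020");
    List<String> snapshot = registry.getNamenodes();
    registry.register("nn1:8020");
    System.out.println(snapshot.size()); // 1: snapshot taken before nn1
  }
}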
[12/29] hadoop git commit: HDFS-10629. Federation Router. Contributed by Jason Kace and Inigo Goiri.
HDFS-10629. Federation Router. Contributed by Jason Kace and Inigo Goiri. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1fd5f374 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1fd5f374 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1fd5f374 Branch: refs/heads/HDFS-10467 Commit: 1fd5f3746e08eed006d563c5b020ee3c90eef4a9 Parents: 015abcd Author: Inigo Authored: Tue Mar 28 14:30:59 2017 -0700 Committer: Inigo Goiri Committed: Mon Oct 2 16:44:12 2017 -0700 -- .../hadoop-hdfs/src/main/bin/hdfs | 5 + .../hadoop-hdfs/src/main/bin/hdfs.cmd | 8 +- .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 17 + .../resolver/ActiveNamenodeResolver.java| 117 +++ .../resolver/FederationNamenodeContext.java | 87 +++ .../FederationNamenodeServiceState.java | 46 ++ .../resolver/FederationNamespaceInfo.java | 99 +++ .../resolver/FileSubclusterResolver.java| 75 ++ .../resolver/NamenodePriorityComparator.java| 63 ++ .../resolver/NamenodeStatusReport.java | 195 + .../federation/resolver/PathLocation.java | 122 +++ .../federation/resolver/RemoteLocation.java | 74 ++ .../federation/resolver/package-info.java | 41 + .../federation/router/FederationUtil.java | 117 +++ .../router/RemoteLocationContext.java | 38 + .../hdfs/server/federation/router/Router.java | 263 +++ .../federation/router/RouterRpcServer.java | 102 +++ .../server/federation/router/package-info.java | 31 + .../federation/store/StateStoreService.java | 77 ++ .../server/federation/store/package-info.java | 62 ++ .../src/main/resources/hdfs-default.xml | 16 + .../server/federation/FederationTestUtils.java | 233 ++ .../hdfs/server/federation/MockResolver.java| 290 +++ .../server/federation/RouterConfigBuilder.java | 40 + .../server/federation/RouterDFSCluster.java | 767 +++ .../server/federation/router/TestRouter.java| 96 +++ 26 files changed, 3080 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/1fd5f374/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs index e6405b5..b1f44a4 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs @@ -57,6 +57,7 @@ function hadoop_usage hadoop_add_subcommand "oiv" admin "apply the offline fsimage viewer to an fsimage" hadoop_add_subcommand "oiv_legacy" admin "apply the offline fsimage viewer to a legacy fsimage" hadoop_add_subcommand "portmap" daemon "run a portmap service" + hadoop_add_subcommand "router" daemon "run the DFS router" hadoop_add_subcommand "secondarynamenode" daemon "run the DFS secondary namenode" hadoop_add_subcommand "snapshotDiff" client "diff two snapshots of a directory or diff the current directory contents with a snapshot" hadoop_add_subcommand "storagepolicies" admin "list/get/set block storage policies" @@ -176,6 +177,10 @@ function hdfscmd_case HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true" HADOOP_CLASSNAME=org.apache.hadoop.portmap.Portmap ;; +router) + HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true" + HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.federation.router.Router' +;; secondarynamenode) HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true" HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode' http://git-wip-us.apache.org/repos/asf/hadoop/blob/1fd5f374/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd index 2181e47..b9853d6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd @@ -59,7 +59,7 @@ if "%1" == "--loglevel" ( ) ) - set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies classpath crypto debug + set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies classpath crypto router debug for %%i in ( %hdfscommands% ) do ( if %hdfs-command% == %%i set hdfscommand=true
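This commit introduces the Router as a daemon ("hdfs router"), and the classes it adds (Router, RouterRpcServer, StateStoreService) build on Hadoop's service framework; RouterRpcServer visibly extends AbstractService elsewhere in this series. A toy sketch of the AbstractService lifecycle those classes rely on (not the Router itself):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.service.AbstractService;

/** Toy service showing the init -> start -> stop lifecycle the Router uses. */
public class ToyRouterService extends AbstractService {

  public ToyRouterService() {
    super(ToyRouterService.class.getSimpleName());
  }

  @Override
  protected void serviceInit(Configuration conf) throws Exception {
    // The real Router creates its RPC server and resolvers here.
    super.serviceInit(conf);
  }

  @Override
  protected void serviceStart() throws Exception {
    // The real Router starts listening for client RPCs here.
    super.serviceStart();
  }

  @Override
  protected void serviceStop() throws Exception {
    // Release sockets, thread pools, and state store connections.
    super.serviceStop();
  }

  public static void main(String[] args) {
    ToyRouterService service = new ToyRouterService();
    service.init(new Configuration());
    service.start();
    System.out.println("Service state: " + service.getServiceState()); // STARTED
    service.stop();
  }
}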
[18/29] hadoop git commit: HDFS-10880. Federation Mount Table State Store internal API. Contributed by Jason Kace and Inigo Goiri.
HDFS-10880. Federation Mount Table State Store internal API. Contributed by Jason Kace and Inigo Goiri. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3688769f Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3688769f Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3688769f Branch: refs/heads/HDFS-10467 Commit: 3688769fa50dda098e790ddb9853d6905e91e13f Parents: 1433998 Author: Inigo Goiri Authored: Fri Aug 4 18:00:12 2017 -0700 Committer: Inigo Goiri Committed: Mon Oct 2 16:44:12 2017 -0700 -- .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 7 +- .../federation/resolver/MountTableManager.java | 80 +++ .../federation/resolver/MountTableResolver.java | 544 +++ .../federation/resolver/PathLocation.java | 124 - .../resolver/order/DestinationOrder.java| 29 + .../federation/resolver/order/package-info.java | 29 + .../federation/router/FederationUtil.java | 56 +- .../hdfs/server/federation/router/Router.java | 3 +- .../federation/store/MountTableStore.java | 49 ++ .../federation/store/StateStoreService.java | 2 + .../store/impl/MountTableStoreImpl.java | 116 .../protocol/AddMountTableEntryRequest.java | 47 ++ .../protocol/AddMountTableEntryResponse.java| 42 ++ .../protocol/GetMountTableEntriesRequest.java | 49 ++ .../protocol/GetMountTableEntriesResponse.java | 53 ++ .../protocol/RemoveMountTableEntryRequest.java | 49 ++ .../protocol/RemoveMountTableEntryResponse.java | 42 ++ .../protocol/UpdateMountTableEntryRequest.java | 51 ++ .../protocol/UpdateMountTableEntryResponse.java | 43 ++ .../pb/AddMountTableEntryRequestPBImpl.java | 84 +++ .../pb/AddMountTableEntryResponsePBImpl.java| 76 +++ .../pb/GetMountTableEntriesRequestPBImpl.java | 76 +++ .../pb/GetMountTableEntriesResponsePBImpl.java | 104 .../pb/RemoveMountTableEntryRequestPBImpl.java | 76 +++ .../pb/RemoveMountTableEntryResponsePBImpl.java | 76 +++ .../pb/UpdateMountTableEntryRequestPBImpl.java | 96 .../pb/UpdateMountTableEntryResponsePBImpl.java | 76 +++ .../federation/store/records/MountTable.java| 301 ++ .../store/records/impl/pb/MountTablePBImpl.java | 213 .../src/main/proto/FederationProtocol.proto | 61 ++- .../hdfs/server/federation/MockResolver.java| 9 +- .../resolver/TestMountTableResolver.java| 396 ++ .../store/FederationStateStoreTestUtils.java| 16 + .../store/TestStateStoreMountTable.java | 250 + .../store/driver/TestStateStoreDriverBase.java | 12 + .../store/records/TestMountTable.java | 176 ++ 36 files changed, 3437 insertions(+), 76 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/3688769f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index f0b0c63..4a8ddfc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -27,6 +27,8 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault; import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolerant; import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaLruTracker; +import org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver; +import
org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver; import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver; import org.apache.hadoop.hdfs.server.federation.resolver.MembershipNamenodeResolver; import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver; @@ -1178,8 +1180,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys { // HDFS Router State Store connection public static final String FEDERATION_FILE_RESOLVER_CLIENT_CLASS = FEDERATION_ROUTER_PREFIX + "file.resolver.client.class"; - public static final String FEDERATION_FILE_RESOLVER_CLIENT_CLASS_DEFAULT = - "org.apache.hadoop.hdfs.server.federation.MockResolver"; + public static final Class<? extends FileSubclusterResolver> + FEDERATION_FILE_RESOLVER_CLIENT_CLASS_DEFAULT = + MountTableResolver.class; public static final String FEDERATION_NAMENODE_RESOLVER_CLIENT_CLASS = FEDERATION_ROUTER_PREFIX
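Turning FEDERATION_FILE_RESOLVER_CLIENT_CLASS_DEFAULT into a Class constant lets the Router resolve the plugin type-safely with Configuration.getClass instead of matching class-name strings. A sketch of how such a pluggable resolver could be loaded (the key follows the diff; the helper and the stand-in types are illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;

/** Sketch of type-safe plugin loading enabled by a Class-valued default. */
public class PluginLoaderSketch {

  /** Stand-in for the FileSubclusterResolver plugin interface. */
  public interface Resolver { }

  /** Stand-in default implementation (MountTableResolver in the diff). */
  public static class DefaultResolver implements Resolver { }

  static Resolver newResolver(Configuration conf) {
    // getClass rejects configured classes that do not implement Resolver.
    Class<? extends Resolver> clazz = conf.getClass(
        "dfs.federation.router.file.resolver.client.class", // key from the diff
        DefaultResolver.class, // Class-typed default, as the change introduces
        Resolver.class);
    return ReflectionUtils.newInstance(clazz, conf);
  }

  public static void main(String[] args) {
    Resolver resolver = newResolver(new Configuration(false));
    System.out.println(resolver.getClass().getSimpleName()); // DefaultResolver
  }
}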
[13/29] hadoop git commit: HDFS-12223. Rebasing HDFS-10467. Contributed by Inigo Goiri.
HDFS-12223. Rebasing HDFS-10467. Contributed by Inigo Goiri. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6e57aef3 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6e57aef3 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6e57aef3 Branch: refs/heads/HDFS-10467 Commit: 6e57aef3c62eb402a90e6894b8420a24d75c2e96 Parents: b417be8 Author: Inigo Goiri Authored: Fri Jul 28 15:55:10 2017 -0700 Committer: Inigo Goiri Committed: Mon Oct 2 16:44:12 2017 -0700 -- .../federation/router/RouterRpcServer.java | 59 +--- 1 file changed, 51 insertions(+), 8 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e57aef3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java index 4bae71e..eaaab39 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java @@ -64,8 +64,9 @@ import org.apache.hadoop.hdfs.AddBlockFlag; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.inotify.EventBatchList; -import org.apache.hadoop.hdfs.protocol.AddingECPolicyResponse; +import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse; import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; +import org.apache.hadoop.hdfs.protocol.BlocksStats; import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry; import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; import org.apache.hadoop.hdfs.protocol.CachePoolEntry; @@ -75,6 +76,7 @@ import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DirectoryListing; +import org.apache.hadoop.hdfs.protocol.ECBlockGroupsStats; import org.apache.hadoop.hdfs.protocol.EncryptionZone; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; @@ -85,6 +87,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; +import org.apache.hadoop.hdfs.protocol.OpenFileEntry; import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; @@ -1736,13 +1739,6 @@ public class RouterRpcServer extends AbstractService implements ClientProtocol { } @Override // ClientProtocol - public AddingECPolicyResponse[] addErasureCodingPolicies( - ErasureCodingPolicy[] policies) throws IOException { -checkOperation(OperationCategory.WRITE, false); -return null; - } - - @Override // ClientProtocol public void unsetErasureCodingPolicy(String src) throws IOException { checkOperation(OperationCategory.WRITE, false); } @@ -1808,6 +1804,53 @@ public class RouterRpcServer extends AbstractService implements ClientProtocol { return null; } + @Override + public AddECPolicyResponse[]
addErasureCodingPolicies( + ErasureCodingPolicy[] arg0) throws IOException { +checkOperation(OperationCategory.WRITE, false); +return null; + } + + @Override + public void removeErasureCodingPolicy(String arg0) throws IOException { +checkOperation(OperationCategory.WRITE, false); + } + + @Override + public void disableErasureCodingPolicy(String arg0) throws IOException { +checkOperation(OperationCategory.WRITE, false); + } + + @Override + public void enableErasureCodingPolicy(String arg0) throws IOException { +checkOperation(OperationCategory.WRITE, false); + } + + @Override + public ECBlockGroupsStats getECBlockGroupsStats() throws IOException { +checkOperation(OperationCategory.READ, false); +return null; + } + + @Override + public HashMap<String, String> getErasureCodingCodecs() throws IOException { +checkOperation(OperationCategory.READ, false); +return null; + } + + @Override + public BlocksStats getBlocksStats() throws IOException { +checkOperation(OperationCategory.READ, false); +return null; + } + + @Override + public
[01/29] hadoop git commit: HDFS-12576. JournalNodes are getting started, even though dfs.namenode.shared.edits.dir is not configured. Contributed by Bharat Viswanadham. [Forced Update!]
Repository: hadoop Updated Branches: refs/heads/HDFS-10467 17c38bd8b -> 1ec7e00e8 (forced update) HDFS-12576. JournalNodes are getting started, even though dfs.namenode.shared.edits.dir is not configured. Contributed by Bharat Viswanadham. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/015abcd8 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/015abcd8 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/015abcd8 Branch: refs/heads/HDFS-10467 Commit: 015abcd8ce1d360830ee2960f1cdc743e09f1629 Parents: 27ffd43 Author: Anu Engineer Authored: Mon Oct 2 15:40:00 2017 -0700 Committer: Anu Engineer Committed: Mon Oct 2 15:40:00 2017 -0700 -- hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh | 2 +- hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/015abcd8/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh index 12d5209..dbdf41e 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/start-dfs.sh @@ -146,7 +146,7 @@ fi JOURNAL_NODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -journalNodes 2>&-) -if [[ "{$JOURNAL_NODES-}" != $'\n' ]]; then +if [[ "${#JOURNAL_NODES}" != 0 ]]; then echo "Starting journal nodes [${JOURNAL_NODES}]" hadoop_uservar_su hdfs journalnode "${HADOOP_HDFS_HOME}/bin/hdfs" \ http://git-wip-us.apache.org/repos/asf/hadoop/blob/015abcd8/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh index 92974de..3da146e 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/stop-dfs.sh @@ -102,7 +102,7 @@ fi JOURNAL_NODES=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -journalNodes 2>&-) -if [[ "{$JOURNAL_NODES-}" != $'\n' ]]; then +if [[ "${#JOURNAL_NODES}" != 0 ]]; then echo "Stopping journal nodes [${JOURNAL_NODES}]" hadoop_uservar_su hdfs journalnode "${HADOOP_HDFS_HOME}/bin/hdfs" \
[28/29] hadoop git commit: HDFS-12381. [Documentation] Adding configuration keys for the Router. Contributed by Inigo Goiri.
HDFS-12381. [Documentation] Adding configuration keys for the Router. Contributed by Inigo Goiri. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1ec7e00e Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1ec7e00e Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1ec7e00e Branch: refs/heads/HDFS-10467 Commit: 1ec7e00e8721c8ff4a31a70b42f083be7aaade05 Parents: 15ead41 Author: Inigo Goiri Authored: Fri Sep 22 13:06:10 2017 -0700 Committer: Inigo Goiri Committed: Mon Oct 2 16:44:13 2017 -0700 -- .../src/main/resources/hdfs-default.xml | 11 +- .../src/site/markdown/HDFSRouterFederation.md | 159 +-- 2 files changed, 156 insertions(+), 14 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ec7e00e/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index d58fcae..9a75f7d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -4652,7 +4652,8 @@ dfs.federation.router.rpc.enable true - If the RPC service to handle client requests in the router is enabled. + If true, the RPC service to handle client requests in the router is + enabled. @@ -4756,7 +4757,7 @@ dfs.federation.router.admin.enable true - If the RPC admin service to handle client requests in the router is + If true, the RPC admin service to handle client requests in the router is enabled. @@ -4810,7 +4811,7 @@ dfs.federation.router.store.enable true - If the Router connects to the State Store. + If true, the Router connects to the State Store. @@ -4858,7 +4859,7 @@ dfs.federation.router.heartbeat.enable true - Enables the Router to heartbeat into the State Store. + If true, the Router heartbeats into the State Store. @@ -4882,7 +4883,7 @@ dfs.federation.router.monitor.localnamenode.enable true - If the Router should monitor the namenode in the local machine. + If true, the Router should monitor the namenode in the local machine. http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ec7e00e/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md index f094238..1cea7f6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md @@ -21,7 +21,7 @@ Introduction NameNodes have scalability limits because of the metadata overhead comprised of inodes (files and directories) and file blocks, the number of Datanode heartbeats, and the number of HDFS RPC client requests. -The common solution is to split the filesystem into smaller subclusters [HDFS Federation](.Federation.html) and provide a federated view [ViewFs](.ViewFs.html). +The common solution is to split the filesystem into smaller subclusters [HDFS Federation](./Federation.html) and provide a federated view [ViewFs](./ViewFs.html). The problem is how to maintain the split of the subclusters (e.g., namespace partition), which forces users to connect to multiple subclusters and manage the allocation of folders/files to them. @@ -35,7 +35,7 @@ This layer must be scalable, highly available, and fault tolerant.
This federation layer comprises multiple components. The _Router_ component that has the same interface as a NameNode, and forwards the client requests to the correct subcluster, based on ground-truth information from a State Store. -The _State Store_ combines a remote _Mount Table_ (in the flavor of [ViewFs](.ViewFs.html), but shared between clients) and utilization (load/capacity) information about the subclusters. +The _State Store_ combines a remote _Mount Table_ (in the flavor of [ViewFs](./ViewFs.html), but shared between clients) and utilization (load/capacity) information about the subclusters. This approach has the same architecture as [YARN federation](../hadoop-yarn/Federation.html). ![Router-based Federation Sequence Diagram | width=800](./images/routerfederation.png) @@ -101,11 +101,11 @@ To interact with the users and the administrators, the
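Because the Router exposes the same interface as a NameNode, the documentation's point is that an unmodified HDFS client can simply be pointed at it. A minimal sketch (host and port here are hypothetical; in a real deployment they come from the router's configured RPC address):

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/** Sketch of a client using a Router as if it were a NameNode. */
public class RouterClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // The Router forwards each request to the subcluster that owns the
    // path, according to the mount table in the State Store.
    FileSystem fs = FileSystem.get(
        new URI("hdfs://router.example.com:8888"), conf);
    for (FileStatus status : fs.listStatus(new Path("/"))) {
      System.out.println(status.getPath());
    }
  }
}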
[03/29] hadoop git commit: HDFS-10687. Federation Membership State Store internal API. Contributed by Jason Kace and Inigo Goiri.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/651420b7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/UpdateNamenodeRegistrationResponse.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/UpdateNamenodeRegistrationResponse.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/UpdateNamenodeRegistrationResponse.java new file mode 100644 index 000..1f0d556 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/UpdateNamenodeRegistrationResponse.java @@ -0,0 +1,51 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation.store.protocol; + +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreSerializer; + +/** + * API response for overriding an existing namenode registration in the state + * store. + */ +public abstract class UpdateNamenodeRegistrationResponse { + + public static UpdateNamenodeRegistrationResponse newInstance() { +return StateStoreSerializer.newRecord( +UpdateNamenodeRegistrationResponse.class); + } + + public static UpdateNamenodeRegistrationResponse newInstance(boolean status) + throws IOException { +UpdateNamenodeRegistrationResponse response = newInstance(); +response.setResult(status); +return response; + } + + @Private + @Unstable + public abstract boolean getResult(); + + @Private + @Unstable + public abstract void setResult(boolean result); +} \ No newline at end of file http://git-wip-us.apache.org/repos/asf/hadoop/blob/651420b7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/FederationProtocolPBTranslator.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/FederationProtocolPBTranslator.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/FederationProtocolPBTranslator.java new file mode 100644 index 000..baad113 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/FederationProtocolPBTranslator.java @@ -0,0 +1,145 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb; + +import java.io.IOException; +import java.lang.reflect.Method; + +import org.apache.commons.codec.binary.Base64; + +import com.google.protobuf.GeneratedMessage; +import com.google.protobuf.Message; +import com.google.protobuf.Message.Builder; +import com.google.protobuf.MessageOrBuilder; + +/** + * Helper class for setting/getting data elements in an object backed by a + * protobuf implementation. + */ +public class FederationProtocolPBTranslator<P extends GeneratedMessage, B extends Builder, T extends MessageOrBuilder> { + + /** Optional proto byte stream used to create this object. */ + private P proto; + /** The class of the proto handler for this
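FederationProtocolPBTranslator keeps each state store record backed by a protobuf message and, per the field comment above, can materialize one from a Base64 byte stream. The round-trip it relies on, in isolation (a generic helper under assumed shapes, not the translator's exact code):

import java.io.IOException;

import org.apache.commons.codec.binary.Base64;

import com.google.protobuf.InvalidProtocolBufferException;
import com.google.protobuf.Message;

/** Sketch of the Base64 round-trip behind readInstance(String). */
public final class PBBase64Sketch {

  /** Serialize a message to a Base64 string suitable for a text state store. */
  static String toBase64(Message message) {
    return Base64.encodeBase64String(message.toByteArray());
  }

  /** Rebuild a message of the builder's type from its Base64 form. */
  static Message fromBase64(String base64, Message.Builder builder)
      throws IOException {
    try {
      return builder.mergeFrom(Base64.decodeBase64(base64)).build();
    } catch (InvalidProtocolBufferException e) {
      throw new IOException("Cannot deserialize record", e);
    }
  }
}

Base64 keeps the serialized record printable, which is what lets a file- or ZooKeeper-backed state store treat every record as a plain string.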
[02/29] hadoop git commit: HDFS-10687. Federation Membership State Store internal API. Contributed by Jason Kace and Inigo Goiri.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/651420b7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestNamenodeResolver.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestNamenodeResolver.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestNamenodeResolver.java new file mode 100644 index 000..2d74505 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/resolver/TestNamenodeResolver.java @@ -0,0 +1,284 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation.resolver; + +import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.NAMENODES; +import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.NAMESERVICES; +import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.ROUTERS; +import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.createNamenodeReport; +import static org.apache.hadoop.hdfs.server.federation.FederationTestUtils.verifyException; +import static org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.clearRecords; +import static org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.getStateStoreConfiguration; +import static org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.newStateStore; +import static org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.waitStateStore; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.server.federation.store.StateStoreService; +import org.apache.hadoop.hdfs.server.federation.store.StateStoreUnavailableException; +import org.apache.hadoop.hdfs.server.federation.store.records.MembershipState; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +/** + * Test the basic {@link ActiveNamenodeResolver} functionality. 
+ */ +public class TestNamenodeResolver { + + private static StateStoreService stateStore; + private static ActiveNamenodeResolver namenodeResolver; + + @BeforeClass + public static void create() throws Exception { + +Configuration conf = getStateStoreConfiguration(); + +// Reduce expirations to 5 seconds +conf.setLong( +DFSConfigKeys.FEDERATION_STORE_MEMBERSHIP_EXPIRATION_MS, +TimeUnit.SECONDS.toMillis(5)); + +stateStore = newStateStore(conf); +assertNotNull(stateStore); + +namenodeResolver = new MembershipNamenodeResolver(conf, stateStore); +namenodeResolver.setRouterId(ROUTERS[0]); + } + + @AfterClass + public static void destroy() throws Exception { +stateStore.stop(); +stateStore.close(); + } + + @Before + public void setup() throws IOException, InterruptedException { +// Wait for state store to connect +stateStore.loadDriver(); +waitStateStore(stateStore, 1); + +// Clear NN registrations +boolean cleared = clearRecords(stateStore, MembershipState.class); +assertTrue(cleared); + } + + @Test + public void testStateStoreDisconnected() throws Exception { + +// Add an entry to the store +NamenodeStatusReport report = createNamenodeReport( +NAMESERVICES[0], NAMENODES[0], HAServiceState.ACTIVE); +assertTrue(namenodeResolver.registerNamenode(report)); + +// Close the data store driver +stateStore.closeDriver(); +assertFalse(stateStore.isDriverReady()); + +// Flush the caches +stateStore.refreshCaches(true); + +// Verify commands
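These tests lean on the resolver always handing namenodes back in priority order (the NamenodePriorityComparator listed in the earlier commits), so that an ACTIVE namenode is contacted first. The ordering idea, sketched with simplified stand-in types:

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

/** Sketch of active-first namenode ordering; simplified stand-in types. */
public class NamenodeOrderSketch {

  enum State { ACTIVE, STANDBY, UNAVAILABLE }

  static class Namenode {
    final String id;
    final State state;
    Namenode(String id, State state) {
      this.id = id;
      this.state = state;
    }
  }

  public static void main(String[] args) {
    List<Namenode> namenodes = new ArrayList<>();
    namenodes.add(new Namenode("nn1", State.STANDBY));
    namenodes.add(new Namenode("nn2", State.UNAVAILABLE));
    namenodes.add(new Namenode("nn0", State.ACTIVE));

    // Lower enum ordinal means higher priority: ACTIVE sorts first.
    namenodes.sort(Comparator.comparingInt(nn -> nn.state.ordinal()));

    System.out.println(namenodes.get(0).id); // nn0: the active namenode
  }
}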
[20/29] hadoop git commit: HDFS-10881. Federation State Store Driver API. Contributed by Jason Kace and Inigo Goiri.
HDFS-10881. Federation State Store Driver API. Contributed by Jason Kace and Inigo Goiri. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0da377aa Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0da377aa Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0da377aa Branch: refs/heads/HDFS-10467 Commit: 0da377aaae9ce1978e346c6b50cf831ab0a0e81f Parents: 1fd5f37 Author: Inigo Authored: Wed Mar 29 19:35:06 2017 -0700 Committer: Inigo Goiri Committed: Mon Oct 2 16:44:12 2017 -0700 -- .../store/StateStoreUnavailableException.java | 33 .../federation/store/StateStoreUtils.java | 72 +++ .../store/driver/StateStoreDriver.java | 172 + .../driver/StateStoreRecordOperations.java | 164 .../store/driver/impl/StateStoreBaseImpl.java | 69 +++ .../store/driver/impl/package-info.java | 39 .../federation/store/driver/package-info.java | 37 .../federation/store/protocol/package-info.java | 31 +++ .../federation/store/records/BaseRecord.java| 189 +++ .../federation/store/records/QueryResult.java | 56 ++ .../federation/store/records/package-info.java | 36 11 files changed, 898 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/0da377aa/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUnavailableException.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUnavailableException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUnavailableException.java new file mode 100644 index 000..4e6f8c8 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUnavailableException.java @@ -0,0 +1,33 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation.store; + +import java.io.IOException; + +/** + * Thrown when the state store is not reachable or available. Cached APIs and + * queries may succeed. Client should retry again later.
+ */ +public class StateStoreUnavailableException extends IOException { + + private static final long serialVersionUID = 1L; + + public StateStoreUnavailableException(String msg) { +super(msg); + } +} \ No newline at end of file http://git-wip-us.apache.org/repos/asf/hadoop/blob/0da377aa/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUtils.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUtils.java new file mode 100644 index 000..8c681df --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/StateStoreUtils.java @@ -0,0 +1,72 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing
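StateStoreUnavailableException is explicitly a retryable condition ("Client should retry again later"). A client-side sketch of the bounded retry-with-backoff loop that contract suggests (the nested exception stand-in and the limits are illustrative):

import java.io.IOException;
import java.util.concurrent.Callable;

/** Sketch of retrying an operation while the state store reconnects. */
public class StateStoreRetrySketch {

  static class StateStoreUnavailableException extends IOException {
    StateStoreUnavailableException(String msg) { super(msg); }
  }

  static <T> T callWithRetry(Callable<T> op, int maxAttempts, long backoffMs)
      throws Exception {
    for (int attempt = 1; ; attempt++) {
      try {
        return op.call();
      } catch (StateStoreUnavailableException e) {
        if (attempt >= maxAttempts) {
          throw e; // store stayed unavailable, surface the failure
        }
        Thread.sleep(backoffMs * attempt); // give the driver time to reconnect
      }
    }
  }

  public static void main(String[] args) throws Exception {
    int[] calls = {0};
    String value = callWithRetry(() -> {
      if (++calls[0] < 3) {
        throw new StateStoreUnavailableException("driver reconnecting");
      }
      return "record";
    }, 5, 10);
    System.out.println(value + " after " + calls[0] + " attempts");
  }
}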
[17/29] hadoop git commit: HDFS-10880. Federation Mount Table State Store internal API. Contributed by Jason Kace and Inigo Goiri.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3688769f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RemoveMountTableEntryRequestPBImpl.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RemoveMountTableEntryRequestPBImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RemoveMountTableEntryRequestPBImpl.java new file mode 100644 index 000..7f7c998 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RemoveMountTableEntryRequestPBImpl.java @@ -0,0 +1,76 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation.store.protocol.impl.pb; + +import java.io.IOException; + +import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoveMountTableEntryRequestProto; +import org.apache.hadoop.hdfs.federation.protocol.proto.HdfsServerFederationProtos.RemoveMountTableEntryRequestProtoOrBuilder; +import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest; +import org.apache.hadoop.hdfs.server.federation.store.records.impl.pb.PBRecord; + +import com.google.protobuf.Message; + +/** + * Protobuf implementation of the state store API object + * RemoveMountTableEntryRequest. 
+ */ +public class RemoveMountTableEntryRequestPBImpl +extends RemoveMountTableEntryRequest implements PBRecord { + + private FederationProtocolPBTranslator<RemoveMountTableEntryRequestProto, RemoveMountTableEntryRequestProto.Builder, RemoveMountTableEntryRequestProtoOrBuilder> translator = + new FederationProtocolPBTranslator<RemoveMountTableEntryRequestProto, RemoveMountTableEntryRequestProto.Builder, RemoveMountTableEntryRequestProtoOrBuilder>( + RemoveMountTableEntryRequestProto.class); + + public RemoveMountTableEntryRequestPBImpl() { + } + + public RemoveMountTableEntryRequestPBImpl( + RemoveMountTableEntryRequestProto proto) { +this.setProto(proto); + } + + @Override + public RemoveMountTableEntryRequestProto getProto() { +return this.translator.build(); + } + + @Override + public void setProto(Message proto) { +this.translator.setProto(proto); + } + + @Override + public void readInstance(String base64String) throws IOException { +this.translator.readInstance(base64String); + } + + @Override + public String getSrcPath() { +return this.translator.getProtoOrBuilder().getSrcPath(); + } + + @Override + public void setSrcPath(String path) { +this.translator.getBuilder().setSrcPath(path); + } +} \ No newline at end of file http://git-wip-us.apache.org/repos/asf/hadoop/blob/3688769f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RemoveMountTableEntryResponsePBImpl.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RemoveMountTableEntryResponsePBImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RemoveMountTableEntryResponsePBImpl.java new file mode 100644 index 000..0c943ac --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/protocol/impl/pb/RemoveMountTableEntryResponsePBImpl.java @@ -0,0 +1,76 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + *
[21/29] hadoop git commit: HDFS-12335. Federation Metrics. Contributed by Inigo Goiri.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/da9686d8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterMetrics.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterMetrics.java new file mode 100644 index 000..851538a --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterMetrics.java @@ -0,0 +1,73 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation.router; + +import static org.apache.hadoop.metrics2.impl.MsInfo.ProcessName; +import static org.apache.hadoop.metrics2.impl.MsInfo.SessionId; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.metrics2.MetricsSystem; +import org.apache.hadoop.metrics2.annotation.Metric; +import org.apache.hadoop.metrics2.annotation.Metrics; +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; +import org.apache.hadoop.metrics2.lib.MetricsRegistry; +import org.apache.hadoop.metrics2.lib.MutableGaugeInt; +import org.apache.hadoop.metrics2.source.JvmMetrics; + +/** + * This class is for maintaining the various Router activity statistics + * and publishing them through the metrics interfaces. 
+ */ +@Metrics(name="RouterActivity", about="Router metrics", context="dfs") +public class RouterMetrics { + + private final MetricsRegistry registry = new MetricsRegistry("router"); + + @Metric("Duration in SafeMode at startup in msec") + private MutableGaugeInt safeModeTime; + + private JvmMetrics jvmMetrics = null; + + RouterMetrics( + String processName, String sessionId, final JvmMetrics jvmMetrics) { +this.jvmMetrics = jvmMetrics; +registry.tag(ProcessName, processName).tag(SessionId, sessionId); + } + + public static RouterMetrics create(Configuration conf) { +String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY); +String processName = "Router"; +MetricsSystem ms = DefaultMetricsSystem.instance(); +JvmMetrics jm = JvmMetrics.create(processName, sessionId, ms); + +return ms.register(new RouterMetrics(processName, sessionId, jm)); + } + + public JvmMetrics getJvmMetrics() { +return jvmMetrics; + } + + public void shutdown() { +DefaultMetricsSystem.shutdown(); + } + + public void setSafeModeTime(long elapsed) { +safeModeTime.set((int) elapsed); + } +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/da9686d8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterMetricsService.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterMetricsService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterMetricsService.java new file mode 100644 index 000..f4debce --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterMetricsService.java @@ -0,0 +1,108 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */
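A short, hypothetical sketch of how the metrics above are published; RouterMetrics.create(), setSafeModeTime() and shutdown() come from the diff, while HdfsConfiguration and Time are standard Hadoop classes assumed here for illustration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.server.federation.router.RouterMetrics;
    import org.apache.hadoop.util.Time;

    public class RouterMetricsSketch {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        // Registers the "RouterActivity" source (plus JVM metrics) with the
        // default metrics system under process name "Router".
        RouterMetrics metrics = RouterMetrics.create(conf);

        long start = Time.monotonicNow();
        // ... startup work while the Router is in safe mode would go here ...
        metrics.setSafeModeTime(Time.monotonicNow() - start);

        // Shuts down the default metrics system.
        metrics.shutdown();
      }
    }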
[11/29] hadoop git commit: HDFS-10629. Federation Router. Contributed by Jason Kace and Inigo Goiri.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1fd5f374/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java new file mode 100644 index 000..ee6f57d --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java @@ -0,0 +1,290 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver; +import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeContext; +import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeServiceState; +import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamespaceInfo; +import org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver; +import org.apache.hadoop.hdfs.server.federation.resolver.NamenodePriorityComparator; +import org.apache.hadoop.hdfs.server.federation.resolver.NamenodeStatusReport; +import org.apache.hadoop.hdfs.server.federation.resolver.PathLocation; +import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation; +import org.apache.hadoop.hdfs.server.federation.store.StateStoreService; +import org.apache.hadoop.util.Time; + +/** + * In-memory cache/mock of a namenode and file resolver. Stores the most + * recently updated NN information for each nameservice and block pool. Also + * stores a virtual mount table for resolving global namespace paths to local NN + * paths. 
+ */ +public class MockResolver +implements ActiveNamenodeResolver, FileSubclusterResolver { + + private Map<String, List<? extends FederationNamenodeContext>> resolver = + new HashMap<>(); + private Map<String, List<RemoteLocation>> locations = + new HashMap<>(); + private Set<FederationNamespaceInfo> namespaces = + new HashSet<>(); + private String defaultNamespace = null; + + public MockResolver(Configuration conf, StateStoreService store) { +this.cleanRegistrations(); + } + + public void addLocation(String mount, String nameservice, String location) { +RemoteLocation remoteLocation = new RemoteLocation(nameservice, location); +List<RemoteLocation> locationsList = locations.get(mount); +if (locationsList == null) { + locationsList = new LinkedList<>(); + locations.put(mount, locationsList); +} +if (!locationsList.contains(remoteLocation)) { + locationsList.add(remoteLocation); +} + +if (this.defaultNamespace == null) { + this.defaultNamespace = nameservice; +} + } + + public synchronized void cleanRegistrations() { +this.resolver = +new HashMap<>(); +this.namespaces = new HashSet<>(); + } + + @Override + public void updateActiveNamenode( + String ns, InetSocketAddress successfulAddress) { + +String address = successfulAddress.getHostName() + ":" +successfulAddress.getPort(); +String key = ns; +if (key != null) { + // Update the active entry + @SuppressWarnings("unchecked") + List<FederationNamenodeContext> iterator = + (List<FederationNamenodeContext>) resolver.get(key); + for (FederationNamenodeContext namenode : iterator) { +if (namenode.getRpcAddress().equals(address)) { + MockNamenodeContext nn = (MockNamenodeContext) namenode; + nn.setState(FederationNamenodeServiceState.ACTIVE); + break; +} + } + Collections.sort(iterator, new NamenodePriorityComparator()); +} + } + + @Override + public List<? extends FederationNamenodeContext> + getNamenodesForNameserviceId(String nameserviceId) { +return resolver.get(nameserviceId); + }
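To illustrate, a hypothetical test fragment exercising only the MockResolver methods shown above:

    import java.util.List;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.server.federation.MockResolver;
    import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeContext;

    public class MockResolverSketch {
      public static void main(String[] args) {
        // The state store argument is unused by the mock, so null suffices.
        MockResolver resolver = new MockResolver(new Configuration(), null);

        // Map the global path /data to /data on nameservice ns0; the first
        // nameservice added becomes the default namespace.
        resolver.addLocation("/data", "ns0", "/data");

        // Returns null until namenode registrations are added to the mock.
        List<? extends FederationNamenodeContext> namenodes =
            resolver.getNamenodesForNameserviceId("ns0");
        System.out.println(namenodes);
      }
    }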
[29/29] hadoop git commit: HDFS-11554. [Documentation] Router-based federation documentation. Contributed by Inigo Goiri.
HDFS-11554. [Documentation] Router-based federation documentation. Contributed by Inigo Goiri. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b434bb28 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b434bb28 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b434bb28 Branch: refs/heads/HDFS-10467 Commit: b434bb2826be0eafde4d43e3609c77f54af36c3a Parents: 15b1550 Author: Inigo Goiri Authored: Wed Aug 16 17:23:29 2017 -0700 Committer: Inigo Goiri Committed: Mon Oct 2 16:44:13 2017 -0700 -- .../src/site/markdown/HDFSRouterFederation.md | 170 +++ .../site/resources/images/routerfederation.png | Bin 0 -> 24961 bytes 2 files changed, 170 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/b434bb28/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md new file mode 100644 index 000..f094238 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md @@ -0,0 +1,170 @@ + +HDFS Router-based Federation + + + + +Introduction + + +NameNodes have scalability limits because of the metadata overhead comprised of inodes (files and directories) and file blocks, the number of Datanode heartbeats, and the number of HDFS RPC client requests. +The common solution is to split the filesystem into smaller subclusters [HDFS Federation](./Federation.html) and provide a federated view [ViewFs](./ViewFs.html). +The problem is how to maintain the split of the subclusters (e.g., namespace partition), which forces users to connect to multiple subclusters and manage the allocation of folders/files to them. + + +Architecture + + +A natural extension to this partitioned federation is to add a layer of software responsible for federating the namespaces. +This extra layer allows users to access any subcluster transparently, lets subclusters manage their own block pools independently, and supports rebalancing of data across subclusters. +To accomplish these goals, the federation layer directs block accesses to the proper subcluster, maintains the state of the namespaces, and provides mechanisms for data rebalancing. +This layer must be scalable, highly available, and fault tolerant. + +This federation layer comprises multiple components. +The _Router_ component has the same interface as a NameNode and forwards client requests to the correct subcluster, based on ground-truth information from a State Store. +The _State Store_ combines a remote _Mount Table_ (in the flavor of [ViewFs](./ViewFs.html), but shared between clients) and utilization (load/capacity) information about the subclusters. +This approach has the same architecture as [YARN federation](../hadoop-yarn/Federation.html). + +![Router-based Federation Sequence Diagram | width=800](./images/routerfederation.png) + + +### Example flow +The simplest configuration deploys a Router on each NameNode machine. +The Router monitors the local NameNode and heartbeats the state to the State Store. +When a regular DFS client contacts any of the Routers to access a file in the federated filesystem, the Router checks the Mount Table in the State Store (i.e., the local cache) to find out which subcluster contains the file. +Then it checks the Membership table in the State Store (i.e., the local cache) for the NameNode responsible for the subcluster.
+After it has identified the correct NameNode, the Router proxies the request. +The client accesses Datanodes directly. + + +### Router +There can be multiple Routers in the system with soft state. +Each Router has two roles: + +* Federated interface: expose a single, global NameNode interface to the clients and forward the requests to the active NameNode in the correct subcluster +* NameNode heartbeat: maintain the information about a NameNode in the State Store + +#### Federated interface +The Router receives a client request, checks the State Store for the correct subcluster, and forwards the request to the active NameNode of that subcluster. +The reply from the NameNode then flows in the opposite direction. +The Routers are stateless and can be behind a load balancer. +For performance, the Router also caches remote mount table entries and the state of the subclusters. +To make sure that changes have been propagated to all Routers, each Router heartbeats its state to the State Store. + +The communications between the Routers and the State Store are cached (with timed
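To make the example flow concrete, a hypothetical client-side sketch: a regular DFS client just points fs.defaultFS at any Router and uses the normal FileSystem API (the router host and port below are made up):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class RouterClientSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Any Router can serve the request; they are stateless and can sit
        // behind a load balancer, as described above.
        conf.set("fs.defaultFS", "hdfs://router.example.com:8888");

        // The Router resolves the path against the Mount Table, picks the
        // active NameNode of the owning subcluster, and proxies the call;
        // data reads and writes then go directly to the Datanodes.
        FileSystem fs = FileSystem.get(conf);
        System.out.println(fs.getFileStatus(new Path("/tmp/file.txt")));
      }
    }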
[23/29] hadoop git commit: HDFS-12312. Rebasing HDFS-10467 (2). Contributed by Inigo Goiri.
HDFS-12312. Rebasing HDFS-10467 (2). Contributed by Inigo Goiri. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/48a8fee2 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/48a8fee2 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/48a8fee2 Branch: refs/heads/HDFS-10467 Commit: 48a8fee2c9c90217fb961c692999e055e75d5161 Parents: b434bb2 Author: Inigo Goiri Authored: Wed Aug 16 17:31:37 2017 -0700 Committer: Inigo Goiri Committed: Mon Oct 2 16:44:13 2017 -0700 -- hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs | 1 - .../hadoop/hdfs/server/federation/router/RouterRpcServer.java | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/48a8fee2/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs index d51a8e2..d122ff7 100755 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs @@ -31,7 +31,6 @@ function hadoop_usage hadoop_add_option "--hosts filename" "list of hosts to use in worker mode" hadoop_add_option "--workers" "turn on worker mode" -<<<<<<< HEAD hadoop_add_subcommand "balancer" daemon "run a cluster balancing utility" hadoop_add_subcommand "cacheadmin" admin "configure the HDFS cache" hadoop_add_subcommand "classpath" client "prints the class path needed to get the hadoop jar and the required libraries" http://git-wip-us.apache.org/repos/asf/hadoop/blob/48a8fee2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java index eaaab39..c77d255 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java @@ -1946,6 +1946,7 @@ public class RouterRpcServer extends AbstractService implements ClientProtocol { } long inodeId = 0; return new HdfsFileStatus(0, true, 0, 0, modTime, accessTime, permission, +EnumSet.noneOf(HdfsFileStatus.Flags.class), owner, group, new byte[0], DFSUtil.string2Bytes(name), inodeId, childrenNum, null, (byte) 0, null); }
[04/29] hadoop git commit: HDFS-10687. Federation Membership State Store internal API. Contributed by Jason Kace and Inigo Goiri.
HDFS-10687. Federation Membership State Store internal API. Contributed by Jason Kace and Inigo Goiri. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/651420b7 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/651420b7 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/651420b7 Branch: refs/heads/HDFS-10467 Commit: 651420b79b114dacb31c199e6c219febc1493fda Parents: 6e57aef Author: Inigo Goiri Authored: Mon Jul 31 10:55:21 2017 -0700 Committer: Inigo Goiri Committed: Mon Oct 2 16:44:12 2017 -0700 -- .../dev-support/findbugsExcludeFile.xml | 3 + hadoop-hdfs-project/hadoop-hdfs/pom.xml | 1 + .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 17 +- .../resolver/MembershipNamenodeResolver.java| 290 .../federation/router/FederationUtil.java | 42 +- .../federation/store/CachedRecordStore.java | 237 ++ .../federation/store/MembershipStore.java | 126 + .../federation/store/StateStoreCache.java | 36 ++ .../store/StateStoreCacheUpdateService.java | 67 +++ .../federation/store/StateStoreService.java | 202 +++- .../store/impl/MembershipStoreImpl.java | 311 + .../federation/store/impl/package-info.java | 31 ++ .../GetNamenodeRegistrationsRequest.java| 52 +++ .../GetNamenodeRegistrationsResponse.java | 55 +++ .../store/protocol/GetNamespaceInfoRequest.java | 30 ++ .../protocol/GetNamespaceInfoResponse.java | 52 +++ .../protocol/NamenodeHeartbeatRequest.java | 52 +++ .../protocol/NamenodeHeartbeatResponse.java | 49 ++ .../UpdateNamenodeRegistrationRequest.java | 72 +++ .../UpdateNamenodeRegistrationResponse.java | 51 ++ .../impl/pb/FederationProtocolPBTranslator.java | 145 ++ .../GetNamenodeRegistrationsRequestPBImpl.java | 87 .../GetNamenodeRegistrationsResponsePBImpl.java | 99 .../impl/pb/GetNamespaceInfoRequestPBImpl.java | 60 +++ .../impl/pb/GetNamespaceInfoResponsePBImpl.java | 95 .../impl/pb/NamenodeHeartbeatRequestPBImpl.java | 93 .../pb/NamenodeHeartbeatResponsePBImpl.java | 71 +++ ...UpdateNamenodeRegistrationRequestPBImpl.java | 95 ...pdateNamenodeRegistrationResponsePBImpl.java | 73 +++ .../store/protocol/impl/pb/package-info.java| 29 ++ .../store/records/MembershipState.java | 329 + .../store/records/MembershipStats.java | 126 + .../records/impl/pb/MembershipStatePBImpl.java | 334 + .../records/impl/pb/MembershipStatsPBImpl.java | 191 .../src/main/proto/FederationProtocol.proto | 107 + .../src/main/resources/hdfs-default.xml | 18 +- .../resolver/TestNamenodeResolver.java | 284 .../store/FederationStateStoreTestUtils.java| 23 +- .../federation/store/TestStateStoreBase.java| 81 .../store/TestStateStoreMembershipState.java| 463 +++ .../store/driver/TestStateStoreDriverBase.java | 69 ++- .../store/records/TestMembershipState.java | 129 ++ 42 files changed, 4745 insertions(+), 32 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/651420b7/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml index 9582fcb..4b958b5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml @@ -15,6 +15,9 @@ + + + http://git-wip-us.apache.org/repos/asf/hadoop/blob/651420b7/hadoop-hdfs-project/hadoop-hdfs/pom.xml -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml index 425572f..cc7a975 100644 ---
a/hadoop-hdfs-project/hadoop-hdfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml @@ -331,6 +331,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> <include>QJournalProtocol.proto</include> <include>editlog.proto</include> <include>fsimage.proto</include> + <include>FederationProtocol.proto</include> http://git-wip-us.apache.org/repos/asf/hadoop/blob/651420b7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
[06/29] hadoop git commit: HDFS-11546. Federation Router RPC server. Contributed by Jason Kace and Inigo Goiri.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b417be8f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java index 24792bb..4bae71e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java @@ -17,16 +17,109 @@ */ package org.apache.hadoop.hdfs.server.federation.router; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_HANDLER_COUNT_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_HANDLER_COUNT_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_HANDLER_QUEUE_SIZE_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_HANDLER_QUEUE_SIZE_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_READER_COUNT_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_READER_COUNT_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_READER_QUEUE_SIZE_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_READER_QUEUE_SIZE_KEY; + +import java.io.FileNotFoundException; import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.Collection; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; +import java.util.TreeMap; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.crypto.CryptoProtocolVersion; +import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries; +import org.apache.hadoop.fs.CacheFlag; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.ContentSummary; +import org.apache.hadoop.fs.CreateFlag; +import org.apache.hadoop.fs.FileAlreadyExistsException; +import org.apache.hadoop.fs.FsServerDefaults; +import org.apache.hadoop.fs.Options; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.QuotaUsage; +import org.apache.hadoop.fs.StorageType; +import org.apache.hadoop.fs.XAttr; +import org.apache.hadoop.fs.XAttrSetFlag; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.AddBlockFlag; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DFSUtil; +import org.apache.hadoop.hdfs.inotify.EventBatchList; +import org.apache.hadoop.hdfs.protocol.AddingECPolicyResponse; +import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; +import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry; +import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; +import org.apache.hadoop.hdfs.protocol.CachePoolEntry; +import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.ClientProtocol; +import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; +import org.apache.hadoop.hdfs.protocol.DatanodeID; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import 
org.apache.hadoop.hdfs.protocol.DirectoryListing; +import org.apache.hadoop.hdfs.protocol.EncryptionZone; +import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; +import org.apache.hadoop.hdfs.protocol.ExtendedBlock; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; +import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; +import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus; +import org.apache.hadoop.hdfs.protocol.LocatedBlock; +import org.apache.hadoop.hdfs.protocol.LocatedBlocks; +import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo; +import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; +import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol; +import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB; +import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB; +import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey; +import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver; +import
[08/29] hadoop git commit: HDFS-11546. Federation Router RPC server. Contributed by Jason Kace and Inigo Goiri.
HDFS-11546. Federation Router RPC server. Contributed by Jason Kace and Inigo Goiri. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b417be8f Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b417be8f Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b417be8f Branch: refs/heads/HDFS-10467 Commit: b417be8f87f9b2c90e10469a1fa0195f80cd88dd Parents: a5add7bc Author: Inigo Goiri Authored: Thu May 11 09:57:03 2017 -0700 Committer: Inigo Goiri Committed: Mon Oct 2 16:44:12 2017 -0700 -- .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 38 + .../resolver/FederationNamespaceInfo.java | 46 +- .../federation/resolver/RemoteLocation.java | 46 +- .../federation/router/ConnectionContext.java| 104 + .../federation/router/ConnectionManager.java| 408 .../federation/router/ConnectionPool.java | 314 +++ .../federation/router/ConnectionPoolId.java | 117 ++ .../router/RemoteLocationContext.java | 38 +- .../server/federation/router/RemoteMethod.java | 164 ++ .../server/federation/router/RemoteParam.java | 71 + .../hdfs/server/federation/router/Router.java | 58 +- .../federation/router/RouterRpcClient.java | 856 .../federation/router/RouterRpcServer.java | 1867 +- .../src/main/resources/hdfs-default.xml | 95 + .../server/federation/FederationTestUtils.java | 80 +- .../hdfs/server/federation/MockResolver.java| 90 +- .../server/federation/RouterConfigBuilder.java | 20 +- .../server/federation/RouterDFSCluster.java | 535 +++-- .../server/federation/router/TestRouter.java| 31 +- .../server/federation/router/TestRouterRpc.java | 869 .../router/TestRouterRpcMultiDestination.java | 216 ++ 21 files changed, 5675 insertions(+), 388 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/b417be8f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index 10074ce..c7b4c01 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -1120,6 +1120,44 @@ public class DFSConfigKeys extends CommonConfigurationKeys { // HDFS Router-based federation public static final String FEDERATION_ROUTER_PREFIX = "dfs.federation.router."; + public static final String DFS_ROUTER_DEFAULT_NAMESERVICE = + FEDERATION_ROUTER_PREFIX + "default.nameserviceId"; + public static final String DFS_ROUTER_HANDLER_COUNT_KEY = + FEDERATION_ROUTER_PREFIX + "handler.count"; + public static final int DFS_ROUTER_HANDLER_COUNT_DEFAULT = 10; + public static final String DFS_ROUTER_READER_QUEUE_SIZE_KEY = + FEDERATION_ROUTER_PREFIX + "reader.queue.size"; + public static final int DFS_ROUTER_READER_QUEUE_SIZE_DEFAULT = 100; + public static final String DFS_ROUTER_READER_COUNT_KEY = + FEDERATION_ROUTER_PREFIX + "reader.count"; + public static final int DFS_ROUTER_READER_COUNT_DEFAULT = 1; + public static final String DFS_ROUTER_HANDLER_QUEUE_SIZE_KEY = + FEDERATION_ROUTER_PREFIX + "handler.queue.size"; + public static final int DFS_ROUTER_HANDLER_QUEUE_SIZE_DEFAULT = 100; + public static final String DFS_ROUTER_RPC_BIND_HOST_KEY = + FEDERATION_ROUTER_PREFIX + "rpc-bind-host"; + public static final int DFS_ROUTER_RPC_PORT_DEFAULT = 8888; + public static final String DFS_ROUTER_RPC_ADDRESS_KEY = +
FEDERATION_ROUTER_PREFIX + "rpc-address"; + public static final String DFS_ROUTER_RPC_ADDRESS_DEFAULT = + "0.0.0.0:" + DFS_ROUTER_RPC_PORT_DEFAULT; + public static final String DFS_ROUTER_RPC_ENABLE = + FEDERATION_ROUTER_PREFIX + "rpc.enable"; + public static final boolean DFS_ROUTER_RPC_ENABLE_DEFAULT = true; + + // HDFS Router NN client + public static final String DFS_ROUTER_NAMENODE_CONNECTION_POOL_SIZE = + FEDERATION_ROUTER_PREFIX + "connection.pool-size"; + public static final int DFS_ROUTER_NAMENODE_CONNECTION_POOL_SIZE_DEFAULT = + 64; + public static final String DFS_ROUTER_NAMENODE_CONNECTION_POOL_CLEAN = + FEDERATION_ROUTER_PREFIX + "connection.pool.clean.ms"; + public static final long DFS_ROUTER_NAMENODE_CONNECTION_POOL_CLEAN_DEFAULT = + TimeUnit.MINUTES.toMillis(1); + public static final String DFS_ROUTER_NAMENODE_CONNECTION_CLEAN_MS = + FEDERATION_ROUTER_PREFIX + "connection.clean.ms"; + public
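A brief, hypothetical sketch of overriding the RPC settings introduced above, using only the DFSConfigKeys constants from this diff (the values are illustrative, not recommendations):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class RouterRpcConfigSketch {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        // Listen on all interfaces and enlarge the handler pool.
        conf.set(DFSConfigKeys.DFS_ROUTER_RPC_BIND_HOST_KEY, "0.0.0.0");
        conf.setInt(DFSConfigKeys.DFS_ROUTER_HANDLER_COUNT_KEY, 20);
        conf.setInt(DFSConfigKeys.DFS_ROUTER_HANDLER_QUEUE_SIZE_KEY, 200);
        // The RPC server is enabled by default; shown here for completeness.
        conf.setBoolean(DFSConfigKeys.DFS_ROUTER_RPC_ENABLE, true);
      }
    }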
[19/29] hadoop git commit: HDFS-10882. Federation State Store Interface API. Contributed by Jason Kace and Inigo Goiri.
HDFS-10882. Federation State Store Interface API. Contributed by Jason Kace and Inigo Goiri. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3c88794b Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3c88794b Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3c88794b Branch: refs/heads/HDFS-10467 Commit: 3c88794b1be68a8c3f6f80a3936c1245bb780135 Parents: 0da377a Author: Inigo Authored: Thu Apr 6 19:18:52 2017 -0700 Committer: Inigo Goiri Committed: Mon Oct 2 16:44:12 2017 -0700 -- .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 11 ++ .../server/federation/store/RecordStore.java| 100 .../store/driver/StateStoreSerializer.java | 119 +++ .../driver/impl/StateStoreSerializerPBImpl.java | 115 ++ .../store/records/impl/pb/PBRecord.java | 47 .../store/records/impl/pb/package-info.java | 29 + .../src/main/resources/hdfs-default.xml | 8 ++ 7 files changed, 429 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c88794b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index 136665b..b8f13f3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault; import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolerant; import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaLruTracker; +import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreSerializerPBImpl; import org.apache.hadoop.http.HttpConfig; /** @@ -1126,6 +1127,16 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final String FEDERATION_NAMENODE_RESOLVER_CLIENT_CLASS_DEFAULT = "org.apache.hadoop.hdfs.server.federation.MockResolver"; + // HDFS Router-based federation State Store + public static final String FEDERATION_STORE_PREFIX = + FEDERATION_ROUTER_PREFIX + "store."; + + public static final String FEDERATION_STORE_SERIALIZER_CLASS = + DFSConfigKeys.FEDERATION_STORE_PREFIX + "serializer"; + public static final Class<StateStoreSerializerPBImpl> + FEDERATION_STORE_SERIALIZER_CLASS_DEFAULT = + StateStoreSerializerPBImpl.class; + // dfs.client.retry confs are moved to HdfsClientConfigKeys.Retry @Deprecated public static final String DFS_CLIENT_RETRY_POLICY_ENABLED_KEY http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c88794b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/RecordStore.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/RecordStore.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/RecordStore.java new file mode 100644 index 000..524f432 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/RecordStore.java @@ -0,0 +1,100 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements.
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation.store; + +import java.lang.reflect.Constructor; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import
[24/29] hadoop git commit: HDFS-12450. Fixing TestNamenodeHeartbeat and support non-HA. Contributed by Inigo Goiri.
HDFS-12450. Fixing TestNamenodeHeartbeat and support non-HA. Contributed by Inigo Goiri. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/15ead41d Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/15ead41d Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/15ead41d Branch: refs/heads/HDFS-10467 Commit: 15ead41defac49bb7b37002691d6f2c1bda85ac0 Parents: 2f4501f Author: Inigo Goiri Authored: Fri Sep 15 16:02:12 2017 -0700 Committer: Inigo Goiri Committed: Mon Oct 2 16:44:13 2017 -0700 -- .../router/NamenodeHeartbeatService.java| 47 .../server/federation/RouterDFSCluster.java | 23 +- 2 files changed, 50 insertions(+), 20 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/15ead41d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java index fe4f939..38f63e5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/NamenodeHeartbeatService.java @@ -94,8 +94,9 @@ public class NamenodeHeartbeatService extends PeriodicService { */ public NamenodeHeartbeatService( ActiveNamenodeResolver resolver, String nsId, String nnId) { -super(NamenodeHeartbeatService.class.getSimpleName() + " " + nsId + " " + -nnId); +super(NamenodeHeartbeatService.class.getSimpleName() + +(nsId == null ? "" : " " + nsId) + +(nnId == null ?
"" : " " + nnId)); this.resolver = resolver; @@ -109,28 +110,28 @@ public class NamenodeHeartbeatService extends PeriodicService { this.conf = configuration; +String nnDesc = nameserviceId; if (this.namenodeId != null && !this.namenodeId.isEmpty()) { this.localTarget = new NNHAServiceTarget( conf, nameserviceId, namenodeId); + nnDesc += "-" + namenodeId; } else { this.localTarget = null; } // Get the RPC address for the clients to connect this.rpcAddress = getRpcAddress(conf, nameserviceId, namenodeId); -LOG.info("{}-{} RPC address: {}", -nameserviceId, namenodeId, rpcAddress); +LOG.info("{} RPC address: {}", nnDesc, rpcAddress); // Get the Service RPC address for monitoring this.serviceAddress = DFSUtil.getNamenodeServiceAddr(conf, nameserviceId, namenodeId); if (this.serviceAddress == null) { - LOG.error("Cannot locate RPC service address for NN {}-{}, " + - "using RPC address {}", nameserviceId, namenodeId, this.rpcAddress); + LOG.error("Cannot locate RPC service address for NN {}, " + + "using RPC address {}", nnDesc, this.rpcAddress); this.serviceAddress = this.rpcAddress; } -LOG.info("{}-{} Service RPC address: {}", -nameserviceId, namenodeId, serviceAddress); +LOG.info("{} Service RPC address: {}", nnDesc, serviceAddress); // Get the Lifeline RPC address for faster monitoring this.lifelineAddress = @@ -138,13 +139,12 @@ public class NamenodeHeartbeatService extends PeriodicService { if (this.lifelineAddress == null) { this.lifelineAddress = this.serviceAddress; } -LOG.info("{}-{} Lifeline RPC address: {}", -nameserviceId, namenodeId, lifelineAddress); +LOG.info("{} Lifeline RPC address: {}", nnDesc, lifelineAddress); // Get the Web address for UI this.webAddress = DFSUtil.getNamenodeWebAddr(conf, nameserviceId, namenodeId); -LOG.info("{}-{} Web address: {}", nameserviceId, namenodeId, webAddress); +LOG.info("{} Web address: {}", nnDesc, webAddress); this.setIntervalMs(conf.getLong( DFS_ROUTER_HEARTBEAT_INTERVAL_MS, @@ -173,7 +173,7 @@ public class NamenodeHeartbeatService extends PeriodicService { String confKey = DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY; String ret = conf.get(confKey); -if (nsId != null && nnId != null) { +if (nsId != null || nnId != null) { // Get if for the proper nameservice and namenode confKey = DFSUtil.addKeySuffixes(confKey, nsId, nnId); ret = conf.get(confKey); @@ -182,10 +182,16 @@ public class NamenodeHeartbeatService extends PeriodicService { if (ret == null) { Map
[10/29] hadoop git commit: HDFS-10630. Federation State Store FS Implementation. Contributed by Jason Kace and Inigo Goiri.
HDFS-10630. Federation State Store FS Implementation. Contributed by Jason Kace and Inigo Goiri. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a5add7bc Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a5add7bc Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a5add7bc Branch: refs/heads/HDFS-10467 Commit: a5add7bc1a04c916bd7eb0e1634175f8a3e72424 Parents: 3c88794 Author: Inigo Goiri Authored: Tue May 2 15:49:53 2017 -0700 Committer: Inigo Goiri Committed: Mon Oct 2 16:44:12 2017 -0700 -- .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 14 + .../federation/router/PeriodicService.java | 198 .../StateStoreConnectionMonitorService.java | 67 +++ .../federation/store/StateStoreService.java | 152 +- .../federation/store/StateStoreUtils.java | 51 +- .../store/driver/StateStoreDriver.java | 31 +- .../driver/StateStoreRecordOperations.java | 17 +- .../store/driver/impl/StateStoreBaseImpl.java | 31 +- .../driver/impl/StateStoreFileBaseImpl.java | 429 .../store/driver/impl/StateStoreFileImpl.java | 161 +++ .../driver/impl/StateStoreFileSystemImpl.java | 178 +++ .../driver/impl/StateStoreSerializableImpl.java | 77 +++ .../federation/store/records/BaseRecord.java| 20 +- .../server/federation/store/records/Query.java | 66 +++ .../src/main/resources/hdfs-default.xml | 16 + .../store/FederationStateStoreTestUtils.java| 232 + .../store/driver/TestStateStoreDriverBase.java | 483 +++ .../store/driver/TestStateStoreFile.java| 64 +++ .../store/driver/TestStateStoreFileSystem.java | 88 19 files changed, 2329 insertions(+), 46 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5add7bc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index b8f13f3..10074ce 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hdfs; +import java.util.concurrent.TimeUnit; + import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; @@ -25,6 +27,8 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault; import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolerant; import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaLruTracker; +import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver; +import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileImpl; import org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreSerializerPBImpl; import org.apache.hadoop.http.HttpConfig; @@ -1137,6 +1141,16 @@ public class DFSConfigKeys extends CommonConfigurationKeys { FEDERATION_STORE_SERIALIZER_CLASS_DEFAULT = StateStoreSerializerPBImpl.class; + public static final String FEDERATION_STORE_DRIVER_CLASS = + FEDERATION_STORE_PREFIX + "driver.class"; + public static final Class<? extends StateStoreDriver> + FEDERATION_STORE_DRIVER_CLASS_DEFAULT = StateStoreFileImpl.class; + + public static final String FEDERATION_STORE_CONNECTION_TEST_MS = + FEDERATION_STORE_PREFIX +
"connection.test"; + public static final long FEDERATION_STORE_CONNECTION_TEST_MS_DEFAULT = + TimeUnit.MINUTES.toMillis(1); + // dfs.client.retry confs are moved to HdfsClientConfigKeys.Retry @Deprecated public static final String DFS_CLIENT_RETRY_POLICY_ENABLED_KEY http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5add7bc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/PeriodicService.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/PeriodicService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/PeriodicService.java new file mode 100644 index 000..5e1 --- /dev/null +++
[25/29] hadoop git commit: HDFS-12430. Rebasing HDFS-10467 After HDFS-12269 and HDFS-12218. Contributed by Inigo Goiri.
HDFS-12430. Rebasing HDFS-10467 After HDFS-12269 and HDFS-12218. Contributed by Inigo Goiri. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2f4501f5 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2f4501f5 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2f4501f5 Branch: refs/heads/HDFS-10467 Commit: 2f4501f5dc6d7aa11819eed94ecdb619e100a35d Parents: da9686d Author: Inigo Goiri Authored: Wed Sep 13 09:15:13 2017 -0700 Committer: Inigo Goiri Committed: Mon Oct 2 16:44:13 2017 -0700 -- .../hdfs/server/federation/router/RouterRpcServer.java| 10 +- 1 file changed, 5 insertions(+), 5 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f4501f5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java index 6aee1ee..1fa1720 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java @@ -66,7 +66,6 @@ import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.inotify.EventBatchList; import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse; import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; -import org.apache.hadoop.hdfs.protocol.BlocksStats; import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry; import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; import org.apache.hadoop.hdfs.protocol.CachePoolEntry; @@ -76,7 +75,7 @@ import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DirectoryListing; -import org.apache.hadoop.hdfs.protocol.ECBlockGroupsStats; +import org.apache.hadoop.hdfs.protocol.ECBlockGroupStats; import org.apache.hadoop.hdfs.protocol.EncryptionZone; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; @@ -89,6 +88,7 @@ import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.OpenFileEntry; +import org.apache.hadoop.hdfs.protocol.ReplicatedBlockStats; import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; @@ -1879,19 +1879,19 @@ public class RouterRpcServer extends AbstractService implements ClientProtocol { } @Override - public ECBlockGroupsStats getECBlockGroupsStats() throws IOException { + public ECBlockGroupStats getECBlockGroupStats() throws IOException { checkOperation(OperationCategory.READ, false); return null; } @Override - public HashMap<String, String> getErasureCodingCodecs() throws IOException { + public Map<String, String> getErasureCodingCodecs() throws IOException { checkOperation(OperationCategory.READ, false); return null; } @Override - public BlocksStats getBlocksStats() throws IOException { + public ReplicatedBlockStats getReplicatedBlockStats() throws IOException {
checkOperation(OperationCategory.READ, false); return null; }
[22/29] hadoop git commit: HDFS-12335. Federation Metrics. Contributed by Inigo Goiri.
HDFS-12335. Federation Metrics. Contributed by Inigo Goiri. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/da9686d8 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/da9686d8 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/da9686d8 Branch: refs/heads/HDFS-10467 Commit: da9686d8b74d4de583d630f3746f3cdfc365e817 Parents: bec2f39 Author: Inigo Goiri Authored: Fri Sep 8 09:37:10 2017 -0700 Committer: Inigo Goiri Committed: Mon Oct 2 16:44:13 2017 -0700 -- .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 11 + .../federation/metrics/FederationMBean.java | 204 ++ .../federation/metrics/FederationMetrics.java | 673 +++ .../federation/metrics/FederationRPCMBean.java | 90 +++ .../metrics/FederationRPCMetrics.java | 239 +++ .../FederationRPCPerformanceMonitor.java| 211 ++ .../federation/metrics/NamenodeBeanMetrics.java | 624 + .../federation/metrics/StateStoreMBean.java | 45 ++ .../federation/metrics/StateStoreMetrics.java | 144 .../server/federation/metrics/package-info.java | 27 + .../federation/router/ConnectionManager.java| 23 + .../federation/router/ConnectionPool.java | 23 + .../hdfs/server/federation/router/Router.java | 62 ++ .../server/federation/router/RouterMetrics.java | 73 ++ .../federation/router/RouterMetricsService.java | 108 +++ .../federation/router/RouterRpcClient.java | 39 +- .../federation/router/RouterRpcMonitor.java | 95 +++ .../federation/router/RouterRpcServer.java | 63 +- .../federation/store/CachedRecordStore.java | 8 + .../federation/store/StateStoreService.java | 42 +- .../store/driver/StateStoreDriver.java | 17 +- .../driver/impl/StateStoreSerializableImpl.java | 6 +- .../driver/impl/StateStoreZooKeeperImpl.java| 26 + .../store/records/MembershipState.java | 2 +- .../federation/store/records/MountTable.java| 23 + .../records/impl/pb/MembershipStatePBImpl.java | 5 +- .../src/main/resources/hdfs-default.xml | 19 +- .../server/federation/FederationTestUtils.java | 13 + .../server/federation/RouterConfigBuilder.java | 13 + .../metrics/TestFederationMetrics.java | 237 +++ .../federation/metrics/TestMetricsBase.java | 150 + .../server/federation/router/TestRouter.java| 23 +- .../store/driver/TestStateStoreDriverBase.java | 69 ++ 33 files changed, 3383 insertions(+), 24 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/da9686d8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index 7bb08af..57251a7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -29,6 +29,8 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFau import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.RamDiskReplicaLruTracker; import org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver; import org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver; +import org.apache.hadoop.hdfs.server.federation.router.RouterRpcMonitor; +import org.apache.hadoop.hdfs.server.federation.metrics.FederationRPCPerformanceMonitor; import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver; import
org.apache.hadoop.hdfs.server.federation.resolver.MembershipNamenodeResolver; import org.apache.hadoop.hdfs.server.federation.store.driver.StateStoreDriver; @@ -1149,6 +1151,15 @@ public class DFSConfigKeys extends CommonConfigurationKeys { FEDERATION_ROUTER_PREFIX + "rpc.enable"; public static final boolean DFS_ROUTER_RPC_ENABLE_DEFAULT = true; + public static final String DFS_ROUTER_METRICS_ENABLE = + FEDERATION_ROUTER_PREFIX + "metrics.enable"; + public static final boolean DFS_ROUTER_METRICS_ENABLE_DEFAULT = true; + public static final String DFS_ROUTER_METRICS_CLASS = + FEDERATION_ROUTER_PREFIX + "metrics.class"; + public static final Class<? extends RouterRpcMonitor> + DFS_ROUTER_METRICS_CLASS_DEFAULT = + FederationRPCPerformanceMonitor.class; + // HDFS Router heartbeat public static final String DFS_ROUTER_HEARTBEAT_ENABLE = FEDERATION_ROUTER_PREFIX + "heartbeat.enable";
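A hypothetical sketch of toggling the new metrics layer with the keys above; both constants and classes appear in this commit's diff:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.server.federation.metrics.FederationRPCPerformanceMonitor;
    import org.apache.hadoop.hdfs.server.federation.router.RouterRpcMonitor;

    public class RouterMetricsConfigSketch {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        // Metrics are on by default; disabling them skips registration of
        // the Federation, FederationRPC and StateStore beans listed above.
        conf.setBoolean(DFSConfigKeys.DFS_ROUTER_METRICS_ENABLE, true);
        // The RPC monitor implementation is pluggable; this is the default.
        conf.setClass(DFSConfigKeys.DFS_ROUTER_METRICS_CLASS,
            FederationRPCPerformanceMonitor.class, RouterRpcMonitor.class);
      }
    }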
hadoop git commit: YARN-7259. Add size-based rolling policy to LogAggregationIndexedFileController. (xgong via wangda)
Repository: hadoop Updated Branches: refs/heads/branch-3.0 aef3e4b6a -> c2f751cb0 YARN-7259. Add size-based rolling policy to LogAggregationIndexedFileController. (xgong via wangda) Change-Id: Ifaf82c0aee6b73b9b6ebf103aa72e131e3942f31 (cherry picked from commit 280080fad01304c85a9ede4d4f7b707eb36c0155) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c2f751cb Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c2f751cb Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c2f751cb Branch: refs/heads/branch-3.0 Commit: c2f751cb0f002214d6fc8f9c9a50522b397b070a Parents: aef3e4b Author: Wangda Tan Authored: Mon Oct 2 15:30:22 2017 -0700 Committer: Xuan Committed: Mon Oct 2 16:37:27 2017 -0700 -- .../ifile/IndexedFileAggregatedLogsBlock.java | 14 +- .../LogAggregationIndexedFileController.java| 397 +-- .../TestLogAggregationIndexFileController.java | 67 +++- 3 files changed, 340 insertions(+), 138 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2f751cb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/IndexedFileAggregatedLogsBlock.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/IndexedFileAggregatedLogsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/IndexedFileAggregatedLogsBlock.java index c4cbfda..5439b53 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/IndexedFileAggregatedLogsBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/IndexedFileAggregatedLogsBlock.java @@ -101,10 +101,9 @@ public class IndexedFileAggregatedLogsBlock extends LogAggregationHtmlBlock { return; } -Map<String, FileStatus> checkSumFiles; +Map<String, Long> checkSumFiles; try { - checkSumFiles = fileController.filterFiles(nodeFiles, - LogAggregationIndexedFileController.CHECK_SUM_FILE_SUFFIX); + checkSumFiles = fileController.parseCheckSumFiles(nodeFiles); } catch (IOException ex) { LOG.error("Error getting logs for " + logEntity, ex); html.h1("Error getting logs for " + logEntity); @@ -125,12 +124,11 @@ document.write("Last Published: " + docu wait String desiredLogType = $(CONTAINER_LOG_TYPE); try { for (FileStatus thisNodeFile : fileToRead) { -FileStatus checkSum = fileController.getAllChecksumFiles( -checkSumFiles, thisNodeFile.getPath().getName()); +Long checkSumIndex = checkSumFiles.get( +thisNodeFile.getPath().getName()); long endIndex = -1; -if (checkSum != null) { - endIndex = fileController.loadIndexedLogsCheckSum( - checkSum.getPath()); +if (checkSumIndex != null) { + endIndex = checkSumIndex.longValue(); } IndexedLogsMeta indexedLogsMeta = null; try { http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2f751cb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java index 243945e..800c0a2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java @@ -29,6 +29,8 @@ import java.io.InputStream; import java.io.OutputStream; import java.io.Serializable; import java.nio.charset.Charset; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.Arrays; @@ -41,7 +43,6 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; import
hadoop git commit: YARN-7194. Log aggregation status is always Failed with the newly added log aggregation IndexedFileFormat. Contributed by Xuan Gong.
Repository: hadoop Updated Branches: refs/heads/branch-3.0 eb76d3dbe -> aef3e4b6a YARN-7194. Log aggregation status is always Failed with the newly added log aggregation IndexedFileFormat. Contributed by Xuan Gong. (cherry picked from commit c92c1d521eadfd8a4cd8205cc6aee74816f353f4) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aef3e4b6 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aef3e4b6 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aef3e4b6 Branch: refs/heads/branch-3.0 Commit: aef3e4b6af0368bfdc47fbdca3899d337e0d94bf Parents: eb76d3d Author: Junping DuAuthored: Wed Sep 13 22:16:06 2017 -0700 Committer: Xuan Committed: Mon Oct 2 16:35:27 2017 -0700 -- .../ifile/LogAggregationIndexedFileController.java | 3 ++- .../ifile/TestLogAggregationIndexFileController.java | 6 ++ 2 files changed, 4 insertions(+), 5 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/aef3e4b6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java index 6cb2062..243945e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java @@ -392,7 +392,8 @@ public class LogAggregationIndexedFileController this.fsDataOStream.writeInt(length); byte[] separator = this.uuid.getBytes(Charset.forName("UTF-8")); this.fsDataOStream.write(separator); -if (logAggregationSuccessfullyInThisCyCle) { +if (logAggregationSuccessfullyInThisCyCle && +record.isLogAggregationInRolling()) { deleteFileWithRetries(fc, ugi, remoteLogCheckSumFile); } } http://git-wip-us.apache.org/repos/asf/hadoop/blob/aef3e4b6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/TestLogAggregationIndexFileController.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/TestLogAggregationIndexFileController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/TestLogAggregationIndexFileController.java index 5f61710..f77ad96 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/TestLogAggregationIndexFileController.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/TestLogAggregationIndexFileController.java @@ -164,9 +164,7 @@ public class TestLogAggregationIndexFileController { fileFormat.initializeWriter(context); fileFormat.write(key1, value); -LogAggregationFileControllerContext record = mock( -LogAggregationFileControllerContext.class); -fileFormat.postWrite(record); +fileFormat.postWrite(context); 
fileFormat.closeWriter(); ContainerLogsRequest logRequest = new ContainerLogsRequest(); @@ -267,7 +265,7 @@ public class TestLogAggregationIndexFileController { // first write and second write fileFormat.initializeWriter(context); fileFormat.write(key1, value2); -fileFormat.postWrite(record); +fileFormat.postWrite(context); fileFormat.closeWriter(); fileFormat.readAggregatedLogsMeta( logRequest); - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
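The one-line production change in YARN-7194 tightens the cleanup condition: the remote checksum file is deleted only when the cycle succeeded and the controller is doing rolling aggregation, and the test now passes the real context to postWrite instead of a mock. A minimal sketch of the guard, with both flags as hypothetical locals standing in for the fields read in the patch:

public class ChecksumCleanupSketch {
  public static void main(String[] args) {
    // Stand-ins for logAggregationSuccessfullyInThisCyCle and
    // record.isLogAggregationInRolling() from the diff above.
    boolean cycleSucceeded = true;
    boolean inRollingAggregation = false;

    if (cycleSucceeded && inRollingAggregation) {
      System.out.println("delete remote checksum file");
    } else {
      // Outside rolling aggregation the checksum file is left in place.
      System.out.println("keep remote checksum file");
    }
  }
}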
[2/2] hadoop git commit: Update CHANGES.txt for HDFS-8865
Update CHANGES.txt for HDFS-8865 Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/06139561 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/06139561 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/06139561 Branch: refs/heads/branch-2.7 Commit: 0613956156d5775ae46f770b184804c47e81a55a Parents: 21db218 Author: Xiao ChenAuthored: Mon Oct 2 16:12:25 2017 -0700 Committer: Xiao Chen Committed: Mon Oct 2 16:21:04 2017 -0700 -- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/06139561/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 3309a4a..73d63ad 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -35,6 +35,8 @@ Release 2.7.5 - UNRELEASED TestRefreshUserMappings.testRefreshSuperUserGroupsConfiguration test failure. (Rakesh R via kihwal) +HDFS-8865. Improve quota initialization performance. Contributed by Kihwal Lee. + Release 2.7.4 - 2017-08-04 INCOMPATIBLE CHANGES @@ -503,8 +505,6 @@ Release 2.7.3 - 2016-08-25 HDFS-8709. Clarify automatic sync in FSEditLog#logEdit. (wang) -HDFS-8865. Improve quota initialization performance. (kihwal) - OPTIMIZATIONS HDFS-8845. DiskChecker should not traverse the entire tree (Chang Li via - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[2/2] hadoop git commit: Update CHANGES.txt for HDFS-8865
Update CHANGES.txt for HDFS-8865 Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/312bfbf7 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/312bfbf7 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/312bfbf7 Branch: refs/heads/branch-2.6 Commit: 312bfbf794dec870cedcb8ede52d4c531b465f62 Parents: cfa5595 Author: Xiao ChenAuthored: Mon Oct 2 16:12:25 2017 -0700 Committer: Xiao Chen Committed: Mon Oct 2 16:21:12 2017 -0700 -- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/312bfbf7/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 8f30159..2d2228a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -30,6 +30,8 @@ Release 2.6.6 - UNRELEASED HDFS-9743. Fix TestLazyPersistFiles#testFallbackToDiskFull (kihwal) +HDFS-8865. Improve quota initialization performance. Contributed by Kihwal Lee. + Release 2.6.5 - 2016-10-08 INCOMPATIBLE CHANGES @@ -61,8 +63,6 @@ Release 2.6.5 - 2016-10-08 HDFS-10870. Wrong dfs.namenode.acls.enabled default in HdfsPermissionsGuide.apt.vm. (John Zhuge via lei) -HDFS-8865. Improve quota initialization performance. (kihwal) - OPTIMIZATIONS HDFS-10653. Optimize conversion from path string to components. (Daryn - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[1/2] hadoop git commit: HDFS-9003. ForkJoin thread pool leaks. Contributed by Kihwal Lee.
Repository: hadoop Updated Branches: refs/heads/branch-2.7 21db218fd -> 171a0bd51 HDFS-9003. ForkJoin thread pool leaks. Contributed by Kihwal Lee. (cherry picked from commit de928d566a119f0b7fa5f171719642cd86be0af7) (cherry picked from commit 1d56325a80cdac5820079ac04fa18a7e5126f8ef) Conflicts: hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/171a0bd5 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/171a0bd5 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/171a0bd5 Branch: refs/heads/branch-2.7 Commit: 171a0bd5175d0b854edc79ce46bd42c2c42ae600 Parents: 0613956 Author: Jing ZhaoAuthored: Wed Sep 2 15:19:04 2015 -0700 Committer: Xiao Chen Committed: Mon Oct 2 16:21:04 2017 -0700 -- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt| 2 ++ .../main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java | 1 + 2 files changed, 3 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/171a0bd5/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 73d63ad..250e6dd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -37,6 +37,8 @@ Release 2.7.5 - UNRELEASED HDFS-8865. Improve quota initialization performance. Contributed by Kihwal Lee. +HDFS-9003. ForkJoin thread pool leaks. (Kihwal Lee via jing9) + Release 2.7.4 - 2017-08-04 INCOMPATIBLE CHANGES http://git-wip-us.apache.org/repos/asf/hadoop/blob/171a0bd5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java index 2da0d3b..c40a610 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java @@ -935,6 +935,7 @@ public class FSImage implements Closeable { root, counts); p.execute(task); task.join(); +p.shutdown(); LOG.info("Quota initialization completed in " + (Time.now() - start) + " milliseconds\n" + counts); } - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
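The substance of HDFS-9003 is the single added line: a ForkJoinPool created for quota initialization was joined but never shut down, so its worker threads leaked. A self-contained sketch of the corrected lifecycle (the task body is a placeholder for the real quota walk):

import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.RecursiveAction;

public class ForkJoinShutdownSketch {
  public static void main(String[] args) {
    ForkJoinPool p = new ForkJoinPool(4);
    RecursiveAction task = new RecursiveAction() {
      @Override
      protected void compute() {
        // Placeholder for the quota initialization work.
        System.out.println("initializing quotas");
      }
    };
    p.execute(task);
    task.join();
    p.shutdown(); // the fix: release the pool's worker threads after joining
  }
}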
[1/2] hadoop git commit: HDFS-9003. ForkJoin thread pool leaks. Contributed by Kihwal Lee.
Repository: hadoop Updated Branches: refs/heads/branch-2.6 cfa5595ac -> f638ff904 HDFS-9003. ForkJoin thread pool leaks. Contributed by Kihwal Lee. (cherry picked from commit de928d566a119f0b7fa5f171719642cd86be0af7) (cherry picked from commit 1d56325a80cdac5820079ac04fa18a7e5126f8ef) Conflicts: hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (cherry picked from commit 879567d24a6e3b625f639a88137fb1654c54c18f) Conflicts: hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f638ff90 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f638ff90 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f638ff90 Branch: refs/heads/branch-2.6 Commit: f638ff90420768d54bcbad0ac4a1850ce001611d Parents: 312bfbf Author: Jing ZhaoAuthored: Wed Sep 2 15:19:04 2015 -0700 Committer: Xiao Chen Committed: Mon Oct 2 16:21:12 2017 -0700 -- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt| 2 ++ .../main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java | 1 + 2 files changed, 3 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/f638ff90/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 2d2228a..36fc64f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -32,6 +32,8 @@ Release 2.6.6 - UNRELEASED HDFS-8865. Improve quota initialization performance. Contributed by Kihwal Lee. +HDFS-9003. ForkJoin thread pool leaks. (Kihwal Lee via jing9) + Release 2.6.5 - 2016-10-08 INCOMPATIBLE CHANGES http://git-wip-us.apache.org/repos/asf/hadoop/blob/f638ff90/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java index 9490c2c..9576869 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java @@ -871,6 +871,7 @@ public class FSImage implements Closeable { RecursiveAction task = new InitQuotaTask(root, counts); p.execute(task); task.join(); +p.shutdown(); LOG.info("Quota initialization completed in " + (Time.now() - start) + " milliseconds\n" + counts); } - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
hadoop git commit: YARN-7259. Add size-based rolling policy to LogAggregationIndexedFileController. (xgong via wangda)
Repository: hadoop Updated Branches: refs/heads/branch-2 1eecf8a97 -> 8beae14a0 YARN-7259. Add size-based rolling policy to LogAggregationIndexedFileController. (xgong via wangda) Change-Id: Ifaf82c0aee6b73b9b6ebf103aa72e131e3942f31 (cherry picked from commit 280080fad01304c85a9ede4d4f7b707eb36c0155) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8beae14a Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8beae14a Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8beae14a Branch: refs/heads/branch-2 Commit: 8beae14a0794857d867e8c66c5882a614302decb Parents: 1eecf8a Author: Wangda TanAuthored: Mon Oct 2 15:30:22 2017 -0700 Committer: Wangda Tan Committed: Mon Oct 2 15:36:11 2017 -0700 -- .../ifile/IndexedFileAggregatedLogsBlock.java | 14 +- .../LogAggregationIndexedFileController.java| 397 +-- .../TestLogAggregationIndexFileController.java | 67 +++- 3 files changed, 340 insertions(+), 138 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/8beae14a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/IndexedFileAggregatedLogsBlock.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/IndexedFileAggregatedLogsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/IndexedFileAggregatedLogsBlock.java index 7f4441d..021e011 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/IndexedFileAggregatedLogsBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/IndexedFileAggregatedLogsBlock.java @@ -101,10 +101,9 @@ public class IndexedFileAggregatedLogsBlock extends LogAggregationHtmlBlock { return; } -Map checkSumFiles; +Map checkSumFiles; try { - checkSumFiles = fileController.filterFiles(nodeFiles, - LogAggregationIndexedFileController.CHECK_SUM_FILE_SUFFIX); + checkSumFiles = fileController.parseCheckSumFiles(nodeFiles); } catch (IOException ex) { LOG.error("Error getting logs for " + logEntity, ex); html.h1("Error getting logs for " + logEntity); @@ -125,12 +124,11 @@ public class IndexedFileAggregatedLogsBlock extends LogAggregationHtmlBlock { String desiredLogType = $(CONTAINER_LOG_TYPE); try { for (FileStatus thisNodeFile : fileToRead) { -FileStatus checkSum = fileController.getAllChecksumFiles( -checkSumFiles, thisNodeFile.getPath().getName()); +Long checkSumIndex = checkSumFiles.get( +thisNodeFile.getPath().getName()); long endIndex = -1; -if (checkSum != null) { - endIndex = fileController.loadIndexedLogsCheckSum( - checkSum.getPath()); +if (checkSumIndex != null) { + endIndex = checkSumIndex.longValue(); } IndexedLogsMeta indexedLogsMeta = null; try { http://git-wip-us.apache.org/repos/asf/hadoop/blob/8beae14a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java index 243945e..800c0a2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java @@ -29,6 +29,8 @@ import java.io.InputStream; import java.io.OutputStream; import java.io.Serializable; import java.nio.charset.Charset; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.Arrays; @@ -41,7 +43,6 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; import
[4/5] hadoop git commit: HDFS-9743. Fix TestLazyPersistFiles#testFallbackToDiskFull in branch-2.7. Contributed by Kihwal Lee.
HDFS-9743. Fix TestLazyPersistFiles#testFallbackToDiskFull in branch-2.7. Contributed by Kihwal Lee. (cherry picked from commit dd11c8274dede96a7687cc48edee5df8aa65c9b4) Conflicts: hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/71c85622 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/71c85622 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/71c85622 Branch: refs/heads/branch-2.6 Commit: 71c856225c5f1ed38c42aa8b2feb5f3a589012d3 Parents: 88d7dcd Author: Xiao ChenAuthored: Mon Oct 2 15:34:08 2017 -0700 Committer: Xiao Chen Committed: Mon Oct 2 15:53:46 2017 -0700 -- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++ .../server/datanode/fsdataset/impl/LazyPersistTestCase.java | 7 +++ 2 files changed, 9 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/71c85622/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index f5bf5bd..1d02587 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -28,6 +28,8 @@ Release 2.6.6 - UNRELEASED HDFS-10270. TestJMXGet:testNameNode() fails. Contributed by Gergely Novák. +HDFS-9743. Fix TestLazyPersistFiles#testFallbackToDiskFull (kihwal) + Release 2.6.5 - 2016-10-08 INCOMPATIBLE CHANGES http://git-wip-us.apache.org/repos/asf/hadoop/blob/71c85622/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java index c762849..bdc5bf3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java @@ -48,6 +48,7 @@ import org.junit.rules.Timeout; import java.io.File; import java.io.IOException; import java.util.Arrays; +import java.util.concurrent.TimeoutException; import java.util.EnumSet; import java.util.List; import java.util.UUID; @@ -359,6 +360,7 @@ public abstract class LazyPersistTestCase { protected final void verifyRamDiskJMXMetric(String metricName, long expectedValue) throws Exception { +waitForMetric(metricName, (int)expectedValue); assertEquals(expectedValue, Integer.parseInt(jmx.getValue(metricName))); } @@ -386,4 +388,9 @@ public abstract class LazyPersistTestCase { e.printStackTrace(); } } + + protected void waitForMetric(final String metricName, final int expectedValue) + throws TimeoutException, InterruptedException { +DFSTestUtil.waitForMetric(jmx, metricName, expectedValue); + } } - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
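The test fix above makes verifyRamDiskJMXMetric wait for the metric to reach the expected value before asserting on it, instead of asserting against a value that may not have been published yet. A self-contained sketch of that poll-then-assert pattern; the metric below is simulated, and the names are illustrative rather than Hadoop APIs:

import java.util.concurrent.TimeoutException;
import java.util.function.IntSupplier;

public class WaitThenAssertSketch {
  static void waitForMetric(IntSupplier metric, int expected, long timeoutMs)
      throws TimeoutException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (metric.getAsInt() != expected) {
      if (System.currentTimeMillis() > deadline) {
        throw new TimeoutException("metric never reached " + expected);
      }
      Thread.sleep(100); // poll interval
    }
  }

  public static void main(String[] args) throws Exception {
    final long start = System.currentTimeMillis();
    // Simulated metric that becomes 1 after ~300 ms, like a lazily updated
    // RAM-disk counter exposed over JMX.
    IntSupplier metric = () -> (System.currentTimeMillis() - start > 300) ? 1 : 0;
    waitForMetric(metric, 1, 5000);
    System.out.println("metric reached expected value; safe to assert");
  }
}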
[2/5] hadoop git commit: HDFS-10270. TestJMXGet:testNameNode() fails. Contributed by Gergely Novák
HDFS-10270. TestJMXGet:testNameNode() fails. Contributed by Gergely Novák (cherry picked from commit d2f3bbc29046435904ad9418073795439c71b441) (cherry picked from commit ccaf746eeacfafc7392a994f28ee9dbf595c84af) (cherry picked from commit 9f5a22ff22b4841f78ac87c71c291456d20ef4d6) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/88d7dcdb Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/88d7dcdb Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/88d7dcdb Branch: refs/heads/branch-2.6 Commit: 88d7dcdbeba983e0571da2aac536bf9d8853a09b Parents: dc6ace6 Author: Kihwal LeeAuthored: Wed Apr 13 11:24:45 2016 -0500 Committer: Xiao Chen Committed: Mon Oct 2 15:53:46 2017 -0700 -- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++ .../src/test/java/org/apache/hadoop/tools/TestJMXGet.java | 3 --- 2 files changed, 2 insertions(+), 3 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/88d7dcdb/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 74d5b55..f5bf5bd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -26,6 +26,8 @@ Release 2.6.6 - UNRELEASED HDFS-9740. Use a reasonable limit in DFSTestUtil.waitForMetric() (Chang Li via vinayakumarb) +HDFS-10270. TestJMXGet:testNameNode() fails. Contributed by Gergely Novák. + Release 2.6.5 - 2016-10-08 INCOMPATIBLE CHANGES http://git-wip-us.apache.org/repos/asf/hadoop/blob/88d7dcdb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java index 278fbb8..769c7f1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java @@ -120,9 +120,6 @@ public class TestJMXGet { jmx.getValue("NumLiveDataNodes"))); assertGauge("CorruptBlocks", Long.parseLong(jmx.getValue("CorruptBlocks")), getMetrics("FSNamesystem")); -DFSTestUtil.waitForMetric(jmx, "NumOpenConnections", numDatanodes); -assertEquals(numDatanodes, Integer.parseInt( -jmx.getValue("NumOpenConnections"))); cluster.shutdown(); MBeanServerConnection mbsc = ManagementFactory.getPlatformMBeanServer(); - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
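For readers unfamiliar with what TestJMXGet exercises, the core operation is reading an MBean attribute from the platform MBean server, as the mbsc lookup at the end of the hunk above does. A runnable sketch using a standard JVM bean in place of the NameNode's metrics beans:

import java.lang.management.ManagementFactory;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class JmxReadSketch {
  public static void main(String[] args) throws Exception {
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    // java.lang:type=Threading ships with every JVM; the NameNode publishes
    // its own beans under the Hadoop domain instead.
    ObjectName name = new ObjectName("java.lang:type=Threading");
    Object threadCount = mbs.getAttribute(name, "ThreadCount");
    System.out.println("ThreadCount = " + threadCount);
  }
}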
[1/5] hadoop git commit: HDFS-8865. Improve quota initialization performance. Contributed by Kihwal Lee.
Repository: hadoop Updated Branches: refs/heads/branch-2.6 cfbc06338 -> cfa5595ac HDFS-8865. Improve quota initialization performance. Contributed by Kihwal Lee. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cfa5595a Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cfa5595a Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cfa5595a Branch: refs/heads/branch-2.6 Commit: cfa5595acec8c4f876b0b66a17ae75aa330ad95f Parents: 71c8562 Author: Xiao ChenAuthored: Mon Oct 2 15:36:57 2017 -0700 Committer: Xiao Chen Committed: Mon Oct 2 15:53:46 2017 -0700 -- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 + .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 2 + .../hdfs/server/namenode/BackupImage.java | 8 +- .../hadoop/hdfs/server/namenode/FSImage.java| 113 +-- .../src/main/resources/hdfs-default.xml | 10 ++ .../namenode/TestDiskspaceQuotaUpdate.java | 65 +++ .../namenode/TestFSImageWithSnapshot.java | 4 +- 7 files changed, 166 insertions(+), 38 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/cfa5595a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 1d02587..8f30159 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -61,6 +61,8 @@ Release 2.6.5 - 2016-10-08 HDFS-10870. Wrong dfs.namenode.acls.enabled default in HdfsPermissionsGuide.apt.vm. (John Zhuge via lei) +HDFS-8865. Improve quota initialization performance. (kihwal) + OPTIMIZATIONS HDFS-10653. Optimize conversion from path string to components. (Daryn http://git-wip-us.apache.org/repos/asf/hadoop/blob/cfa5595a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index 3f26105..04f67b6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -257,6 +257,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final String DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY = "dfs.namenode.edits.dir.minimum"; public static final int DFS_NAMENODE_EDITS_DIR_MINIMUM_DEFAULT = 1; + public static final String DFS_NAMENODE_QUOTA_INIT_THREADS_KEY = "dfs.namenode.quota.init-threads"; + public static final int DFS_NAMENODE_QUOTA_INIT_THREADS_DEFAULT = 4; public static final String DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD = "dfs.namenode.edit.log.autoroll.multiplier.threshold"; public static final float DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD_DEFAULT = 2.0f; http://git-wip-us.apache.org/repos/asf/hadoop/blob/cfa5595a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java index 4f1973d..793087b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupImage.java @@ -25,6 +25,7 @@ 
import java.util.zip.Checksum; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; @@ -83,6 +84,8 @@ public class BackupImage extends FSImage { private FSNamesystem namesystem; + private int quotaInitThreads; + /** * Construct a backup image. * @param conf Configuration @@ -92,6 +95,9 @@ public class BackupImage extends FSImage { super(conf); storage.setDisablePreUpgradableLayoutCheck(true); bnState = BNState.DROP_UNTIL_NEXT_ROLL; +quotaInitThreads = conf.getInt( +DFSConfigKeys.DFS_NAMENODE_QUOTA_INIT_THREADS_KEY, +
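The backport above parallelizes quota initialization across a ForkJoinPool whose size comes from the new dfs.namenode.quota.init-threads key (default 4, per the DFSConfigKeys hunk). A simplified, self-contained sketch of the divide-and-conquer shape; the integer range below stands in for the INode tree, and the counting is illustrative:

import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.RecursiveAction;
import java.util.concurrent.atomic.AtomicLong;

public class ParallelQuotaInitSketch {
  static final AtomicLong counts = new AtomicLong();

  static class InitQuotaTask extends RecursiveAction {
    final int lo, hi;
    InitQuotaTask(int lo, int hi) { this.lo = lo; this.hi = hi; }

    @Override
    protected void compute() {
      if (hi - lo <= 1000) {
        counts.addAndGet(hi - lo); // "tally quota usage" for this subtree
      } else {
        int mid = (lo + hi) >>> 1; // split and recurse in parallel
        invokeAll(new InitQuotaTask(lo, mid), new InitQuotaTask(mid, hi));
      }
    }
  }

  public static void main(String[] args) {
    int quotaInitThreads = 4; // would be read from configuration
    ForkJoinPool p = new ForkJoinPool(quotaInitThreads);
    long start = System.currentTimeMillis();
    InitQuotaTask task = new InitQuotaTask(0, 1_000_000);
    p.execute(task);
    task.join();
    p.shutdown();
    System.out.println("Quota initialization completed in "
        + (System.currentTimeMillis() - start) + " milliseconds, counts="
        + counts);
  }
}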
[5/5] hadoop git commit: HDFS-9740. Use a reasonable limit in DFSTestUtil.waitForMetric() (Contributed by Chang Li)
HDFS-9740. Use a reasonable limit in DFSTestUtil.waitForMetric() (Contributed by Chang Li) (cherry picked from commit eb2fb943fd97c191fd7c4f5333087a28ee5c87d8) Conflicts: hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java (cherry picked from commit 6941e2ccbf6f4c7c8663a68c7a7c9ed59bb49fbf) Conflicts: hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java (cherry picked from commit af9de19cfd7f3ad0d2d075bf79efdcee46868ca1) Conflicts: hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dc6ace69 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dc6ace69 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dc6ace69 Branch: refs/heads/branch-2.6 Commit: dc6ace699c5a904b8c16968d76579eddd9f1b186 Parents: 07bbde8 Author: Xiao ChenAuthored: Mon Oct 2 15:32:39 2017 -0700 Committer: Xiao Chen Committed: Mon Oct 2 15:53:46 2017 -0700 -- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc6ace69/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 7adce3d..74d5b55 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -23,6 +23,9 @@ Release 2.6.6 - UNRELEASED HDFS-9072. Fix random failures in TestJMXGet (J.Andreina via kihwal) +HDFS-9740. Use a reasonable limit in DFSTestUtil.waitForMetric() +(Chang Li via vinayakumarb) + Release 2.6.5 - 2016-10-08 INCOMPATIBLE CHANGES http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc6ace69/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java index 0c2bfc9..6256758 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java @@ -1721,6 +1721,6 @@ public class DFSTestUtil { throw new UnhandledException("Test failed due to unexpected exception", e); } } -}, 1000, Integer.MAX_VALUE); +}, 1000, 6); } } - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[3/5] hadoop git commit: HDFS-9072. Fix random failures in TestJMXGet. Contributed by J.Andreina.
HDFS-9072. Fix random failures in TestJMXGet. Contributed by J.Andreina. (cherry picked from commit 2f031830e804d5cf090c3597ce6cc86f2e1ccf02) Conflicts: hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/07bbde87 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/07bbde87 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/07bbde87 Branch: refs/heads/branch-2.6 Commit: 07bbde8712005e6a84d2fc5709bb981fc1f8be31 Parents: cfbc063 Author: Xiao ChenAuthored: Sun Oct 1 19:06:00 2017 -0700 Committer: Xiao Chen Committed: Mon Oct 2 15:53:46 2017 -0700 -- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++ .../org/apache/hadoop/hdfs/DFSTestUtil.java | 20 .../org/apache/hadoop/tools/TestJMXGet.java | 4 3 files changed, 26 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/07bbde87/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 4b17365..7adce3d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -21,6 +21,8 @@ Release 2.6.6 - UNRELEASED HDFS-11352. Potential deadlock in NN when failing over. (Eric Krogen via aajisaka) +HDFS-9072. Fix random failures in TestJMXGet (J.Andreina via kihwal) + Release 2.6.5 - 2016-10-08 INCOMPATIBLE CHANGES http://git-wip-us.apache.org/repos/asf/hadoop/blob/07bbde87/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java index c012f67..0c2bfc9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java @@ -27,6 +27,7 @@ import com.google.common.collect.Lists; import com.google.common.collect.Maps; import com.google.common.collect.Sets; import org.apache.commons.io.FileUtils; +import org.apache.commons.lang.UnhandledException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -73,6 +74,7 @@ import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo; import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus; import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks; import org.apache.hadoop.hdfs.tools.DFSAdmin; +import org.apache.hadoop.hdfs.tools.JMXGet; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.nativeio.NativeIO; import org.apache.hadoop.net.NetUtils; @@ -1703,4 +1705,22 @@ public class DFSTestUtil { lastBlock.setNumBytes(len); return lastBlock; } + + public static void waitForMetric(final JMXGet jmx, final String metricName, final int expectedValue) + throws TimeoutException, InterruptedException { +GenericTestUtils.waitFor(new Supplier() { + @Override + public Boolean get() { +try { + final int currentValue = Integer.parseInt(jmx.getValue(metricName)); + LOG.info("Waiting for " + metricName + + " to reach value " + expectedValue + + ", current value = " + currentValue); + return currentValue == expectedValue; +} catch (Exception e) { + throw new UnhandledException("Test failed due to unexpected exception", e); +} + } +}, 1000, Integer.MAX_VALUE); + } } 
http://git-wip-us.apache.org/repos/asf/hadoop/blob/07bbde87/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java index c69e73a..278fbb8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java @@ -41,6 +41,7 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DFSTestUtil; import
[43/50] [abbrv] hadoop git commit: HDFS-12384. Fixing compilation issue with BanDuplicateClasses. Contributed by Inigo Goiri.
HDFS-12384. Fixing compilation issue with BanDuplicateClasses. Contributed by Inigo Goiri. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f95704d4 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f95704d4 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f95704d4 Branch: refs/heads/HDFS-10467 Commit: f95704d4978d9398b77714e03b3fa8f01903e9ed Parents: 820d228 Author: Inigo GoiriAuthored: Thu Sep 7 13:53:08 2017 -0700 Committer: Inigo Goiri Committed: Mon Oct 2 15:37:35 2017 -0700 -- hadoop-hdfs-project/hadoop-hdfs/pom.xml | 4 .../server/federation/router/RouterRpcServer.java| 15 +++ 2 files changed, 15 insertions(+), 4 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/f95704d4/hadoop-hdfs-project/hadoop-hdfs/pom.xml -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml index d22d6ee..0fe491b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml @@ -205,10 +205,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;> org.apache.curator - curator-framework - - - org.apache.curator curator-test test http://git-wip-us.apache.org/repos/asf/hadoop/blob/f95704d4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java index c77d255..f9b4a5d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java @@ -81,6 +81,7 @@ import org.apache.hadoop.hdfs.protocol.EncryptionZone; import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction; import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; @@ -91,6 +92,7 @@ import org.apache.hadoop.hdfs.protocol.OpenFileEntry; import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; +import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol; import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB; import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB; @@ -1607,6 +1609,19 @@ public class RouterRpcServer extends AbstractService implements ClientProtocol { } @Override // ClientProtocol + public void reencryptEncryptionZone(String zone, ReencryptAction action) + throws IOException { +checkOperation(OperationCategory.WRITE, false); + } + + @Override // ClientProtocol + public BatchedEntries listReencryptionStatus( + long prevId) throws IOException { +checkOperation(OperationCategory.READ, false); +return null; + } + + @Override // ClientProtocol public void setXAttr(String src, XAttr xAttr, EnumSet 
flag) throws IOException { checkOperation(OperationCategory.WRITE); - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
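The two new RouterRpcServer overrides above are compile-fix stubs: each checks the operation category with supported=false and returns nothing, the pattern this router uses for ClientProtocol calls it does not yet implement. A simplified, self-contained sketch of that pattern; OperationCategory and checkOperation here are stand-ins, and the refusal behavior is an assumption rather than taken from the diff:

import java.io.IOException;

public class RouterStubSketch {
  enum OperationCategory { READ, WRITE }

  // Stand-in for RouterRpcServer#checkOperation(category, supported).
  static void checkOperation(OperationCategory op, boolean supported)
      throws IOException {
    if (!supported) {
      throw new UnsupportedOperationException(
          "Operation " + op + " is not supported by the router yet");
    }
  }

  // Mirrors the reencryptEncryptionZone stub: a WRITE the router declines.
  public void reencryptEncryptionZone(String zone) throws IOException {
    checkOperation(OperationCategory.WRITE, false);
  }

  public static void main(String[] args) throws IOException {
    try {
      new RouterStubSketch().reencryptEncryptionZone("/zone");
    } catch (UnsupportedOperationException e) {
      System.out.println(e.getMessage());
    }
  }
}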
[26/50] [abbrv] hadoop git commit: HDFS-11546. Federation Router RPC server. Contributed by Jason Kace and Inigo Goiri.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f8a7e20/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java index 24792bb..4bae71e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java @@ -17,16 +17,109 @@ */ package org.apache.hadoop.hdfs.server.federation.router; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_HANDLER_COUNT_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_HANDLER_COUNT_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_HANDLER_QUEUE_SIZE_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_HANDLER_QUEUE_SIZE_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_READER_COUNT_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_READER_COUNT_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_READER_QUEUE_SIZE_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ROUTER_READER_QUEUE_SIZE_KEY; + +import java.io.FileNotFoundException; import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.Collection; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; +import java.util.TreeMap; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.crypto.CryptoProtocolVersion; +import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries; +import org.apache.hadoop.fs.CacheFlag; +import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.ContentSummary; +import org.apache.hadoop.fs.CreateFlag; +import org.apache.hadoop.fs.FileAlreadyExistsException; +import org.apache.hadoop.fs.FsServerDefaults; +import org.apache.hadoop.fs.Options; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.QuotaUsage; +import org.apache.hadoop.fs.StorageType; +import org.apache.hadoop.fs.XAttr; +import org.apache.hadoop.fs.XAttrSetFlag; +import org.apache.hadoop.fs.permission.AclEntry; +import org.apache.hadoop.fs.permission.AclStatus; +import org.apache.hadoop.fs.permission.FsAction; +import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdfs.AddBlockFlag; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DFSUtil; +import org.apache.hadoop.hdfs.inotify.EventBatchList; +import org.apache.hadoop.hdfs.protocol.AddingECPolicyResponse; +import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy; +import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry; +import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; +import org.apache.hadoop.hdfs.protocol.CachePoolEntry; +import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.ClientProtocol; +import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; +import org.apache.hadoop.hdfs.protocol.DatanodeID; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import 
org.apache.hadoop.hdfs.protocol.DirectoryListing; +import org.apache.hadoop.hdfs.protocol.EncryptionZone; +import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy; +import org.apache.hadoop.hdfs.protocol.ExtendedBlock; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction; +import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; +import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; +import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus; +import org.apache.hadoop.hdfs.protocol.LocatedBlock; +import org.apache.hadoop.hdfs.protocol.LocatedBlocks; +import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo; +import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; +import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; +import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol; +import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB; +import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB; +import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey; +import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver; +import
[20/50] [abbrv] hadoop git commit: HDFS-12271. Incorrect statement in Downgrade section of HDFS Rolling Upgrade document. Contributed by Nandakumar.
HDFS-12271. Incorrect statement in Downgrade section of HDFS Rolling Upgrade document. Contributed by Nandakumar. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2c62ff7f Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2c62ff7f Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2c62ff7f Branch: refs/heads/HDFS-10467 Commit: 2c62ff7fd4392d55fe41573da297e2c0bc4f4c41 Parents: 442ea85 Author: Chen Liang Authored: Mon Oct 2 13:18:00 2017 -0700 Committer: Chen Liang Committed: Mon Oct 2 13:18:00 2017 -0700 -- .../hadoop-hdfs/src/site/markdown/HdfsRollingUpgrade.md| 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c62ff7f/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsRollingUpgrade.md -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsRollingUpgrade.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsRollingUpgrade.md index cabdd74..386ab6a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsRollingUpgrade.md +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsRollingUpgrade.md @@ -179,7 +179,7 @@ Below are the steps for rolling downgrade without downtime: 1. Start *NN2* as standby normally. 1. Failover from *NN1* to *NN2* so that *NN2* becomes active and *NN1* becomes standby. -1. Shutdown and upgrade *NN1*. +1. Shutdown and downgrade *NN1*. 1. Start *NN1* as standby normally. 1. Finalize Rolling Downgrade 1. Run "[`hdfs dfsadmin -rollingUpgrade finalize`](#dfsadmin_-rollingUpgrade)" - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
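(No code accompanies this one; the change corrects a single word in the rolling-downgrade procedure, replacing "upgrade" with "downgrade" in the NN1 step.)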
[32/50] [abbrv] hadoop git commit: HDFS-10687. Federation Membership State Store internal API. Contributed by Jason Kace and Inigo Goiri.
HDFS-10687. Federation Membership State Store internal API. Contributed by Jason Kace and Inigo Goiri. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/98d26835 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/98d26835 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/98d26835 Branch: refs/heads/HDFS-10467 Commit: 98d268353014c68e5a6b30b9e43d2bb06a262e83 Parents: 9f1ff73 Author: Inigo GoiriAuthored: Mon Jul 31 10:55:21 2017 -0700 Committer: Inigo Goiri Committed: Mon Oct 2 15:37:33 2017 -0700 -- .../dev-support/findbugsExcludeFile.xml | 3 + hadoop-hdfs-project/hadoop-hdfs/pom.xml | 1 + .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 17 +- .../resolver/MembershipNamenodeResolver.java| 290 .../federation/router/FederationUtil.java | 42 +- .../federation/store/CachedRecordStore.java | 237 ++ .../federation/store/MembershipStore.java | 126 + .../federation/store/StateStoreCache.java | 36 ++ .../store/StateStoreCacheUpdateService.java | 67 +++ .../federation/store/StateStoreService.java | 202 +++- .../store/impl/MembershipStoreImpl.java | 311 + .../federation/store/impl/package-info.java | 31 ++ .../GetNamenodeRegistrationsRequest.java| 52 +++ .../GetNamenodeRegistrationsResponse.java | 55 +++ .../store/protocol/GetNamespaceInfoRequest.java | 30 ++ .../protocol/GetNamespaceInfoResponse.java | 52 +++ .../protocol/NamenodeHeartbeatRequest.java | 52 +++ .../protocol/NamenodeHeartbeatResponse.java | 49 ++ .../UpdateNamenodeRegistrationRequest.java | 72 +++ .../UpdateNamenodeRegistrationResponse.java | 51 ++ .../impl/pb/FederationProtocolPBTranslator.java | 145 ++ .../GetNamenodeRegistrationsRequestPBImpl.java | 87 .../GetNamenodeRegistrationsResponsePBImpl.java | 99 .../impl/pb/GetNamespaceInfoRequestPBImpl.java | 60 +++ .../impl/pb/GetNamespaceInfoResponsePBImpl.java | 95 .../impl/pb/NamenodeHeartbeatRequestPBImpl.java | 93 .../pb/NamenodeHeartbeatResponsePBImpl.java | 71 +++ ...UpdateNamenodeRegistrationRequestPBImpl.java | 95 ...pdateNamenodeRegistrationResponsePBImpl.java | 73 +++ .../store/protocol/impl/pb/package-info.java| 29 ++ .../store/records/MembershipState.java | 329 + .../store/records/MembershipStats.java | 126 + .../records/impl/pb/MembershipStatePBImpl.java | 334 + .../records/impl/pb/MembershipStatsPBImpl.java | 191 .../src/main/proto/FederationProtocol.proto | 107 + .../src/main/resources/hdfs-default.xml | 18 +- .../resolver/TestNamenodeResolver.java | 284 .../store/FederationStateStoreTestUtils.java| 23 +- .../federation/store/TestStateStoreBase.java| 81 .../store/TestStateStoreMembershipState.java| 463 +++ .../store/driver/TestStateStoreDriverBase.java | 69 ++- .../store/records/TestMembershipState.java | 129 ++ 42 files changed, 4745 insertions(+), 32 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/98d26835/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml index 9582fcb..4b958b5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/findbugsExcludeFile.xml @@ -15,6 +15,9 @@ + + + http://git-wip-us.apache.org/repos/asf/hadoop/blob/98d26835/hadoop-hdfs-project/hadoop-hdfs/pom.xml -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml index 425572f..cc7a975 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml @@ -331,6 +331,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;> QJournalProtocol.proto editlog.proto fsimage.proto + FederationProtocol.proto http://git-wip-us.apache.org/repos/asf/hadoop/blob/98d26835/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
[02/50] [abbrv] hadoop git commit: HADOOP-14902. LoadGenerator#genFile write close timing is incorrectly calculated. Contributed by Hanisha Koneru
HADOOP-14902. LoadGenerator#genFile write close timing is incorrectly calculated. Contributed by Hanisha Koneru Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6f789fe0 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6f789fe0 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6f789fe0 Branch: refs/heads/HDFS-10467 Commit: 6f789fe05766a61b12ca10df3f26ee354eac84aa Parents: ca669f9 Author: Jason LoweAuthored: Thu Sep 28 16:38:30 2017 -0500 Committer: Jason Lowe Committed: Thu Sep 28 16:38:30 2017 -0500 -- .../apache/hadoop/fs/loadGenerator/LoadGenerator.java| 11 --- 1 file changed, 8 insertions(+), 3 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f789fe0/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java -- diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java index 0bb1b46..b74e75d 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java @@ -322,6 +322,7 @@ public class LoadGenerator extends Configured implements Tool { private void genFile(Path file, long fileSize) throws IOException { long startTimestamp = Time.monotonicNow(); FSDataOutputStream out = null; + boolean isOutClosed = false; try { out = fc.create(file, EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE), @@ -337,11 +338,15 @@ public class LoadGenerator extends Configured implements Tool { i -= s; } -startTimestamp = Time.monotonicNow(); -executionTime[WRITE_CLOSE] += (Time.monotonicNow() - startTimestamp); +startTime = Time.monotonicNow(); +out.close(); +executionTime[WRITE_CLOSE] += (Time.monotonicNow() - startTime); numOfOps[WRITE_CLOSE]++; +isOutClosed = true; } finally { -IOUtils.cleanupWithLogger(LOG, out); +if (!isOutClosed && out != null) { + out.close(); +} } } } - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
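The bug fixed in HADOOP-14902 is visible in the removed lines: the start timestamp was reset and the elapsed time computed immediately afterwards, so WRITE_CLOSE always recorded roughly zero and the close() itself was never inside the measured window. A self-contained sketch contrasting the broken and fixed shapes (Thread.sleep stands in for out.close()):

public class CloseTimingSketch {
  public static void main(String[] args) throws InterruptedException {
    // Broken shape: elapsed is computed before the operation runs, ~0 ns.
    long start = System.nanoTime();
    long buggyElapsed = System.nanoTime() - start;

    // Fixed shape: the timed operation sits between the two readings.
    start = System.nanoTime();
    Thread.sleep(50); // stand-in for out.close()
    long fixedElapsed = System.nanoTime() - start;

    System.out.println("buggy=" + buggyElapsed + " ns, fixed="
        + fixedElapsed + " ns");
  }
}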
[22/50] [abbrv] hadoop git commit: HDFS-12569. Unset EC policy logs empty payload in edit log. (Lei (Eddy) Xu)
HDFS-12569. Unset EC policy logs empty payload in edit log. (Lei (Eddy) Xu) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/27ffd43b Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/27ffd43b Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/27ffd43b Branch: refs/heads/HDFS-10467 Commit: 27ffd43b6419c9ebe697536bcb6abb858ce791d2 Parents: 280080f Author: Lei XuAuthored: Mon Oct 2 15:31:20 2017 -0700 Committer: Lei Xu Committed: Mon Oct 2 15:35:49 2017 -0700 -- .../hdfs/server/namenode/FSDirErasureCodingOp.java | 3 +-- .../hadoop/hdfs/server/namenode/FSDirXAttrOp.java | 4 .../hdfs/TestUnsetAndChangeDirectoryEcPolicy.java | 13 + 3 files changed, 18 insertions(+), 2 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/27ffd43b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java index 181b147..391e392 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java @@ -298,8 +298,7 @@ final class FSDirErasureCodingOp { final List xattrs = Lists.newArrayListWithCapacity(1); xattrs.add(ecXAttr); -FSDirXAttrOp.unprotectedRemoveXAttrs(fsd, srcIIP, xattrs); -return xattrs; +return FSDirXAttrOp.unprotectedRemoveXAttrs(fsd, srcIIP, xattrs); } /** http://git-wip-us.apache.org/repos/asf/hadoop/blob/27ffd43b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java index acdade7..3223467 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java @@ -184,6 +184,10 @@ class FSDirXAttrOp { return fsd.getAuditFileInfo(iip); } + /** + * Remove xattrs from the inode, and return the removed xattrs. + * @return the removed xattrs. + */ static List unprotectedRemoveXAttrs( FSDirectory fsd, final INodesInPath iip, final List toRemove) throws IOException { http://git-wip-us.apache.org/repos/asf/hadoop/blob/27ffd43b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java index 529a110..52cf163 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java @@ -362,4 +362,17 @@ public class TestUnsetAndChangeDirectoryEcPolicy { + ecFilePath, e); } } + + /** + * Test unsetEcPolicy is persisted correctly in edit log. 
+ */ + @Test + public void testUnsetEcPolicyInEditLog() throws IOException { +fs.getClient().setErasureCodingPolicy("/", ecPolicy.getName()); +Assert.assertEquals(ecPolicy, fs.getErasureCodingPolicy(new Path("/"))); +fs.getClient().unsetErasureCodingPolicy("/"); + +cluster.restartNameNode(true); +Assert.assertNull(fs.getErasureCodingPolicy(new Path("/"))); + } } \ No newline at end of file - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
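Per the new Javadoc in the FSDirXAttrOp hunk, unprotectedRemoveXAttrs returns the xattrs it actually removed, and the fix is to return that list to the caller instead of the caller-built request list, so the edit-log record reflects what was really removed. A simplified sketch of the return-the-removed-entries pattern; the xattr name, value, and map-based store are illustrative:

import java.util.AbstractMap.SimpleEntry;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class RemovedXAttrsSketch {
  // Stand-in for unprotectedRemoveXAttrs: remove by name, return the
  // entries actually removed, stored values included.
  static List<Map.Entry<String, String>> removeXAttrs(
      Map<String, String> stored, List<String> namesToRemove) {
    List<Map.Entry<String, String>> removed = new ArrayList<>();
    for (String name : namesToRemove) {
      String value = stored.remove(name);
      if (value != null) {
        removed.add(new SimpleEntry<>(name, value));
      }
    }
    return removed;
  }

  public static void main(String[] args) {
    Map<String, String> stored = new HashMap<>();
    stored.put("hdfs.erasurecoding.policy", "RS-6-3-1024k"); // made-up value
    // Logging the returned entries records the removed content; logging the
    // name-only request would not.
    System.out.println("edit log payload: "
        + removeXAttrs(stored, Arrays.asList("hdfs.erasurecoding.policy")));
  }
}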
[11/50] [abbrv] hadoop git commit: HADOOP-14909. Fix the word of erasure encoding in the top page. Contributed by Takanobu Asanuma.
HADOOP-14909. Fix the word of erasure encoding in the top page. Contributed by Takanobu Asanuma.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/373d0a51
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/373d0a51
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/373d0a51
Branch: refs/heads/HDFS-10467
Commit: 373d0a51955cabff77e934a28ba2de308207374a
Parents: 8aca46e
Author: Andrew Wang
Authored: Fri Sep 29 13:34:36 2017 -0700
Committer: Andrew Wang
Committed: Fri Sep 29 13:34:36 2017 -0700
--
 hadoop-project/src/site/markdown/index.md.vm | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/373d0a51/hadoop-project/src/site/markdown/index.md.vm
--
diff --git a/hadoop-project/src/site/markdown/index.md.vm b/hadoop-project/src/site/markdown/index.md.vm
index d9443d6..1526f59 100644
--- a/hadoop-project/src/site/markdown/index.md.vm
+++ b/hadoop-project/src/site/markdown/index.md.vm
@@ -34,7 +34,7 @@ Minimum required Java version increased from Java 7 to Java 8
 
 All Hadoop JARs are now compiled targeting a runtime version of Java 8. Users still using Java 7 or below must upgrade to Java 8.
 
-Support for erasure encoding in HDFS
+Support for erasure coding in HDFS
 --
 
 Erasure coding is a method for durably storing data with significant space
[34/50] [abbrv] hadoop git commit: HDFS-10629. Federation Router. Contributed by Jason Kace and Inigo Goiri.
HDFS-10629. Federation Router. Contributed by Jason Kace and Inigo Goiri.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/34fb52fc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/34fb52fc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/34fb52fc
Branch: refs/heads/HDFS-10467
Commit: 34fb52fcb810a076ca89f467b264d8e768995d87
Parents: 27ffd43
Author: Inigo
Authored: Tue Mar 28 14:30:59 2017 -0700
Committer: Inigo Goiri
Committed: Mon Oct 2 15:37:33 2017 -0700
--
 .../hadoop-hdfs/src/main/bin/hdfs              |   5 +
 .../hadoop-hdfs/src/main/bin/hdfs.cmd          |   8 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java  |  17 +
 .../resolver/ActiveNamenodeResolver.java       | 117 +++
 .../resolver/FederationNamenodeContext.java    |  87 +++
 .../FederationNamenodeServiceState.java        |  46 ++
 .../resolver/FederationNamespaceInfo.java      |  99 +++
 .../resolver/FileSubclusterResolver.java       |  75 ++
 .../resolver/NamenodePriorityComparator.java   |  63 ++
 .../resolver/NamenodeStatusReport.java         | 195 +
 .../federation/resolver/PathLocation.java      | 122 +++
 .../federation/resolver/RemoteLocation.java    |  74 ++
 .../federation/resolver/package-info.java      |  41 +
 .../federation/router/FederationUtil.java      | 117 +++
 .../router/RemoteLocationContext.java          |  38 +
 .../hdfs/server/federation/router/Router.java  | 263 +++
 .../federation/router/RouterRpcServer.java     | 102 +++
 .../server/federation/router/package-info.java |  31 +
 .../federation/store/StateStoreService.java    |  77 ++
 .../server/federation/store/package-info.java  |  62 ++
 .../src/main/resources/hdfs-default.xml        |  16 +
 .../server/federation/FederationTestUtils.java | 233 ++
 .../hdfs/server/federation/MockResolver.java   | 290 +++
 .../server/federation/RouterConfigBuilder.java |  40 +
 .../server/federation/RouterDFSCluster.java    | 767 +++
 .../server/federation/router/TestRouter.java   |  96 +++
 26 files changed, 3080 insertions(+), 1 deletion(-)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/34fb52fc/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index e6405b5..b1f44a4 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -57,6 +57,7 @@ function hadoop_usage
   hadoop_add_subcommand "oiv" admin "apply the offline fsimage viewer to an fsimage"
   hadoop_add_subcommand "oiv_legacy" admin "apply the offline fsimage viewer to a legacy fsimage"
   hadoop_add_subcommand "portmap" daemon "run a portmap service"
+  hadoop_add_subcommand "router" daemon "run the DFS router"
   hadoop_add_subcommand "secondarynamenode" daemon "run the DFS secondary namenode"
   hadoop_add_subcommand "snapshotDiff" client "diff two snapshots of a directory or diff the current directory contents with a snapshot"
   hadoop_add_subcommand "storagepolicies" admin "list/get/set block storage policies"
@@ -176,6 +177,10 @@ function hdfscmd_case
       HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
       HADOOP_CLASSNAME=org.apache.hadoop.portmap.Portmap
     ;;
+    router)
+      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
+      HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.federation.router.Router'
+    ;;
     secondarynamenode)
       HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
       HADOOP_CLASSNAME='org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode'

http://git-wip-us.apache.org/repos/asf/hadoop/blob/34fb52fc/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
index 2181e47..b9853d6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
@@ -59,7 +59,7 @@ if "%1" == "--loglevel" (
     )
   )
 
-  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies classpath crypto debug
+  set hdfscommands=dfs namenode secondarynamenode journalnode zkfc datanode dfsadmin haadmin fsck balancer jmxget oiv oev fetchdt getconf groups snapshotDiff lsSnapshottableDir cacheadmin mover storagepolicies classpath crypto router debug
   for %%i in ( %hdfscommands% ) do (
     if %hdfs-command% == %%i set hdfscommand=true
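For reference, the `router` subcommand wired in above boils down to running the new Router service class. A rough sketch of the equivalent programmatic startup, assuming the Router follows the standard Hadoop service lifecycle (daemon wiring, shutdown hooks, and logging are omitted):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.server.federation.router.Router;

    // Start a Router the way `hdfs router` ultimately does.
    Configuration conf = new HdfsConfiguration();
    Router router = new Router();
    router.init(conf);   // initializes the Router's internal services
    router.start();      // begins serving federated client requests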
[33/50] [abbrv] hadoop git commit: HDFS-10629. Federation Router. Contributed by Jason Kace and Inigo Goiri.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/34fb52fc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java new file mode 100644 index 000..ee6f57d --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java @@ -0,0 +1,290 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver; +import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeContext; +import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamenodeServiceState; +import org.apache.hadoop.hdfs.server.federation.resolver.FederationNamespaceInfo; +import org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver; +import org.apache.hadoop.hdfs.server.federation.resolver.NamenodePriorityComparator; +import org.apache.hadoop.hdfs.server.federation.resolver.NamenodeStatusReport; +import org.apache.hadoop.hdfs.server.federation.resolver.PathLocation; +import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation; +import org.apache.hadoop.hdfs.server.federation.store.StateStoreService; +import org.apache.hadoop.util.Time; + +/** + * In-memory cache/mock of a namenode and file resolver. Stores the most + * recently updated NN information for each nameservice and block pool. Also + * stores a virtual mount table for resolving global namespace paths to local NN + * paths. 
+ */
+public class MockResolver
+    implements ActiveNamenodeResolver, FileSubclusterResolver {
+
+  private Map<String, List<? extends FederationNamenodeContext>> resolver =
+      new HashMap<String, List<? extends FederationNamenodeContext>>();
+  private Map<String, List<RemoteLocation>> locations =
+      new HashMap<String, List<RemoteLocation>>();
+  private Set<FederationNamespaceInfo> namespaces =
+      new HashSet<FederationNamespaceInfo>();
+  private String defaultNamespace = null;
+
+  public MockResolver(Configuration conf, StateStoreService store) {
+    this.cleanRegistrations();
+  }
+
+  public void addLocation(String mount, String nameservice, String location) {
+    RemoteLocation remoteLocation = new RemoteLocation(nameservice, location);
+    List<RemoteLocation> locationsList = locations.get(mount);
+    if (locationsList == null) {
+      locationsList = new LinkedList<RemoteLocation>();
+      locations.put(mount, locationsList);
+    }
+    if (!locationsList.contains(remoteLocation)) {
+      locationsList.add(remoteLocation);
+    }
+
+    if (this.defaultNamespace == null) {
+      this.defaultNamespace = nameservice;
+    }
+  }
+
+  public synchronized void cleanRegistrations() {
+    this.resolver =
+        new HashMap<String, List<? extends FederationNamenodeContext>>();
+    this.namespaces = new HashSet<FederationNamespaceInfo>();
+  }
+
+  @Override
+  public void updateActiveNamenode(
+      String ns, InetSocketAddress successfulAddress) {
+
+    String address = successfulAddress.getHostName() + ":" +
+        successfulAddress.getPort();
+    String key = ns;
+    if (key != null) {
+      // Update the active entry
+      @SuppressWarnings("unchecked")
+      List<FederationNamenodeContext> iterator =
+          (List<FederationNamenodeContext>) resolver.get(key);
+      for (FederationNamenodeContext namenode : iterator) {
+        if (namenode.getRpcAddress().equals(address)) {
+          MockNamenodeContext nn = (MockNamenodeContext) namenode;
+          nn.setState(FederationNamenodeServiceState.ACTIVE);
+          break;
+        }
+      }
+      Collections.sort(iterator, new NamenodePriorityComparator());
+    }
+  }
+
+  @Override
+  public List<? extends FederationNamenodeContext>
+      getNamenodesForNameserviceId(String nameserviceId) {
+    return resolver.get(nameserviceId);
+  }
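As a usage sketch, a test can populate the mock mount table and then resolve paths against it (the mount points and nameservice IDs below are hypothetical):

    // Register two mount entries in the mock resolver; no State Store needed,
    // so the store argument can be null for this mock implementation.
    MockResolver resolver = new MockResolver(new Configuration(), null);
    resolver.addLocation("/", "ns0", "/");
    resolver.addLocation("/data", "ns1", "/data");
    // Paths under /data now resolve to subcluster ns1, everything else to ns0.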
[03/50] [abbrv] hadoop git commit: YARN-7250. Update Shared cache client api to use URLs.
YARN-7250. Update Shared cache client api to use URLs.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c114da5e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c114da5e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c114da5e
Branch: refs/heads/HDFS-10467
Commit: c114da5e64d14b1d9e614081c4171ea0391cb1aa
Parents: 6f789fe
Author: Chris Trezzo
Authored: Thu Sep 28 15:28:06 2017 -0700
Committer: Chris Trezzo
Committed: Thu Sep 28 15:28:06 2017 -0700
--
 .../yarn/client/api/SharedCacheClient.java      | 22
 .../client/api/impl/SharedCacheClientImpl.java  | 36 +---
 .../api/impl/TestSharedCacheClientImpl.java     | 31 +
 3 files changed, 23 insertions(+), 66 deletions(-)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c114da5e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/SharedCacheClient.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/SharedCacheClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/SharedCacheClient.java
index 60c1bd98..a9c1a07 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/SharedCacheClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/SharedCacheClient.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.service.AbstractService;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.URL;
 import org.apache.hadoop.yarn.client.api.impl.SharedCacheClientImpl;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 
@@ -58,34 +59,25 @@ public abstract class SharedCacheClient extends AbstractService {
    *
    *
    * The SharedCacheManager responds with whether or not the
-   * resource exists in the cache. If the resource exists, a Path
-   * to the resource in the shared cache is returned. If the resource does not
+   * resource exists in the cache. If the resource exists, a URL to
+   * the resource in the shared cache is returned. If the resource does not
    * exist, null is returned instead.
    *
    *
    *
-   * Once a path has been returned for a resource, that path is safe to use for
+   * Once a URL has been returned for a resource, that URL is safe to use for
    * the lifetime of the application that corresponds to the provided
    * ApplicationId.
    *
-   *
-   * Additionally, a name for the resource should be specified. A fragment will
-   * be added to the path with the desired name if the desired name is different
-   * than the name of the provided path from the shared cache. This ensures that
-   * if the returned path is used to create a LocalResource, then the symlink
-   * created during YARN localization will match the name specified.
-   *
-   *
    * @param applicationId ApplicationId of the application using the resource
    * @param resourceKey the key (i.e. checksum) that identifies the resource
-   * @param resourceName the desired name of the resource
-   * @return Path to the resource, or null if it does not exist
+   * @return URL to the resource, or null if it does not exist
    */
   @Public
   @Unstable
-  public abstract Path use(ApplicationId applicationId, String resourceKey,
-      String resourceName) throws YarnException;
+  public abstract URL use(ApplicationId applicationId, String resourceKey)
+      throws YarnException;
 
   /**
    *

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c114da5e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/SharedCacheClientImpl.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/SharedCacheClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/SharedCacheClientImpl.java
index b910c28..3191d36 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/SharedCacheClientImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/SharedCacheClientImpl.java
@@ -21,8 +21,6 @@ package org.apache.hadoop.yarn.client.api.impl;
 
 import java.io.IOException;
 import
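A hedged sketch of the updated client API (the checksum value is a placeholder and error handling is omitted):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.api.records.URL;
    import org.apache.hadoop.yarn.client.api.SharedCacheClient;

    // Look up a resource in the shared cache by its checksum key.
    SharedCacheClient client = SharedCacheClient.createSharedCacheClient();
    client.init(new Configuration());
    client.start();
    ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
    URL cached = client.use(appId, "34ab99c8...");  // hypothetical checksum
    if (cached == null) {
      // Not in the cache; the caller falls back to uploading the resource itself.
    }
    client.stop();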
[23/50] [abbrv] hadoop git commit: HDFS-10630. Federation State Store FS Implementation. Contributed by Jason Kace and Inigo Goiri.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a7710b2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java new file mode 100644 index 000..7f0b36a --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/store/driver/TestStateStoreDriverBase.java @@ -0,0 +1,483 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation.store.driver; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.lang.reflect.Method; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils; +import org.apache.hadoop.hdfs.server.federation.store.StateStoreService; +import org.apache.hadoop.hdfs.server.federation.store.records.BaseRecord; +import org.apache.hadoop.hdfs.server.federation.store.records.Query; +import org.apache.hadoop.hdfs.server.federation.store.records.QueryResult; +import org.junit.AfterClass; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Base tests for the driver. The particular implementations will use this to + * test their functionality. + */ +public class TestStateStoreDriverBase { + + private static final Logger LOG = + LoggerFactory.getLogger(TestStateStoreDriverBase.class); + + private static StateStoreService stateStore; + private static Configuration conf; + + + /** + * Get the State Store driver. + * @return State Store driver. + */ + protected StateStoreDriver getStateStoreDriver() { +return stateStore.getDriver(); + } + + @AfterClass + public static void tearDownCluster() { +if (stateStore != null) { + stateStore.stop(); +} + } + + /** + * Get a new State Store using this configuration. + * + * @param config Configuration for the State Store. + * @throws Exception If we cannot get the State Store. 
+   */
+  public static void getStateStore(Configuration config) throws Exception {
+    conf = config;
+    stateStore = FederationStateStoreTestUtils.getStateStore(conf);
+  }
+
+  private <T extends BaseRecord> T generateFakeRecord(Class<T> recordClass)
+      throws IllegalArgumentException, IllegalAccessException, IOException {
+
+    // TODO add record
+    return null;
+  }
+
+  /**
+   * Validate if a record is the same.
+   *
+   * @param original
+   * @param committed
+   * @param assertEquals Assert if the records are equal or just return.
+   * @return
+   * @throws IllegalArgumentException
+   * @throws IllegalAccessException
+   */
+  private boolean validateRecord(
+      BaseRecord original, BaseRecord committed, boolean assertEquals)
+      throws IllegalArgumentException, IllegalAccessException {
+
+    boolean ret = true;
+
+    Map<String, Class<?>> fields = getFields(original);
+    for (String key : fields.keySet()) {
+      if (key.equals("dateModified") ||
+          key.equals("dateCreated") ||
+          key.equals("proto")) {
+        // Fields are updated/set on commit and fetch and may not match
+        // the fields that are initialized in a non-committed object.
+        continue;
+      }
+      Object data1 = getField(original, key);
+      Object data2 = getField(committed, key);
+      if (assertEquals) {
+        assertEquals("Field " + key + " does not match", data1, data2);
+      } else if (!data1.equals(data2)) {
+        ret = false;
+      }
+    }
+
+    long now =
[28/50] [abbrv] hadoop git commit: HDFS-11546. Federation Router RPC server. Contributed by Jason Kace and Inigo Goiri.
HDFS-11546. Federation Router RPC server. Contributed by Jason Kace and Inigo Goiri.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9f8a7e20
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9f8a7e20
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9f8a7e20
Branch: refs/heads/HDFS-10467
Commit: 9f8a7e20411c17f10737f22c950d982ab8fc0779
Parents: 4a7710b
Author: Inigo Goiri
Authored: Thu May 11 09:57:03 2017 -0700
Committer: Inigo Goiri
Committed: Mon Oct 2 15:37:33 2017 -0700
--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   38 +
 .../resolver/FederationNamespaceInfo.java       |   46 +-
 .../federation/resolver/RemoteLocation.java     |   46 +-
 .../federation/router/ConnectionContext.java    |  104 +
 .../federation/router/ConnectionManager.java    |  408
 .../federation/router/ConnectionPool.java       |  314 +++
 .../federation/router/ConnectionPoolId.java     |  117 ++
 .../router/RemoteLocationContext.java           |   38 +-
 .../server/federation/router/RemoteMethod.java  |  164 ++
 .../server/federation/router/RemoteParam.java   |   71 +
 .../hdfs/server/federation/router/Router.java   |   58 +-
 .../federation/router/RouterRpcClient.java      |  856
 .../federation/router/RouterRpcServer.java      | 1867 +-
 .../src/main/resources/hdfs-default.xml         |   95 +
 .../server/federation/FederationTestUtils.java  |   80 +-
 .../hdfs/server/federation/MockResolver.java    |   90 +-
 .../server/federation/RouterConfigBuilder.java  |   20 +-
 .../server/federation/RouterDFSCluster.java     |  535 +++--
 .../server/federation/router/TestRouter.java    |   31 +-
 .../server/federation/router/TestRouterRpc.java |  869
 .../router/TestRouterRpcMultiDestination.java   |  216 ++
 21 files changed, 5675 insertions(+), 388 deletions(-)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f8a7e20/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 10074ce..c7b4c01 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -1120,6 +1120,44 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   // HDFS Router-based federation
   public static final String FEDERATION_ROUTER_PREFIX =
       "dfs.federation.router.";
+  public static final String DFS_ROUTER_DEFAULT_NAMESERVICE =
+      FEDERATION_ROUTER_PREFIX + "default.nameserviceId";
+  public static final String DFS_ROUTER_HANDLER_COUNT_KEY =
+      FEDERATION_ROUTER_PREFIX + "handler.count";
+  public static final int DFS_ROUTER_HANDLER_COUNT_DEFAULT = 10;
+  public static final String DFS_ROUTER_READER_QUEUE_SIZE_KEY =
+      FEDERATION_ROUTER_PREFIX + "reader.queue.size";
+  public static final int DFS_ROUTER_READER_QUEUE_SIZE_DEFAULT = 100;
+  public static final String DFS_ROUTER_READER_COUNT_KEY =
+      FEDERATION_ROUTER_PREFIX + "reader.count";
+  public static final int DFS_ROUTER_READER_COUNT_DEFAULT = 1;
+  public static final String DFS_ROUTER_HANDLER_QUEUE_SIZE_KEY =
+      FEDERATION_ROUTER_PREFIX + "handler.queue.size";
+  public static final int DFS_ROUTER_HANDLER_QUEUE_SIZE_DEFAULT = 100;
+  public static final String DFS_ROUTER_RPC_BIND_HOST_KEY =
+      FEDERATION_ROUTER_PREFIX + "rpc-bind-host";
+  public static final int DFS_ROUTER_RPC_PORT_DEFAULT = ;
+  public static final String DFS_ROUTER_RPC_ADDRESS_KEY =
+      FEDERATION_ROUTER_PREFIX + "rpc-address";
+  public static final String DFS_ROUTER_RPC_ADDRESS_DEFAULT =
+      "0.0.0.0:" + DFS_ROUTER_RPC_PORT_DEFAULT;
+  public static final String DFS_ROUTER_RPC_ENABLE =
+      FEDERATION_ROUTER_PREFIX + "rpc.enable";
+  public static final boolean DFS_ROUTER_RPC_ENABLE_DEFAULT = true;
+
+  // HDFS Router NN client
+  public static final String DFS_ROUTER_NAMENODE_CONNECTION_POOL_SIZE =
+      FEDERATION_ROUTER_PREFIX + "connection.pool-size";
+  public static final int DFS_ROUTER_NAMENODE_CONNECTION_POOL_SIZE_DEFAULT =
+      64;
+  public static final String DFS_ROUTER_NAMENODE_CONNECTION_POOL_CLEAN =
+      FEDERATION_ROUTER_PREFIX + "connection.pool.clean.ms";
+  public static final long DFS_ROUTER_NAMENODE_CONNECTION_POOL_CLEAN_DEFAULT =
+      TimeUnit.MINUTES.toMillis(1);
+  public static final String DFS_ROUTER_NAMENODE_CONNECTION_CLEAN_MS =
+      FEDERATION_ROUTER_PREFIX + "connection.clean.ms";
+  public
[50/50] [abbrv] hadoop git commit: HDFS-12381. [Documentation] Adding configuration keys for the Router. Contributed by Inigo Goiri.
HDFS-12381. [Documentation] Adding configuration keys for the Router. Contributed by Inigo Goiri.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/17c38bd8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/17c38bd8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/17c38bd8
Branch: refs/heads/HDFS-10467
Commit: 17c38bd8b971c737c54a5f99e7cdba024dfeaa82
Parents: c28ca5c
Author: Inigo Goiri
Authored: Fri Sep 22 13:06:10 2017 -0700
Committer: Inigo Goiri
Committed: Mon Oct 2 15:37:39 2017 -0700
--
 .../src/main/resources/hdfs-default.xml       |  11 +-
 .../src/site/markdown/HDFSRouterFederation.md | 159 +--
 2 files changed, 156 insertions(+), 14 deletions(-)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/17c38bd8/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index d58fcae..9a75f7d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -4652,7 +4652,8 @@
   dfs.federation.router.rpc.enable
   true
-  If the RPC service to handle client requests in the router is enabled.
+  If true, the RPC service to handle client requests in the router is
+  enabled.
 
@@ -4756,7 +4757,7 @@
   dfs.federation.router.admin.enable
   true
-  If the RPC admin service to handle client requests in the router is
+  If true, the RPC admin service to handle client requests in the router is
   enabled.
 
@@ -4810,7 +4811,7 @@
   dfs.federation.router.store.enable
   true
-  If the Router connects to the State Store.
+  If true, the Router connects to the State Store.
 
@@ -4858,7 +4859,7 @@
   dfs.federation.router.heartbeat.enable
   true
-  Enables the Router to heartbeat into the State Store.
+  If true, the Router heartbeats into the State Store.
 
@@ -4882,7 +4883,7 @@
   dfs.federation.router.monitor.localnamenode.enable
   true
-  If the Router should monitor the namenode in the local machine.
+  If true, the Router should monitor the namenode in the local machine.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/17c38bd8/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
index f094238..1cea7f6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
@@ -21,7 +21,7 @@ Introduction
 
 NameNodes have scalability limits because of the metadata overhead comprised of inodes (files and directories) and file blocks, the number of Datanode heartbeats, and the number of HDFS RPC client requests.
-The common solution is to split the filesystem into smaller subclusters [HDFS Federation](.Federation.html) and provide a federated view [ViewFs](.ViewFs.html).
+The common solution is to split the filesystem into smaller subclusters [HDFS Federation](./Federation.html) and provide a federated view [ViewFs](./ViewFs.html).
 The problem is how to maintain the split of the subclusters (e.g., namespace partition), which forces users to connect to multiple subclusters and manage the allocation of folders/files to them.
 
@@ -35,7 +35,7 @@ This layer must be scalable, highly available, and fault tolerant.
 
 This federation layer comprises multiple components.
 The _Router_ component that has the same interface as a NameNode, and forwards the client requests to the correct subcluster, based on ground-truth information from a State Store.
-The _State Store_ combines a remote _Mount Table_ (in the flavor of [ViewFs](.ViewFs.html), but shared between clients) and utilization (load/capacity) information about the subclusters.
+The _State Store_ combines a remote _Mount Table_ (in the flavor of [ViewFs](./ViewFs.html), but shared between clients) and utilization (load/capacity) information about the subclusters.
 This approach has the same architecture as [YARN federation](../hadoop-yarn/Federation.html).
 
 ![Router-based Federation Sequence Diagram | width=800](./images/routerfederation.png)
 
@@ -101,11 +101,11 @@ To interact with the users and the administrators, the
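From the client's perspective this is ordinary HDFS access pointed at a Router rather than a NameNode; a hedged sketch (the router host and port are hypothetical placeholders, and error handling is omitted):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // A plain DFS client reaching the federated namespace through a Router.
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://router0.example.com:8888");
    FileSystem fs = FileSystem.get(conf);
    // The Router resolves /data via the Mount Table and proxies the call.
    boolean exists = fs.exists(new Path("/data"));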
[46/50] [abbrv] hadoop git commit: HDFS-12335. Federation Metrics. Contributed by Inigo Goiri.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/92fa0baa/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterMetrics.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterMetrics.java new file mode 100644 index 000..851538a --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterMetrics.java @@ -0,0 +1,73 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.federation.router; + +import static org.apache.hadoop.metrics2.impl.MsInfo.ProcessName; +import static org.apache.hadoop.metrics2.impl.MsInfo.SessionId; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.metrics2.MetricsSystem; +import org.apache.hadoop.metrics2.annotation.Metric; +import org.apache.hadoop.metrics2.annotation.Metrics; +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; +import org.apache.hadoop.metrics2.lib.MetricsRegistry; +import org.apache.hadoop.metrics2.lib.MutableGaugeInt; +import org.apache.hadoop.metrics2.source.JvmMetrics; + +/** + * This class is for maintaining the various Router activity statistics + * and publishing them through the metrics interfaces. 
+ */ +@Metrics(name="RouterActivity", about="Router metrics", context="dfs") +public class RouterMetrics { + + private final MetricsRegistry registry = new MetricsRegistry("router"); + + @Metric("Duration in SafeMode at startup in msec") + private MutableGaugeInt safeModeTime; + + private JvmMetrics jvmMetrics = null; + + RouterMetrics( + String processName, String sessionId, final JvmMetrics jvmMetrics) { +this.jvmMetrics = jvmMetrics; +registry.tag(ProcessName, processName).tag(SessionId, sessionId); + } + + public static RouterMetrics create(Configuration conf) { +String sessionId = conf.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY); +String processName = "Router"; +MetricsSystem ms = DefaultMetricsSystem.instance(); +JvmMetrics jm = JvmMetrics.create(processName, sessionId, ms); + +return ms.register(new RouterMetrics(processName, sessionId, jm)); + } + + public JvmMetrics getJvmMetrics() { +return jvmMetrics; + } + + public void shutdown() { +DefaultMetricsSystem.shutdown(); + } + + public void setSafeModeTime(long elapsed) { +safeModeTime.set((int) elapsed); + } +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/92fa0baa/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterMetricsService.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterMetricsService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterMetricsService.java new file mode 100644 index 000..f4debce --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterMetricsService.java @@ -0,0 +1,108 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */
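Registration and use of RouterMetrics follow the usual Hadoop metrics2 pattern; roughly, based on the methods shown above:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.server.federation.router.RouterMetrics;

    // Register the Router metrics source, record safemode startup time, shut down.
    RouterMetrics metrics = RouterMetrics.create(new Configuration());
    long start = System.currentTimeMillis();
    // ... Router startup work, leaving safemode ...
    metrics.setSafeModeTime(System.currentTimeMillis() - start);
    metrics.shutdown();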
[48/50] [abbrv] hadoop git commit: HDFS-12430. Rebasing HDFS-10467 After HDFS-12269 and HDFS-12218. Contributed by Inigo Goiri.
HDFS-12430. Rebasing HDFS-10467 After HDFS-12269 and HDFS-12218. Contributed by Inigo Goiri.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d0d42441
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d0d42441
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d0d42441
Branch: refs/heads/HDFS-10467
Commit: d0d42441a30d9899b9bcf7714cc8a346ce71b121
Parents: 92fa0ba
Author: Inigo Goiri
Authored: Wed Sep 13 09:15:13 2017 -0700
Committer: Inigo Goiri
Committed: Mon Oct 2 15:37:38 2017 -0700
--
 .../hdfs/server/federation/router/RouterRpcServer.java | 10 +-
 1 file changed, 5 insertions(+), 5 deletions(-)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0d42441/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index 6aee1ee..1fa1720 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -66,7 +66,6 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.inotify.EventBatchList;
 import org.apache.hadoop.hdfs.protocol.AddECPolicyResponse;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
-import org.apache.hadoop.hdfs.protocol.BlocksStats;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
@@ -76,7 +75,7 @@ import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.ECBlockGroupsStats;
+import org.apache.hadoop.hdfs.protocol.ECBlockGroupStats;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -89,6 +88,7 @@ import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.OpenFileEntry;
+import org.apache.hadoop.hdfs.protocol.ReplicatedBlockStats;
 import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
@@ -1879,19 +1879,19 @@ public class RouterRpcServer extends AbstractService implements ClientProtocol {
   }
 
   @Override
-  public ECBlockGroupsStats getECBlockGroupsStats() throws IOException {
+  public ECBlockGroupStats getECBlockGroupStats() throws IOException {
     checkOperation(OperationCategory.READ, false);
     return null;
   }
 
   @Override
-  public HashMap<String, String> getErasureCodingCodecs() throws IOException {
+  public Map<String, String> getErasureCodingCodecs() throws IOException {
     checkOperation(OperationCategory.READ, false);
     return null;
   }
 
   @Override
-  public BlocksStats getBlocksStats() throws IOException {
+  public ReplicatedBlockStats getReplicatedBlockStats() throws IOException {
     checkOperation(OperationCategory.READ, false);
     return null;
   }
[16/50] [abbrv] hadoop git commit: HADOOP-14915. method name is incorrect in ConfServlet. Contributed by Bharat Viswanadham.
HADOOP-14915. method name is incorrect in ConfServlet. Contributed by Bharat Viswanadham.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/563dcdfc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/563dcdfc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/563dcdfc
Branch: refs/heads/HDFS-10467
Commit: 563dcdfc1de7ea9ee7ce296163cf2678dfe5349c
Parents: 06df6ab
Author: Chen Liang
Authored: Mon Oct 2 10:58:19 2017 -0700
Committer: Chen Liang
Committed: Mon Oct 2 10:58:19 2017 -0700
--
 .../src/main/java/org/apache/hadoop/conf/ConfServlet.java     | 4 ++--
 .../src/test/java/org/apache/hadoop/conf/TestConfServlet.java | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/563dcdfc/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
index cfd7b97..2128de7 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
@@ -69,7 +69,7 @@ public class ConfServlet extends HttpServlet {
       return;
     }
 
-    String format = parseAccecptHeader(request);
+    String format = parseAcceptHeader(request);
     if (FORMAT_XML.equals(format)) {
       response.setContentType("text/xml; charset=utf-8");
     } else if (FORMAT_JSON.equals(format)) {
@@ -89,7 +89,7 @@ public class ConfServlet extends HttpServlet {
   }
 
   @VisibleForTesting
-  static String parseAccecptHeader(HttpServletRequest request) {
+  static String parseAcceptHeader(HttpServletRequest request) {
     String format = request.getHeader(HttpHeaders.ACCEPT);
     return format != null && format.contains(FORMAT_JSON) ?
         FORMAT_JSON : FORMAT_XML;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/563dcdfc/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfServlet.java
--
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfServlet.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfServlet.java
index 53089ed..cf42219 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfServlet.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfServlet.java
@@ -98,7 +98,7 @@ public class TestConfServlet {
     Mockito.when(request.getHeader(HttpHeaders.ACCEPT))
         .thenReturn(contentTypeExpected);
     assertEquals(contenTypeActual,
-        ConfServlet.parseAccecptHeader(request));
+        ConfServlet.parseAcceptHeader(request));
   }
 }
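The practical effect: clients fetching a daemon's /conf endpoint receive XML unless the Accept header asks for JSON. A hedged sketch (the daemon address is hypothetical, and ConfServlet is assumed to be mounted at /conf as on standard Hadoop web UIs):

    import java.io.InputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;

    // Request the configuration dump as JSON instead of the default XML.
    URL url = new URL("http://namenode.example.com:9870/conf");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty("Accept", "application/json");
    try (InputStream in = conn.getInputStream()) {
      // ... consume the JSON configuration ...
    }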
[39/50] [abbrv] hadoop git commit: HDFS-11826. Federation Namenode Heartbeat. Contributed by Inigo Goiri.
HDFS-11826. Federation Namenode Heartbeat. Contributed by Inigo Goiri.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3085760e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3085760e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3085760e
Branch: refs/heads/HDFS-10467
Commit: 3085760e5649ea6b81e2f750a1a588afe1295159
Parents: 98d2683
Author: Inigo Goiri
Authored: Tue Aug 1 14:40:27 2017 -0700
Committer: Inigo Goiri
Committed: Mon Oct 2 15:37:34 2017 -0700
--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  14 +
 .../java/org/apache/hadoop/hdfs/DFSUtil.java    |  38 ++
 .../resolver/NamenodeStatusReport.java          | 193 ++
 .../federation/router/FederationUtil.java       |  66
 .../router/NamenodeHeartbeatService.java        | 350 +++
 .../hdfs/server/federation/router/Router.java   | 112 ++
 .../src/main/resources/hdfs-default.xml         |  32 ++
 .../org/apache/hadoop/hdfs/MiniDFSCluster.java  |   8 +
 .../hdfs/server/federation/MockResolver.java    |   9 +-
 .../server/federation/RouterConfigBuilder.java  |  22 ++
 .../server/federation/RouterDFSCluster.java     |  43 +++
 .../router/TestNamenodeHeartbeat.java           | 168 +
 .../server/federation/router/TestRouter.java    |   3 +
 13 files changed, 1057 insertions(+), 1 deletion(-)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3085760e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index b50c538..f0b0c63 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -1147,6 +1147,20 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
       FEDERATION_ROUTER_PREFIX + "rpc.enable";
   public static final boolean DFS_ROUTER_RPC_ENABLE_DEFAULT = true;
 
+  // HDFS Router heartbeat
+  public static final String DFS_ROUTER_HEARTBEAT_ENABLE =
+      FEDERATION_ROUTER_PREFIX + "heartbeat.enable";
+  public static final boolean DFS_ROUTER_HEARTBEAT_ENABLE_DEFAULT = true;
+  public static final String DFS_ROUTER_HEARTBEAT_INTERVAL_MS =
+      FEDERATION_ROUTER_PREFIX + "heartbeat.interval";
+  public static final long DFS_ROUTER_HEARTBEAT_INTERVAL_MS_DEFAULT =
+      TimeUnit.SECONDS.toMillis(5);
+  public static final String DFS_ROUTER_MONITOR_NAMENODE =
+      FEDERATION_ROUTER_PREFIX + "monitor.namenode";
+  public static final String DFS_ROUTER_MONITOR_LOCAL_NAMENODE =
+      FEDERATION_ROUTER_PREFIX + "monitor.localnamenode.enable";
+  public static final boolean DFS_ROUTER_MONITOR_LOCAL_NAMENODE_DEFAULT = true;
+
   // HDFS Router NN client
   public static final String DFS_ROUTER_NAMENODE_CONNECTION_POOL_SIZE =
       FEDERATION_ROUTER_PREFIX + "connection.pool-size";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3085760e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 32a1cae..2f9781a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -1324,6 +1324,44 @@ public class DFSUtil {
   }
 
   /**
+   * Map a logical namenode ID to its web address.
Use the given nameservice if + * specified, or the configured one if none is given. + * + * @param conf Configuration + * @param nsId which nameservice nnId is a part of, optional + * @param nnId the namenode ID to get the service addr for + * @return the service addr, null if it could not be determined + */ + public static String getNamenodeWebAddr(final Configuration conf, String nsId, + String nnId) { + +if (nsId == null) { + nsId = getOnlyNameServiceIdOrNull(conf); +} + +String webAddrKey = DFSUtilClient.concatSuffixes( +DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, nsId, nnId); + +String webAddr = +conf.get(webAddrKey, DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_DEFAULT); +return webAddr; + } + + /** + * Get all of the Web addresses of the individual NNs in a given nameservice. + * + * @param conf Configuration + * @param nsId the
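Based on the signature above, resolving a specific NameNode's web address looks roughly like this (the nameservice and namenode IDs are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSUtil;

    // Resolve the web UI address of namenode "nn1" in nameservice "ns1".
    Configuration conf = new Configuration();
    String webAddr = DFSUtil.getNamenodeWebAddr(conf, "ns1", "nn1");
    // Falls back to the dfs.namenode.http-address default when unset.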
[12/50] [abbrv] hadoop git commit: YARN-6550. Capture launch_container.sh logs to a separate log file. (Suma Shivaprasad via wangda)
YARN-6550. Capture launch_container.sh logs to a separate log file. (Suma Shivaprasad via wangda)

Change-Id: I0ee0b1bb459437432a22cf68861a6354f0decabb

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/febeead5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/febeead5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/febeead5
Branch: refs/heads/HDFS-10467
Commit: febeead5f95c6fc245ea3735f5b538d4bb4dc8a4
Parents: 373d0a5
Author: Wangda Tan
Authored: Fri Sep 29 16:39:46 2017 -0700
Committer: Wangda Tan
Committed: Fri Sep 29 16:39:46 2017 -0700
--
 .../server/nodemanager/ContainerExecutor.java  |  20 +-
 .../nodemanager/DefaultContainerExecutor.java  |   3 +-
 .../nodemanager/LinuxContainerExecutor.java    |   3 +-
 .../launcher/ContainerLaunch.java              | 189 ---
 .../launcher/TestContainerLaunch.java          | 115 ++-
 5 files changed, 290 insertions(+), 40 deletions(-)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/febeead5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
index da50d7a..5fd059d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
@@ -64,6 +64,9 @@ import org.apache.hadoop.yarn.server.nodemanager.util.ProcessIdFileReader;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.StringUtils;
 
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.CONTAINER_PRE_LAUNCH_STDERR;
+import static org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.CONTAINER_PRE_LAUNCH_STDOUT;
+
 /**
  * This class is abstraction of the mechanism used to launch a container on the
  * underlying OS. All executor implementations must extend ContainerExecutor.
@@ -330,6 +333,14 @@ public abstract class ContainerExecutor implements Configurable {
       String user, String outFilename) throws IOException {
     ContainerLaunch.ShellScriptBuilder sb =
         ContainerLaunch.ShellScriptBuilder.create();
+
+    // Add "set -o pipefail -e" to validate launch_container script.
+    sb.setExitOnFailure();
+
+    //Redirect stdout and stderr for launch_container script
+    sb.stdout(logDir, CONTAINER_PRE_LAUNCH_STDOUT);
+    sb.stderr(logDir, CONTAINER_PRE_LAUNCH_STDERR);
+
     Set<String> whitelist = new HashSet<>();
 
     String[] nmWhiteList = conf.get(YarnConfiguration.NM_ENV_WHITELIST,
@@ -338,10 +349,8 @@ public abstract class ContainerExecutor implements Configurable {
       whitelist.add(param);
     }
 
-    // Add "set -o pipefail -e" to validate launch_container script.
-    sb.setExitOnFailure();
-
     if (environment != null) {
+      sb.echo("Setting up env variables");
       for (Map.Entry<String, String> env : environment.entrySet()) {
         if (!whitelist.contains(env.getKey())) {
           sb.env(env.getKey(), env.getValue());
@@ -352,6 +361,7 @@ public abstract class ContainerExecutor implements Configurable {
     }
 
     if (resources != null) {
+      sb.echo("Setting up job resources");
      for (Map.Entry<Path, List<String>> resourceEntry :
          resources.entrySet()) {
         for (String linkName : resourceEntry.getValue()) {
@@ -373,15 +383,15 @@ public abstract class ContainerExecutor implements Configurable {
     if (getConf() != null &&
         getConf().getBoolean(YarnConfiguration.NM_LOG_CONTAINER_DEBUG_INFO,
         YarnConfiguration.DEFAULT_NM_LOG_CONTAINER_DEBUG_INFO)) {
+      sb.echo("Copying debugging information");
       sb.copyDebugInformation(new Path(outFilename),
           new Path(logDir, outFilename));
       sb.listDebugInformation(new Path(logDir, DIRECTORY_CONTENTS));
     }
-
+    sb.echo("Launching container");
     sb.command(command);
 
     PrintStream pout = null;
-
     try {
       pout = new PrintStream(out, false, "UTF-8");
       sb.write(pout);
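Pulling the pieces of the diff together, the builder sequence that now produces the launch-script prologue is roughly the following standalone sketch (the log directory is illustrative and exceptions are left unhandled):

    import java.io.ByteArrayOutputStream;
    import java.io.PrintStream;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch;

    // Build the self-validating, log-redirecting script prologue.
    ContainerLaunch.ShellScriptBuilder sb =
        ContainerLaunch.ShellScriptBuilder.create();
    sb.setExitOnFailure();                    // emits "set -o pipefail -e"
    Path logDir = new Path("/tmp/app/logs");  // hypothetical container log dir
    sb.stdout(logDir, ContainerLaunch.CONTAINER_PRE_LAUNCH_STDOUT);
    sb.stderr(logDir, ContainerLaunch.CONTAINER_PRE_LAUNCH_STDERR);
    sb.echo("Launching container");
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    sb.write(new PrintStream(out, false, "UTF-8"));
    System.out.println(out.toString("UTF-8"));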
[25/50] [abbrv] hadoop git commit: HDFS-11546. Federation Router RPC server. Contributed by Jason Kace and Inigo Goiri.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9f8a7e20/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
index ee6f57d..2875750 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/MockResolver.java
@@ -43,7 +43,7 @@ import org.apache.hadoop.util.Time;
 
 /**
  * In-memory cache/mock of a namenode and file resolver. Stores the most
- * recently updated NN information for each nameservice and block pool. Also
+ * recently updated NN information for each nameservice and block pool. It also
  * stores a virtual mount table for resolving global namespace paths to local NN
  * paths.
  */
@@ -51,82 +51,93 @@ public class MockResolver
     implements ActiveNamenodeResolver, FileSubclusterResolver {
 
   private Map<String, List<? extends FederationNamenodeContext>> resolver =
-      new HashMap<String, List<? extends FederationNamenodeContext>>();
-  private Map<String, List<RemoteLocation>> locations =
-      new HashMap<String, List<RemoteLocation>>();
-  private Set<FederationNamespaceInfo> namespaces =
-      new HashSet<FederationNamespaceInfo>();
+      new HashMap<>();
+  private Map<String, List<RemoteLocation>> locations = new HashMap<>();
+  private Set<FederationNamespaceInfo> namespaces = new HashSet<>();
   private String defaultNamespace = null;
 
+
   public MockResolver(Configuration conf, StateStoreService store) {
     this.cleanRegistrations();
   }
 
-  public void addLocation(String mount, String nameservice, String location) {
-    RemoteLocation remoteLocation = new RemoteLocation(nameservice, location);
-    List<RemoteLocation> locationsList = locations.get(mount);
+  public void addLocation(String mount, String nsId, String location) {
+    List<RemoteLocation> locationsList = this.locations.get(mount);
     if (locationsList == null) {
-      locationsList = new LinkedList<RemoteLocation>();
-      locations.put(mount, locationsList);
+      locationsList = new LinkedList<>();
+      this.locations.put(mount, locationsList);
     }
+
+    final RemoteLocation remoteLocation = new RemoteLocation(nsId, location);
     if (!locationsList.contains(remoteLocation)) {
       locationsList.add(remoteLocation);
     }
 
     if (this.defaultNamespace == null) {
-      this.defaultNamespace = nameservice;
+      this.defaultNamespace = nsId;
     }
   }
 
   public synchronized void cleanRegistrations() {
-    this.resolver =
-        new HashMap<String, List<? extends FederationNamenodeContext>>();
-    this.namespaces = new HashSet<FederationNamespaceInfo>();
+    this.resolver = new HashMap<>();
+    this.namespaces = new HashSet<>();
   }
 
   @Override
   public void updateActiveNamenode(
-      String ns, InetSocketAddress successfulAddress) {
+      String nsId, InetSocketAddress successfulAddress) {
 
     String address = successfulAddress.getHostName() + ":" +
         successfulAddress.getPort();
-    String key = ns;
+    String key = nsId;
     if (key != null) {
       // Update the active entry
       @SuppressWarnings("unchecked")
-      List<FederationNamenodeContext> iterator =
-          (List<FederationNamenodeContext>) resolver.get(key);
-      for (FederationNamenodeContext namenode : iterator) {
+      List<FederationNamenodeContext> namenodes =
+          (List<FederationNamenodeContext>) this.resolver.get(key);
+      for (FederationNamenodeContext namenode : namenodes) {
         if (namenode.getRpcAddress().equals(address)) {
           MockNamenodeContext nn = (MockNamenodeContext) namenode;
           nn.setState(FederationNamenodeServiceState.ACTIVE);
           break;
         }
       }
-      Collections.sort(iterator, new NamenodePriorityComparator());
+      // This operation modifies the list so we need to be careful
+      synchronized(namenodes) {
+        Collections.sort(namenodes, new NamenodePriorityComparator());
+      }
     }
   }
 
   @Override
   public List<? extends FederationNamenodeContext>
       getNamenodesForNameserviceId(String nameserviceId) {
-    return resolver.get(nameserviceId);
+    // Return a copy of the list because it is updated periodically
+    List<? extends FederationNamenodeContext> namenodes =
+        this.resolver.get(nameserviceId);
+    return Collections.unmodifiableList(new ArrayList<>(namenodes));
   }
 
   @Override
   public List<? extends FederationNamenodeContext> getNamenodesForBlockPoolId(
       String blockPoolId) {
-    return resolver.get(blockPoolId);
+    // Return a copy of the list because it is updated periodically
+    List<? extends FederationNamenodeContext> namenodes =
+        this.resolver.get(blockPoolId);
+    return Collections.unmodifiableList(new ArrayList<>(namenodes));
   }
 
   private static class MockNamenodeContext
       implements FederationNamenodeContext {
+
+    private String namenodeId;
+    private String nameserviceId;
+
     private String webAddress;
     private String rpcAddress;
     private String serviceAddress;
     private String lifelineAddress;
-    private String namenodeId;
-    private String
[15/50] [abbrv] hadoop git commit: HADOOP-14095. Document caveats about the default JavaKeyStoreProvider in KMS.
HADOOP-14095. Document caveats about the default JavaKeyStoreProvider in KMS.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/06df6ab2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/06df6ab2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/06df6ab2
Branch: refs/heads/HDFS-10467
Commit: 06df6ab25432927cd55918440cc8a28fb4f03bfa
Parents: 66c4171
Author: Xiao Chen
Authored: Fri Sep 29 19:17:32 2017 -0700
Committer: Xiao Chen
Committed: Fri Sep 29 19:34:29 2017 -0700
--
 hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06df6ab2/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
--
diff --git a/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm b/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
index 1dd89e9..5490219 100644
--- a/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
+++ b/hadoop-common-project/hadoop-kms/src/site/markdown/index.md.vm
@@ -80,6 +80,8 @@ The password file is looked up in the Hadoop's configuration directory via the c
 
 NOTE: You need to restart the KMS for the configuration changes to take effect.
 
+NOTE: The KMS server can choose any `KeyProvider` implementation as the backing provider. The example here uses a JavaKeyStoreProvider, which should only be used for experimental purposes and never be used in production. For detailed usage and caveats of JavaKeyStoreProvider, please see [Keystore Passwords section of the Credential Provider API](../hadoop-project-dist/hadoop-common/CredentialProviderAPI.html#Keystore_Passwords).
+
 $H3 KMS HTTP Configuration
 
 KMS pre-configures the HTTP port to 9600.
@@ -1184,4 +1186,4 @@ and `/stacks`, configure the following properties in `kms-site.xml`:
     to all users and groups, e.g. '*', '* ' and ' *' are all valid.
 
-```
\ No newline at end of file
+```
[21/50] [abbrv] hadoop git commit: YARN-7259. Add size-based rolling policy to LogAggregationIndexedFileController. (xgong via wangda)
YARN-7259. Add size-based rolling policy to LogAggregationIndexedFileController. (xgong via wangda)

Change-Id: Ifaf82c0aee6b73b9b6ebf103aa72e131e3942f31

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/280080fa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/280080fa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/280080fa
Branch: refs/heads/HDFS-10467
Commit: 280080fad01304c85a9ede4d4f7b707eb36c0155
Parents: 2c62ff7
Author: Wangda Tan
Authored: Mon Oct 2 15:30:22 2017 -0700
Committer: Wangda Tan
Committed: Mon Oct 2 15:30:22 2017 -0700
--
 .../ifile/IndexedFileAggregatedLogsBlock.java   |  14 +-
 .../LogAggregationIndexedFileController.java    | 397 +--
 .../TestLogAggregationIndexFileController.java  |  67 +++-
 3 files changed, 340 insertions(+), 138 deletions(-)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/280080fa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/IndexedFileAggregatedLogsBlock.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/IndexedFileAggregatedLogsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/IndexedFileAggregatedLogsBlock.java
index c4cbfda..5439b53 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/IndexedFileAggregatedLogsBlock.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/IndexedFileAggregatedLogsBlock.java
@@ -101,10 +101,9 @@ public class IndexedFileAggregatedLogsBlock extends LogAggregationHtmlBlock {
       return;
     }
 
-    Map<String, FileStatus> checkSumFiles;
+    Map<String, Long> checkSumFiles;
     try {
-      checkSumFiles = fileController.filterFiles(nodeFiles,
-          LogAggregationIndexedFileController.CHECK_SUM_FILE_SUFFIX);
+      checkSumFiles = fileController.parseCheckSumFiles(nodeFiles);
     } catch (IOException ex) {
       LOG.error("Error getting logs for " + logEntity, ex);
       html.h1("Error getting logs for " + logEntity);
@@ -125,12 +124,11 @@ public class IndexedFileAggregatedLogsBlock extends LogAggregationHtmlBlock {
     String desiredLogType = $(CONTAINER_LOG_TYPE);
     try {
       for (FileStatus thisNodeFile : fileToRead) {
-        FileStatus checkSum = fileController.getAllChecksumFiles(
-            checkSumFiles, thisNodeFile.getPath().getName());
+        Long checkSumIndex = checkSumFiles.get(
+            thisNodeFile.getPath().getName());
         long endIndex = -1;
-        if (checkSum != null) {
-          endIndex = fileController.loadIndexedLogsCheckSum(
-              checkSum.getPath());
+        if (checkSumIndex != null) {
+          endIndex = checkSumIndex.longValue();
         }
         IndexedLogsMeta indexedLogsMeta = null;
         try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/280080fa/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java
index 243945e..800c0a2 100644
---
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/ifile/LogAggregationIndexedFileController.java @@ -29,6 +29,8 @@ import java.io.InputStream; import java.io.OutputStream; import java.io.Serializable; import java.nio.charset.Charset; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.Arrays; @@ -41,7 +43,6 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Set; -import java.util.UUID; import org.apache.commons.lang.SerializationUtils; import
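The excerpt cuts off mid-import, but the visible hunks show the core of the change: instead of loading a checksum FileStatus per aggregated log file, the controller now parses all per-node checksum files once into a map from file name to a Long end offset, and readers stop at that offset. A minimal, self-contained sketch of that lookup pattern follows; the names here are illustrative stand-ins, not the patched Hadoop API:

import java.util.HashMap;
import java.util.Map;

/** Illustrative only: bound a log read by a previously recorded end offset. */
public class EndOffsetLookup {
  public static void main(String[] args) {
    // Stand-in for the parsed checksum map: file name -> last known-good end offset.
    Map<String, Long> checkSumFiles = new HashMap<>();
    checkSumFiles.put("node-1_8041", 4096L);

    String fileName = "node-1_8041";
    Long checkSumIndex = checkSumFiles.get(fileName);
    // -1 mirrors the hunk above: no recorded offset means "read to end of file".
    long endIndex = (checkSumIndex != null) ? checkSumIndex.longValue() : -1;
    System.out.println(fileName + " is readable up to offset " + endIndex);
  }
}

The apparent payoff is one map build per request instead of one checksum-file read per log file, which matters once size-based rolling multiplies the number of aggregated files per node.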
[42/50] [abbrv] hadoop git commit: HDFS-11554. [Documentation] Router-based federation documentation. Contributed by Inigo Goiri.
HDFS-11554. [Documentation] Router-based federation documentation. Contributed by Inigo Goiri.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aec6a9b0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aec6a9b0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aec6a9b0

Branch: refs/heads/HDFS-10467
Commit: aec6a9b056917391a5b7d017eb6729c3dc4fb44b
Parents: 9946d20
Author: Inigo Goiri
Authored: Wed Aug 16 17:23:29 2017 -0700
Committer: Inigo Goiri
Committed: Mon Oct 2 15:37:35 2017 -0700

--
 .../src/site/markdown/HDFSRouterFederation.md   | 170 +++
 .../site/resources/images/routerfederation.png | Bin 0 -> 24961 bytes
 2 files changed, 170 insertions(+)
--

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aec6a9b0/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
new file mode 100644
index 0000000..f094238
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
@@ -0,0 +1,170 @@
+
+HDFS Router-based Federation
+============================
+
+
+Introduction
+------------
+
+NameNodes have scalability limits because of the metadata overhead comprised of inodes (files and directories) and file blocks, the number of Datanode heartbeats, and the number of HDFS RPC client requests.
+The common solution is to split the filesystem into smaller subclusters [HDFS Federation](./Federation.html) and provide a federated view [ViewFs](./ViewFs.html).
+The problem is how to maintain the split of the subclusters (e.g., namespace partition), which forces users to connect to multiple subclusters and manage the allocation of folders/files to them.
+
+
+Architecture
+------------
+
+A natural extension to this partitioned federation is to add a layer of software responsible for federating the namespaces.
+This extra layer allows users to access any subcluster transparently, lets subclusters manage their own block pools independently, and supports rebalancing of data across subclusters.
+To accomplish these goals, the federation layer directs block accesses to the proper subcluster, maintains the state of the namespaces, and provides mechanisms for data rebalancing.
+This layer must be scalable, highly available, and fault tolerant.
+
+This federation layer comprises multiple components.
+The _Router_ component has the same interface as a NameNode and forwards client requests to the correct subcluster, based on ground-truth information from a State Store.
+The _State Store_ combines a remote _Mount Table_ (in the flavor of [ViewFs](./ViewFs.html), but shared between clients) and utilization (load/capacity) information about the subclusters.
+This approach has the same architecture as [YARN federation](../hadoop-yarn/Federation.html).
+
+![Router-based Federation Sequence Diagram | width=800](./images/routerfederation.png)
+
+
+### Example flow
+The simplest configuration deploys a Router on each NameNode machine.
+The Router monitors the local NameNode and heartbeats the state to the State Store.
+When a regular DFS client contacts any of the Routers to access a file in the federated filesystem, the Router checks the Mount Table in the State Store (i.e., the local cache) to find out which subcluster contains the file.
+Then it checks the Membership table in the State Store (i.e., the local cache) for the NameNode responsible for the subcluster.
+After it has identified the correct NameNode, the Router proxies the request.
+The client accesses Datanodes directly.
+
+
+### Router
+There can be multiple Routers in the system with soft state.
+Each Router has two roles:
+
+* Federated interface: expose a single, global NameNode interface to the clients and forward the requests to the active NameNode in the correct subcluster
+* NameNode heartbeat: maintain the information about a NameNode in the State Store
+
+#### Federated interface
+The Router receives a client request, checks the State Store for the correct subcluster, and forwards the request to the active NameNode of that subcluster.
+The reply from the NameNode then flows in the opposite direction.
+The Routers are stateless and can be behind a load balancer.
+For performance, the Router also caches remote mount table entries and the state of the subclusters.
+To make sure that changes have been propagated to all Routers, each Router heartbeats its state to the State Store.
+
+The communications between the Routers and the State Store are cached (with timed
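Since the Mount Table is the pivot of the whole flow above, a concrete illustration may help. The following is a self-contained sketch of the longest-prefix-match lookup such a federation layer performs to map a federated path to a subcluster; it is not the Router's actual implementation, and all names are illustrative:

import java.util.Map;
import java.util.TreeMap;

/** Illustrative longest-prefix mount-table lookup; not the actual Router code. */
public class MountTableLookup {
  public static void main(String[] args) {
    // Federated path prefix -> subcluster (nameservice) id.
    Map<String, String> mountTable = new TreeMap<>();
    mountTable.put("/", "ns0");
    mountTable.put("/data", "ns1");
    mountTable.put("/data/logs", "ns2");

    System.out.println(resolve(mountTable, "/data/logs/app1")); // ns2
    System.out.println(resolve(mountTable, "/data/tmp"));       // ns1
    System.out.println(resolve(mountTable, "/user/alice"));     // ns0
  }

  static String resolve(Map<String, String> mountTable, String path) {
    String best = null;
    int bestLen = -1;
    for (Map.Entry<String, String> e : mountTable.entrySet()) {
      String prefix = e.getKey();
      // An entry matches if it is the root or a path-component-aligned prefix.
      boolean matches = prefix.equals("/")
          || path.equals(prefix)
          || path.startsWith(prefix + "/");
      if (matches && prefix.length() > bestLen) {
        best = e.getValue();
        bestLen = prefix.length();
      }
    }
    return best; // null only if no root entry exists
  }
}

Longest-prefix matching is what lets a nested mount such as /data/logs override its parent /data, in the same spirit as ViewFs-style mount tables.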
[44/50] [abbrv] hadoop git commit: HDFS-10646. Federation admin tool. Contributed by Inigo Goiri.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9946d20c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java
new file mode 100644
index 0000000..170247f
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/federation/router/TestRouterAdmin.java
@@ -0,0 +1,261 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.federation.router;
+
+import static org.apache.hadoop.hdfs.server.federation.store.FederationStateStoreTestUtils.synchronizeRecords;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.server.federation.RouterConfigBuilder;
+import org.apache.hadoop.hdfs.server.federation.RouterDFSCluster.RouterContext;
+import org.apache.hadoop.hdfs.server.federation.StateStoreDFSCluster;
+import org.apache.hadoop.hdfs.server.federation.resolver.MountTableManager;
+import org.apache.hadoop.hdfs.server.federation.resolver.RemoteLocation;
+import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
+import org.apache.hadoop.hdfs.server.federation.store.impl.MountTableStoreImpl;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.AddMountTableEntryResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.GetMountTableEntriesResponse;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.RemoveMountTableEntryRequest;
+import org.apache.hadoop.hdfs.server.federation.store.protocol.UpdateMountTableEntryRequest;
+import org.apache.hadoop.hdfs.server.federation.store.records.MountTable;
+import org.apache.hadoop.util.Time;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * The administrator interface of the {@link Router} implemented by
+ * {@link RouterAdminServer}.
+ */
+public class TestRouterAdmin {
+
+  private static StateStoreDFSCluster cluster;
+  private static RouterContext routerContext;
+  public static final String RPC_BEAN =
+      "Hadoop:service=Router,name=FederationRPC";
+  private static List<MountTable> mockMountTable;
+  private static StateStoreService stateStore;
+
+  @BeforeClass
+  public static void globalSetUp() throws Exception {
+    cluster = new StateStoreDFSCluster(false, 1);
+    // Build and start a router with State Store + admin + RPC
+    Configuration conf = new RouterConfigBuilder()
+        .stateStore()
+        .admin()
+        .rpc()
+        .build();
+    cluster.addRouterOverrides(conf);
+    cluster.startRouters();
+    routerContext = cluster.getRandomRouter();
+    mockMountTable = cluster.generateMockMountTable();
+    Router router = routerContext.getRouter();
+    stateStore = router.getStateStore();
+  }
+
+  @AfterClass
+  public static void tearDown() {
+    cluster.stopRouter(routerContext);
+  }
+
+  @Before
+  public void testSetup() throws Exception {
+    assertTrue(
+        synchronizeRecords(stateStore, mockMountTable, MountTable.class));
+  }
+
+  @Test
+  public void testAddMountTable() throws IOException {
+    MountTable newEntry = MountTable.newInstance(
+        "/testpath", Collections.singletonMap("ns0", "/testdir"),
+        Time.now(), Time.now());
+
+    RouterClient client = routerContext.getAdminClient();
+    MountTableManager mountTable = client.getMountTableManager();
+
+    // Existing mount table size
+    List<MountTable> records =
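The digest truncates the test here, mid-statement. For readers who just want the shape of the add-then-verify pattern the test is exercising, here is a dependency-free sketch; it deliberately does not use the Hadoop RouterClient/MountTableManager API, and every name in it is illustrative:

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

/**
 * Illustrative add-then-verify round trip in the shape of testAddMountTable
 * above; NOT the Hadoop federation admin API, just a sketch.
 */
public class MountTableAdminSketch {
  private final Map<String, String> entries = new LinkedHashMap<>();

  boolean addEntry(String src, String dest) {
    return entries.putIfAbsent(src, dest) == null;
  }

  List<String> getEntries() {
    return new ArrayList<>(entries.keySet());
  }

  public static void main(String[] args) {
    MountTableAdminSketch admin = new MountTableAdminSketch();
    int before = admin.getEntries().size();

    // Mirror of the test: add /testpath -> ns0:/testdir, then verify growth.
    boolean added = admin.addEntry("/testpath", "ns0:/testdir");
    int after = admin.getEntries().size();

    // Run with java -ea to enable the assertion.
    assert added && after == before + 1 : "entry should be added exactly once";
    System.out.println("mount entries: " + admin.getEntries());
  }
}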