hadoop git commit: YARN-7402. [GPG] Fix potential connection leak in GPGUtils. Contributed by Giovanni Matteo Fumarola.
Repository: hadoop Updated Branches: refs/heads/YARN-7402 f9c69ca3e -> db183f2ea YARN-7402. [GPG] Fix potential connection leak in GPGUtils. Contributed by Giovanni Matteo Fumarola. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/db183f2e Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/db183f2e Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/db183f2e Branch: refs/heads/YARN-7402 Commit: db183f2ea0a34aeb329fbc0d1553a87f7cf103b7 Parents: f9c69ca Author: Botong Huang <bot...@apache.org> Authored: Wed May 23 12:45:32 2018 -0700 Committer: Botong Huang <bot...@apache.org> Committed: Wed May 23 12:45:32 2018 -0700 -- .../server/globalpolicygenerator/GPGUtils.java | 31 +--- 1 file changed, 20 insertions(+), 11 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/db183f2e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGUtils.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGUtils.java index 429bec4..31cee1c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGUtils.java @@ -18,21 +18,22 @@ package org.apache.hadoop.yarn.server.globalpolicygenerator; +import static javax.servlet.http.HttpServletResponse.SC_OK; + import java.util.HashMap; import java.util.Map; import 
java.util.Set; -import javax.servlet.http.HttpServletResponse; import javax.ws.rs.core.MediaType; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterIdInfo; import com.sun.jersey.api.client.Client; import com.sun.jersey.api.client.ClientResponse; import com.sun.jersey.api.client.WebResource; -import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; -import org.apache.hadoop.yarn.server.federation.store.records.SubClusterIdInfo; /** * GPGUtils contains utility functions for the GPG. @@ -53,15 +54,23 @@ public final class GPGUtils { T obj = null; WebResource webResource = client.resource(webAddr); -ClientResponse response = webResource.path("ws/v1/cluster").path(path) -.accept(MediaType.APPLICATION_XML).get(ClientResponse.class); -if (response.getStatus() == HttpServletResponse.SC_OK) { - obj = response.getEntity(returnType); -} else { - throw new YarnRuntimeException("Bad response from remote web service: " - + response.getStatus()); +ClientResponse response = null; +try { + response = webResource.path("ws/v1/cluster").path(path) + .accept(MediaType.APPLICATION_XML).get(ClientResponse.class); + if (response.getStatus() == SC_OK) { +obj = response.getEntity(returnType); + } else { +throw new YarnRuntimeException( +"Bad response from remote web service: " + response.getStatus()); + } + return obj; +} finally { + if (response != null) { +response.close(); + } + client.destroy(); } -return obj; } /** - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[17/50] [abbrv] hadoop git commit: HDFS-13611. Unsafe use of Text as a ConcurrentHashMap key in PBHelperClient.
HDFS-13611. Unsafe use of Text as a ConcurrentHashMap key in PBHelperClient. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c9b63deb Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c9b63deb Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c9b63deb Branch: refs/heads/YARN-7402 Commit: c9b63deb533274ca8ef4939f6cd13f728a067f7b Parents: 1388de1 Author: Andrew Wang Authored: Thu May 24 09:56:23 2018 -0700 Committer: Andrew Wang Committed: Thu May 24 09:56:23 2018 -0700 -- .../java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/c9b63deb/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java index 579ac43..490ccb4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java @@ -247,7 +247,7 @@ public class PBHelperClient { ByteString value = fixedByteStringCache.get(key); if (value == null) { value = ByteString.copyFromUtf8(key.toString()); - fixedByteStringCache.put(key, value); + fixedByteStringCache.put(new Text(key.copyBytes()), value); } return value; } - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[46/50] [abbrv] hadoop git commit: YARN-6648. [GPG] Add SubClusterCleaner in Global Policy Generator. (botong)
YARN-6648. [GPG] Add SubClusterCleaner in Global Policy Generator. (botong) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/46a4a945 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/46a4a945 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/46a4a945 Branch: refs/heads/YARN-7402 Commit: 46a4a945732afdefec9828d1c43b77d32609bb8a Parents: bca8e9b Author: Botong Huang Authored: Thu Feb 1 14:43:48 2018 -0800 Committer: Botong Huang Committed: Tue May 29 10:48:40 2018 -0700 -- .../dev-support/findbugs-exclude.xml| 5 + .../hadoop/yarn/conf/YarnConfiguration.java | 18 +++ .../src/main/resources/yarn-default.xml | 24 .../store/impl/MemoryFederationStateStore.java | 13 ++ .../utils/FederationStateStoreFacade.java | 41 ++- .../GlobalPolicyGenerator.java | 92 ++- .../subclustercleaner/SubClusterCleaner.java| 109 + .../subclustercleaner/package-info.java | 19 +++ .../TestSubClusterCleaner.java | 118 +++ 9 files changed, 409 insertions(+), 30 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/46a4a945/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml -- diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml index 5841361..bf2e376 100644 --- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml +++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml @@ -380,6 +380,11 @@ + + + + + http://git-wip-us.apache.org/repos/asf/hadoop/blob/46a4a945/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index f7f82f8..7c78e0d 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -3326,6 +3326,24 @@ public class YarnConfiguration extends Configuration { public static final boolean DEFAULT_ROUTER_WEBAPP_PARTIAL_RESULTS_ENABLED = false; + private static final String FEDERATION_GPG_PREFIX = + FEDERATION_PREFIX + "gpg."; + + // The number of threads to use for the GPG scheduled executor service + public static final String GPG_SCHEDULED_EXECUTOR_THREADS = + FEDERATION_GPG_PREFIX + "scheduled.executor.threads"; + public static final int DEFAULT_GPG_SCHEDULED_EXECUTOR_THREADS = 10; + + // The interval at which the subcluster cleaner runs, -1 means disabled + public static final String GPG_SUBCLUSTER_CLEANER_INTERVAL_MS = + FEDERATION_GPG_PREFIX + "subcluster.cleaner.interval-ms"; + public static final long DEFAULT_GPG_SUBCLUSTER_CLEANER_INTERVAL_MS = -1; + + // The expiration time for a subcluster heartbeat, default is 30 minutes + public static final String GPG_SUBCLUSTER_EXPIRATION_MS = + FEDERATION_GPG_PREFIX + "subcluster.heartbeat.expiration-ms"; + public static final long DEFAULT_GPG_SUBCLUSTER_EXPIRATION_MS = 180; + // Other Configs http://git-wip-us.apache.org/repos/asf/hadoop/blob/46a4a945/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml index b0ffc48..8a450d3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml @@ -3524,6 +3524,30 @@ + The number of threads to use for the GPG scheduled executor service. 
+ +yarn.federation.gpg.scheduled.executor.threads +10 + + + + + The interval at which the subcluster cleaner runs, -1 means disabled. + +yarn.federation.gpg.subcluster.cleaner.interval-ms +-1 + + + + + The expiration time for a sub
[16/50] [abbrv] hadoop git commit: YARN-6919. Add default volume mount list. Contributed by Eric Badger
YARN-6919. Add default volume mount list. Contributed by Eric Badger Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1388de18 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1388de18 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1388de18 Branch: refs/heads/YARN-7402 Commit: 1388de18ad51434569589a8f5b0b05c38fe02ab3 Parents: 774daa8 Author: Shane Kumpf Authored: Thu May 24 09:30:39 2018 -0600 Committer: Shane Kumpf Committed: Thu May 24 09:30:39 2018 -0600 -- .../hadoop/yarn/conf/YarnConfiguration.java | 10 ++ .../src/main/resources/yarn-default.xml | 14 ++ .../runtime/DockerLinuxContainerRuntime.java| 38 + .../runtime/TestDockerContainerRuntime.java | 138 +++ 4 files changed, 200 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/1388de18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index 004a59f..f7f82f8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -2002,6 +2002,16 @@ public class YarnConfiguration extends Configuration { */ public static final int DEFAULT_NM_DOCKER_STOP_GRACE_PERIOD = 10; + /** The default list of read-only mounts to be bind-mounted into all + * Docker containers that use DockerContainerRuntime. 
*/ + public static final String NM_DOCKER_DEFAULT_RO_MOUNTS = + DOCKER_CONTAINER_RUNTIME_PREFIX + "default-ro-mounts"; + + /** The default list of read-write mounts to be bind-mounted into all + * Docker containers that use DockerContainerRuntime. */ + public static final String NM_DOCKER_DEFAULT_RW_MOUNTS = + DOCKER_CONTAINER_RUNTIME_PREFIX + "default-rw-mounts"; + /** The mode in which the Java Container Sandbox should run detailed by * the JavaSandboxLinuxContainerRuntime. */ public static final String YARN_CONTAINER_SANDBOX = http://git-wip-us.apache.org/repos/asf/hadoop/blob/1388de18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml index c82474c..b0ffc48 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml @@ -1811,6 +1811,20 @@ +The default list of read-only mounts to be bind-mounted + into all Docker containers that use DockerContainerRuntime. +yarn.nodemanager.runtime.linux.docker.default-ro-mounts + + + + +The default list of read-write mounts to be bind-mounted + into all Docker containers that use DockerContainerRuntime. +yarn.nodemanager.runtime.linux.docker.default-rw-mounts + + + + The mode in which the Java Container Sandbox should run detailed by the JavaSandboxLinuxContainerRuntime. 
yarn.nodemanager.runtime.linux.sandbox-mode http://git-wip-us.apache.org/repos/asf/hadoop/blob/1388de18/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java index e131e9d..5e2233b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java +++
[11/50] [abbrv] hadoop git commit: HDFS-13598. Reduce unnecessary byte-to-string transform operation in INodesInPath#toString. Contributed by Gabor Bota.
HDFS-13598. Reduce unnecessary byte-to-string transform operation in INodesInPath#toString. Contributed by Gabor Bota. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7a87add4 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7a87add4 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7a87add4 Branch: refs/heads/YARN-7402 Commit: 7a87add4ea4c317aa9377d1fc8e43fb5e7418a46 Parents: d996479 Author: Yiqun Lin Authored: Thu May 24 10:57:35 2018 +0800 Committer: Yiqun Lin Committed: Thu May 24 10:57:35 2018 +0800 -- .../java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/7a87add4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java index 8235bf0..50ead61 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodesInPath.java @@ -484,7 +484,7 @@ public class INodesInPath { } final StringBuilder b = new StringBuilder(getClass().getSimpleName()) -.append(": path = ").append(DFSUtil.byteArray2PathString(path)) +.append(": path = ").append(getPath()) .append("\n inodes = "); if (inodes == null) { b.append("null"); - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[02/50] [abbrv] hadoop git commit: HADOOP-15486. Make NetworkTopology#netLock fair. Contributed by Nanda kumar.
HADOOP-15486. Make NetworkTopology#netLock fair. Contributed by Nanda kumar. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/51ce02bb Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/51ce02bb Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/51ce02bb Branch: refs/heads/YARN-7402 Commit: 51ce02bb54d6047a8191624a86d427b0c9445cb1 Parents: aa23d49 Author: Arpit Agarwal Authored: Wed May 23 10:30:12 2018 -0700 Committer: Arpit Agarwal Committed: Wed May 23 10:30:12 2018 -0700 -- .../src/main/java/org/apache/hadoop/net/NetworkTopology.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/51ce02bb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java -- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java index 256f07b..1f077a7 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java @@ -105,7 +105,7 @@ public class NetworkTopology { private boolean clusterEverBeenMultiRack = false; /** the lock used to manage access */ - protected ReadWriteLock netlock = new ReentrantReadWriteLock(); + protected ReadWriteLock netlock = new ReentrantReadWriteLock(true); // keeping the constructor because other components like MR still uses this. public NetworkTopology() { - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[48/50] [abbrv] hadoop git commit: YARN-7707. [GPG] Policy generator framework. Contributed by Young Chen
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5da8ca6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/resources/schedulerInfo2.json -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/resources/schedulerInfo2.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/resources/schedulerInfo2.json new file mode 100644 index 000..2ff879e --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/resources/schedulerInfo2.json @@ -0,0 +1,196 @@ + { + "type": "capacityScheduler", + "capacity": 100.0, + "usedCapacity": 0.0, + "maxCapacity": 100.0, + "queueName": "root", + "queues": { +"queue": [ + { +"type": "capacitySchedulerLeafQueueInfo", +"capacity": 100.0, +"usedCapacity": 0.0, +"maxCapacity": 100.0, +"absoluteCapacity": 100.0, +"absoluteMaxCapacity": 100.0, +"absoluteUsedCapacity": 0.0, +"numApplications": 484, +"queueName": "default", +"state": "RUNNING", +"resourcesUsed": { + "memory": 0, + "vCores": 0 +}, +"hideReservationQueues": false, +"nodeLabels": [ + "*" +], +"numActiveApplications": 484, +"numPendingApplications": 0, +"numContainers": 0, +"maxApplications": 1, +"maxApplicationsPerUser": 1, +"userLimit": 100, +"users": { + "user": [ +{ + "username": "Default", + "resourcesUsed": { +"memory": 0, +"vCores": 0 + }, + "numPendingApplications": 0, + "numActiveApplications": 468, + "AMResourceUsed": { +"memory": 30191616, +"vCores": 468 + }, + "userResourceLimit": { +"memory": 31490048, +"vCores": 7612 + } +} + ] +}, +"userLimitFactor": 1.0, +"AMResourceLimit": { + "memory": 31490048, + "vCores": 7612 +}, +"usedAMResource": { + "memory": 30388224, + "vCores": 532 +}, +"userAMResourceLimit": { + "memory": 31490048, + "vCores": 7612 +}, +"preemptionDisabled": true + }, + { +"type": "capacitySchedulerLeafQueueInfo", +"capacity": 
100.0, +"usedCapacity": 0.0, +"maxCapacity": 100.0, +"absoluteCapacity": 100.0, +"absoluteMaxCapacity": 100.0, +"absoluteUsedCapacity": 0.0, +"numApplications": 484, +"queueName": "default2", +"state": "RUNNING", +"resourcesUsed": { + "memory": 0, + "vCores": 0 +}, +"hideReservationQueues": false, +"nodeLabels": [ + "*" +], +"numActiveApplications": 484, +"numPendingApplications": 0, +"numContainers": 0, +"maxApplications": 1, +"maxApplicationsPerUser": 1, +"userLimit": 100, +"users": { + "user": [ +{ + "username": "Default", + "resourcesUsed": { +"memory": 0, +"vCores": 0 + }, + "numPendingApplications": 0, + "numActiveApplications": 468, + "AMResourceUsed": { +"memory": 30191616, +"vCores": 468 + }, + "userResourceLimit": { +"memory": 31490048, +"vCores": 7612 + } +} + ] +}, +"userLimitFactor": 1.0, +"AMResourceLimit": { + "memory": 31490048, + "vCores": 7612 +}, +"usedAMResource": { + "memory": 30388224, + "vCores": 532 +}, +"userAMResourceLimit": { + "memory": 31490048, + "vCores": 7612 +}, +"preemptionDisabled": true + } +] + }, + "health": { +"lastrun": 1517951638085, +"operationsInfo": { + "entry": { +"key":
[21/50] [abbrv] hadoop git commit: YARN-8191. Fair scheduler: queue deletion without RM restart. (Gergo Repas via Haibo Chen)
YARN-8191. Fair scheduler: queue deletion without RM restart. (Gergo Repas via Haibo Chen) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/86bc6425 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/86bc6425 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/86bc6425 Branch: refs/heads/YARN-7402 Commit: 86bc6425d425913899f1d951498bd040e453b3d0 Parents: d9852eb Author: Haibo Chen Authored: Thu May 24 17:07:21 2018 -0700 Committer: Haibo Chen Committed: Thu May 24 17:12:34 2018 -0700 -- .../fair/AllocationFileLoaderService.java | 16 +- .../scheduler/fair/FSLeafQueue.java | 31 ++ .../resourcemanager/scheduler/fair/FSQueue.java | 9 + .../scheduler/fair/FairScheduler.java | 29 +- .../scheduler/fair/QueueManager.java| 155 +++-- .../fair/TestAllocationFileLoaderService.java | 100 +++--- .../scheduler/fair/TestQueueManager.java| 337 +++ 7 files changed, 596 insertions(+), 81 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/86bc6425/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java index d8d9051..7a40b6a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AllocationFileLoaderService.java @@ -87,7 +87,7 @@ public class AllocationFileLoaderService extends AbstractService { private Path allocFile; private FileSystem fs; - private Listener reloadListener; + private final Listener reloadListener; @VisibleForTesting long reloadIntervalMs = ALLOC_RELOAD_INTERVAL_MS; @@ -95,15 +95,16 @@ public class AllocationFileLoaderService extends AbstractService { private Thread reloadThread; private volatile boolean running = true; - public AllocationFileLoaderService() { -this(SystemClock.getInstance()); + public AllocationFileLoaderService(Listener reloadListener) { +this(reloadListener, SystemClock.getInstance()); } private List defaultPermissions; - public AllocationFileLoaderService(Clock clock) { + public AllocationFileLoaderService(Listener reloadListener, Clock clock) { super(AllocationFileLoaderService.class.getName()); this.clock = clock; +this.reloadListener = reloadListener; } @Override @@ -114,6 +115,7 @@ public class AllocationFileLoaderService extends AbstractService { reloadThread = new Thread(() -> { while (running) { try { +reloadListener.onCheck(); long time = clock.getTime(); long lastModified = fs.getFileStatus(allocFile).getModificationTime(); @@ -207,10 +209,6 @@ public class AllocationFileLoaderService extends AbstractService { return allocPath; } - public synchronized void setReloadListener(Listener reloadListener) { -this.reloadListener = reloadListener; - } - /** * Updates the allocation list from the allocation config file. This file is * expected to be in the XML format specified in the design doc. 
@@ -351,5 +349,7 @@ public class AllocationFileLoaderService extends AbstractService { public interface Listener { void onReload(AllocationConfiguration info) throws IOException; + +void onCheck(); } } http://git-wip-us.apache.org/repos/asf/hadoop/blob/86bc6425/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java
[45/50] [abbrv] hadoop git commit: HDDS-125. Cleanup HDDS CheckStyle issues. Contributed by Anu Engineer.
HDDS-125. Cleanup HDDS CheckStyle issues. Contributed by Anu Engineer. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9502b47b Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9502b47b Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9502b47b Branch: refs/heads/YARN-7402 Commit: 9502b47bd2a3cf32edae635293169883c2914475 Parents: 17aa40f Author: Anu Engineer Authored: Tue May 29 09:54:06 2018 -0700 Committer: Anu Engineer Committed: Tue May 29 09:54:06 2018 -0700 -- .../hadoop/hdds/scm/block/BlockManagerImpl.java | 1 - .../hdds/scm/block/DeletedBlockLogImpl.java | 2 +- .../hdds/scm/container/ContainerMapping.java| 6 +- .../scm/container/ContainerStateManager.java| 24 +++ .../hadoop/hdds/scm/container/Mapping.java | 9 ++- .../hdds/scm/node/SCMNodeStorageStatMXBean.java | 4 +- .../hdds/scm/node/SCMNodeStorageStatMap.java| 19 +++--- .../hdds/scm/node/StorageReportResult.java | 8 +-- .../hdds/scm/node/states/Node2ContainerMap.java | 2 +- .../hdds/scm/pipelines/PipelineSelector.java| 5 +- .../scm/server/StorageContainerManager.java | 3 +- .../TestStorageContainerManagerHttpServer.java | 1 - .../hadoop/hdds/scm/block/package-info.java | 23 +++ .../scm/container/TestContainerMapping.java | 12 ++-- .../hdds/scm/container/closer/package-info.java | 22 +++ .../hadoop/hdds/scm/container/package-info.java | 22 +++ .../hdds/scm/container/states/package-info.java | 22 +++ .../hadoop/hdds/scm/node/TestNodeManager.java | 66 ++-- .../scm/node/TestSCMNodeStorageStatMap.java | 32 +- .../hadoop/hdds/scm/node/package-info.java | 22 +++ .../ozone/container/common/TestEndPoint.java| 2 - .../ozone/container/common/package-info.java| 22 +++ .../ozone/container/placement/package-info.java | 22 +++ .../replication/TestContainerSupervisor.java| 7 ++- 24 files changed, 263 insertions(+), 95 deletions(-) -- 
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java -- diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java index 5a98e85..d17d6c0 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java @@ -41,7 +41,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Random; -import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java -- diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java index cabcb46..cedc506 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java @@ -190,7 +190,7 @@ public class DeletedBlockLogImpl implements DeletedBlockLog { try { for(Long txID : txIDs) { try { - byte [] deleteBlockBytes = + byte[] deleteBlockBytes = deletedStore.get(Longs.toByteArray(txID)); if (deleteBlockBytes == null) { LOG.warn("Delete txID {} not found", txID); http://git-wip-us.apache.org/repos/asf/hadoop/blob/9502b47b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java -- diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java index e569874..2d88621 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java @@ -152,7 +152,8 @@ public class ContainerMapping implements Mapping { ContainerInfo containerInfo; lock.lock(); try { - byte[]
[15/50] [abbrv] hadoop git commit: HDDS-45. Removal of old OzoneRestClient. Contributed by Lokesh Jain.
HDDS-45. Removal of old OzoneRestClient. Contributed by Lokesh Jain. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/774daa8d Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/774daa8d Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/774daa8d Branch: refs/heads/YARN-7402 Commit: 774daa8d532f91fe8e342a8da2cfa65a8629 Parents: c05b5d4 Author: Mukul Kumar Singh Authored: Thu May 24 15:53:42 2018 +0530 Committer: Mukul Kumar Singh Committed: Thu May 24 15:53:42 2018 +0530 -- .../apache/hadoop/hdds/scm/XceiverClient.java | 22 +- .../hadoop/ozone/web/client/OzoneBucket.java| 646 --- .../hadoop/ozone/web/client/OzoneKey.java | 44 - .../ozone/web/client/OzoneRestClient.java | 804 --- .../hadoop/ozone/web/client/OzoneVolume.java| 584 -- .../hadoop/ozone/web/client/package-info.java | 34 - .../hadoop/ozone/MiniOzoneClusterImpl.java | 3 +- .../apache/hadoop/ozone/RatisTestHelper.java| 14 +- .../ozone/web/TestOzoneRestWithMiniCluster.java | 207 ++--- .../hadoop/ozone/web/client/TestBuckets.java| 193 +++-- .../ozone/web/client/TestBucketsRatis.java | 15 +- .../hadoop/ozone/web/client/TestKeys.java | 286 --- .../hadoop/ozone/web/client/TestKeysRatis.java | 29 +- .../hadoop/ozone/web/client/TestVolume.java | 285 +++ .../ozone/web/client/TestVolumeRatis.java | 29 +- 15 files changed, 548 insertions(+), 2647 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/774daa8d/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClient.java -- diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClient.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClient.java index 6d33cd4..42e02f9 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClient.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClient.java @@ -54,6 +54,7 @@ public class XceiverClient extends 
XceiverClientSpi { private Bootstrap b; private EventLoopGroup group; private final Semaphore semaphore; + private boolean closed = false; /** * Constructs a client that can communicate with the Container framework on @@ -74,6 +75,10 @@ public class XceiverClient extends XceiverClientSpi { @Override public void connect() throws Exception { +if (closed) { + throw new IOException("This channel is not connected."); +} + if (channel != null && channel.isActive()) { throw new IOException("This client is already connected to a host."); } @@ -97,6 +102,18 @@ public class XceiverClient extends XceiverClientSpi { channel = b.connect(leader.getHostName(), port).sync().channel(); } + public void reconnect() throws IOException { +try { + connect(); + if (channel == null || !channel.isActive()) { +throw new IOException("This channel is not connected."); + } +} catch (Exception e) { + LOG.error("Error while connecting: ", e); + throw new IOException(e); +} + } + /** * Returns if the exceiver client connects to a server. 
* @@ -109,6 +126,7 @@ public class XceiverClient extends XceiverClientSpi { @Override public void close() { +closed = true; if (group != null) { group.shutdownGracefully().awaitUninterruptibly(); } @@ -124,7 +142,7 @@ public class XceiverClient extends XceiverClientSpi { ContainerProtos.ContainerCommandRequestProto request) throws IOException { try { if ((channel == null) || (!channel.isActive())) { -throw new IOException("This channel is not connected."); +reconnect(); } XceiverClientHandler handler = channel.pipeline().get(XceiverClientHandler.class); @@ -160,7 +178,7 @@ public class XceiverClient extends XceiverClientSpi { sendCommandAsync(ContainerProtos.ContainerCommandRequestProto request) throws IOException, ExecutionException, InterruptedException { if ((channel == null) || (!channel.isActive())) { - throw new IOException("This channel is not connected."); + reconnect(); } XceiverClientHandler handler = channel.pipeline().get(XceiverClientHandler.class); http://git-wip-us.apache.org/repos/asf/hadoop/blob/774daa8d/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneBucket.java -- diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/web/client/OzoneBucket.java
[26/50] [abbrv] hadoop git commit: HDDS-96. Add an option in ozone script to generate a site file with minimally required ozone configs. Contributed by Dinesh Chitlangia.
HDDS-96. Add an option in ozone script to generate a site file with minimally required ozone configs. Contributed by Dinesh Chitlangia. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8733012a Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8733012a Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8733012a Branch: refs/heads/YARN-7402 Commit: 8733012ae35f2762d704f94975a762885d116795 Parents: 1e0d4b1 Author: Anu Engineer Authored: Fri May 25 13:06:14 2018 -0700 Committer: Anu Engineer Committed: Fri May 25 13:06:14 2018 -0700 -- .../hadoop/hdds/conf/OzoneConfiguration.java| 6 +- hadoop-ozone/common/src/main/bin/ozone | 4 + ...TestGenerateOzoneRequiredConfigurations.java | 100 +++ .../GenerateOzoneRequiredConfigurations.java| 174 +++ .../hadoop/ozone/genconf/package-info.java | 24 +++ 5 files changed, 305 insertions(+), 3 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/8733012a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java -- diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java index f07718c..36d953c 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java @@ -137,7 +137,7 @@ public class OzoneConfiguration extends Configuration { @Override public String toString() { - return this.getName() + " " + this.getValue() + this.getTag(); + return this.getName() + " " + this.getValue() + " " + this.getTag(); } @Override @@ -152,11 +152,11 @@ public class OzoneConfiguration extends Configuration { } } - public static void activate(){ + public static void activate() { // adds the default resources Configuration.addDefaultResource("hdfs-default.xml"); 
Configuration.addDefaultResource("hdfs-site.xml"); Configuration.addDefaultResource("ozone-default.xml"); Configuration.addDefaultResource("ozone-site.xml"); } -} \ No newline at end of file +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/8733012a/hadoop-ozone/common/src/main/bin/ozone -- diff --git a/hadoop-ozone/common/src/main/bin/ozone b/hadoop-ozone/common/src/main/bin/ozone index 00261c7..6843bdd 100755 --- a/hadoop-ozone/common/src/main/bin/ozone +++ b/hadoop-ozone/common/src/main/bin/ozone @@ -47,6 +47,7 @@ function hadoop_usage hadoop_add_subcommand "scm" daemon "run the Storage Container Manager service" hadoop_add_subcommand "scmcli" client "run the CLI of the Storage Container Manager " hadoop_add_subcommand "version" client "print the version" + hadoop_add_subcommand "genconf" client "generate minimally required ozone configs and output to ozone-site.xml in specified path" hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" false } @@ -118,6 +119,9 @@ function ozonecmd_case version) HADOOP_CLASSNAME=org.apache.hadoop.util.VersionInfo ;; +genconf) + HADOOP_CLASSNAME=org.apache.hadoop.ozone.genconf.GenerateOzoneRequiredConfigurations +;; *) HADOOP_CLASSNAME="${subcmd}" if ! 
hadoop_validate_classname "${HADOOP_CLASSNAME}"; then http://git-wip-us.apache.org/repos/asf/hadoop/blob/8733012a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java -- diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java new file mode 100644 index 000..82582a6 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java @@ -0,0 +1,100 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing,
[25/50] [abbrv] hadoop git commit: HDFS-13618. Fix TestDataNodeFaultInjector test failures on Windows. Contributed by Xiao Liang.
HDFS-13618. Fix TestDataNodeFaultInjector test failures on Windows. Contributed by Xiao Liang. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1e0d4b1c Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1e0d4b1c Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1e0d4b1c Branch: refs/heads/YARN-7402 Commit: 1e0d4b1c283fb98a95c60a1723f594befb3c18a9 Parents: 02322de Author: Inigo Goiri Authored: Fri May 25 09:10:32 2018 -0700 Committer: Inigo Goiri Committed: Fri May 25 09:14:28 2018 -0700 -- .../hadoop/hdfs/server/datanode/TestDataNodeFaultInjector.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e0d4b1c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeFaultInjector.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeFaultInjector.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeFaultInjector.java index 1507844..4afacd9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeFaultInjector.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeFaultInjector.java @@ -118,7 +118,7 @@ public class TestDataNodeFaultInjector { final MetricsDataNodeFaultInjector mdnFaultInjector) throws Exception { final Path baseDir = new Path( -PathUtils.getTestDir(getClass()).getAbsolutePath(), +PathUtils.getTestDir(getClass()).getPath(), GenericTestUtils.getMethodName()); final DataNodeFaultInjector oldDnInjector = DataNodeFaultInjector.get(); DataNodeFaultInjector.set(mdnFaultInjector); - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[19/50] [abbrv] hadoop git commit: YARN-8316. Improved diagnostic message for ATS unavailability for YARN Service. Contributed by Billie Rinaldi
YARN-8316. Improved diagnostic message for ATS unavailability for YARN Service. Contributed by Billie Rinaldi Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ff5a402 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ff5a402 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ff5a402 Branch: refs/heads/YARN-7402 Commit: 7ff5a40218241ad2380595175a493794129a7402 Parents: 2d19e7d Author: Eric Yang Authored: Thu May 24 16:26:02 2018 -0400 Committer: Eric Yang Committed: Thu May 24 16:26:02 2018 -0400 -- .../org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java | 2 +- .../org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ff5a402/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java index 072e606..1ceb462 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java @@ -400,7 +400,7 @@ public class YarnClientImpl extends YarnClient { + e.getMessage()); return null; } - throw e; + throw new IOException(e); } catch (NoClassDefFoundError e) { NoClassDefFoundError wrappedError = new NoClassDefFoundError( e.getMessage() + ". 
It appears that the timeline client " http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ff5a402/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java index b84b49c..70ff47b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java @@ -1159,7 +1159,7 @@ public class TestYarnClient extends ParameterizedSchedulerTestBase { TimelineClient createTimelineClient() throws IOException, YarnException { timelineClient = mock(TimelineClient.class); when(timelineClient.getDelegationToken(any(String.class))) - .thenThrow(new IOException("Best effort test exception")); + .thenThrow(new RuntimeException("Best effort test exception")); return timelineClient; } }); @@ -1175,7 +1175,7 @@ public class TestYarnClient extends ParameterizedSchedulerTestBase { client.serviceInit(conf); client.getTimelineDelegationToken(); Assert.fail("Get delegation token should have thrown an exception"); -} catch (Exception e) { +} catch (IOException e) { // Success } } - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[33/50] [abbrv] hadoop git commit: MAPREDUCE-7097. MapReduce JHS should honor yarn.webapp.filter-entity-list-by-user. Contributed by Sunil Govindan.
MAPREDUCE-7097. MapReduce JHS should honor yarn.webapp.filter-entity-list-by-user. Contributed by Sunil Govindan. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/88cbe57c Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/88cbe57c Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/88cbe57c Branch: refs/heads/YARN-7402 Commit: 88cbe57c069a1d2dd3bfb32e3ad742566470a10b Parents: d14e26b Author: Rohith Sharma K S Authored: Mon May 28 12:45:07 2018 +0530 Committer: Rohith Sharma K S Committed: Mon May 28 14:05:49 2018 +0530 -- .../mapreduce/v2/hs/webapp/HsJobBlock.java | 18 ++- .../mapreduce/v2/hs/webapp/TestHsJobBlock.java | 20 ++-- .../apache/hadoop/yarn/webapp/Controller.java | 4 .../org/apache/hadoop/yarn/webapp/View.java | 24 +--- 4 files changed, 55 insertions(+), 11 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/88cbe57c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java -- diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java index 18040f0..9b845cd 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsJobBlock.java @@ -27,6 +27,8 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI._TH; import java.util.Date; import java.util.List; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.mapreduce.JobACL; import org.apache.hadoop.mapreduce.TaskID; 
import org.apache.hadoop.mapreduce.v2.api.records.AMInfo; import org.apache.hadoop.mapreduce.v2.api.records.JobId; @@ -39,8 +41,10 @@ import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobInfo; import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig; import org.apache.hadoop.mapreduce.v2.util.MRApps; import org.apache.hadoop.mapreduce.v2.util.MRApps.TaskAttemptStateUI; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.mapreduce.v2.util.MRWebAppUtil; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.util.Times; import org.apache.hadoop.yarn.webapp.ResponseInfo; import org.apache.hadoop.yarn.webapp.hamlet2.Hamlet; @@ -56,9 +60,14 @@ import com.google.inject.Inject; */ public class HsJobBlock extends HtmlBlock { final AppContext appContext; + private UserGroupInformation ugi; + private boolean isFilterAppListByUserEnabled; - @Inject HsJobBlock(AppContext appctx) { + @Inject HsJobBlock(Configuration conf, AppContext appctx, ViewContext ctx) { +super(ctx); appContext = appctx; +isFilterAppListByUserEnabled = conf +.getBoolean(YarnConfiguration.FILTER_ENTITY_LIST_BY_USER, false); } /* @@ -78,6 +87,13 @@ public class HsJobBlock extends HtmlBlock { html.p().__("Sorry, ", jid, " not found.").__(); return; } +ugi = getCallerUGI(); +if (isFilterAppListByUserEnabled && ugi != null +&& !j.checkAccess(ugi, JobACL.VIEW_JOB)) { + html.p().__("Sorry, ", jid, " could not be viewed for '", + ugi.getUserName(), "'.").__(); + return; +} if(j instanceof UnparsedJob) { final int taskCount = j.getTotalMaps() + j.getTotalReduces(); UnparsedJob oversizedJob = (UnparsedJob) j; http://git-wip-us.apache.org/repos/asf/hadoop/blob/88cbe57c/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsJobBlock.java -- diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsJobBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsJobBlock.java index 7fa238e..48e3d3b 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsJobBlock.java +++
[41/50] [abbrv] hadoop git commit: YARN-8338. TimelineService V1.5 doesn't come up after HADOOP-15406. Contributed by Vinod Kumar Vavilapalli
YARN-8338. TimelineService V1.5 doesn't come up after HADOOP-15406. Contributed by Vinod Kumar Vavilapalli Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/31ab960f Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/31ab960f Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/31ab960f Branch: refs/heads/YARN-7402 Commit: 31ab960f4f931df273481927b897388895d803ba Parents: 438ef49 Author: Jason Lowe Authored: Tue May 29 11:00:30 2018 -0500 Committer: Jason Lowe Committed: Tue May 29 11:00:30 2018 -0500 -- hadoop-project/pom.xml | 5 + .../hadoop-yarn-server-applicationhistoryservice/pom.xml| 5 + 2 files changed, 10 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/31ab960f/hadoop-project/pom.xml -- diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index 73c3f5b..59a9bd2 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -1144,6 +1144,11 @@ 1.8.5 +org.objenesis +objenesis +1.0 + + org.mock-server mockserver-netty 3.9.2 http://git-wip-us.apache.org/repos/asf/hadoop/blob/31ab960f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml index f310518..0527095 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml @@ -155,6 +155,11 @@ leveldbjni-all + + org.objenesis + objenesis + + org.apache.hadoop - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[23/50] [abbrv] hadoop git commit: YARN-8292: Fix the dominant resource preemption cannot happen when some of the resource vector becomes negative. Contributed by Wangda Tan.
YARN-8292: Fix the dominant resource preemption cannot happen when some of the resource vector becomes negative. Contributed by Wangda Tan. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8d5509c6 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8d5509c6 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8d5509c6 Branch: refs/heads/YARN-7402 Commit: 8d5509c68156faaa6641f4e747fc9ff80adccf88 Parents: bddfe79 Author: Eric E Payne Authored: Fri May 25 16:06:09 2018 + Committer: Eric E Payne Committed: Fri May 25 16:06:09 2018 + -- .../resource/DefaultResourceCalculator.java | 15 ++- .../resource/DominantResourceCalculator.java| 39 --- .../yarn/util/resource/ResourceCalculator.java | 13 ++- .../hadoop/yarn/util/resource/Resources.java| 5 - .../AbstractPreemptableResourceCalculator.java | 58 --- .../CapacitySchedulerPreemptionUtils.java | 61 +-- .../capacity/FifoCandidatesSelector.java| 8 +- .../FifoIntraQueuePreemptionPlugin.java | 4 +- .../capacity/IntraQueueCandidatesSelector.java | 2 +- .../capacity/PreemptableResourceCalculator.java | 6 +- .../monitor/capacity/TempQueuePerPartition.java | 8 +- ...alCapacityPreemptionPolicyMockFramework.java | 30 ++ .../TestPreemptionForQueueWithPriorities.java | 103 --- ...pacityPreemptionPolicyInterQueueWithDRF.java | 60 ++- 14 files changed, 312 insertions(+), 100 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d5509c6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java index 6375c4a..ab6d7f5 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java @@ -136,13 +136,18 @@ public class DefaultResourceCalculator extends ResourceCalculator { } @Override - public boolean isAnyMajorResourceZero(Resource resource) { -return resource.getMemorySize() == 0f; - } - - @Override public Resource normalizeDown(Resource r, Resource stepFactor) { return Resources.createResource( roundDown((r.getMemorySize()), stepFactor.getMemorySize())); } + + @Override + public boolean isAnyMajorResourceZeroOrNegative(Resource resource) { +return resource.getMemorySize() <= 0; + } + + @Override + public boolean isAnyMajorResourceAboveZero(Resource resource) { +return resource.getMemorySize() > 0; + } } http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d5509c6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java index 6fed23b..2e85ebc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java @@ -577,19 +577,6 @@ public class DominantResourceCalculator extends ResourceCalculator { } @Override - public boolean isAnyMajorResourceZero(Resource resource) { -int maxLength = ResourceUtils.getNumberOfKnownResourceTypes(); -for (int i = 0; i < maxLength; i++) { - ResourceInformation 
resourceInformation = resource - .getResourceInformation(i); - if (resourceInformation.getValue() == 0L) { -return true; - } -} -return false; - } - - @Override public Resource normalizeDown(Resource r, Resource stepFactor) { Resource ret = Resource.newInstance(r); int maxLength = ResourceUtils.getNumberOfKnownResourceTypes(); @@ -613,4 +600,30 @@ public class DominantResourceCalculator extends ResourceCalculator { } return ret; } + + @Override + public boolean isAnyMajorResourceZeroOrNegative(Resource resource)
[50/50] [abbrv] hadoop git commit: YARN-3660. [GPG] Federation Global Policy Generator (service hook only). (Contributed by Botong Huang via curino)
YARN-3660. [GPG] Federation Global Policy Generator (service hook only). (Contributed by Botong Huang via curino) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bca8e9bf Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bca8e9bf Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bca8e9bf Branch: refs/heads/YARN-7402 Commit: bca8e9bf9d6c0d99e15d45dfb714ca5677ac4e0a Parents: 9502b47 Author: Carlo Curino Authored: Thu Jan 18 17:21:06 2018 -0800 Committer: Botong Huang Committed: Tue May 29 10:48:40 2018 -0700 -- hadoop-project/pom.xml | 6 + hadoop-yarn-project/hadoop-yarn/bin/yarn| 5 + hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd| 55 +--- .../hadoop-yarn/conf/yarn-env.sh| 12 ++ .../pom.xml | 98 + .../globalpolicygenerator/GPGContext.java | 31 + .../globalpolicygenerator/GPGContextImpl.java | 41 ++ .../GlobalPolicyGenerator.java | 136 +++ .../globalpolicygenerator/package-info.java | 19 +++ .../TestGlobalPolicyGenerator.java | 38 ++ .../hadoop-yarn/hadoop-yarn-server/pom.xml | 1 + hadoop-yarn-project/pom.xml | 4 + 12 files changed, 424 insertions(+), 22 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/bca8e9bf/hadoop-project/pom.xml -- diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index 59a9bd2..2db538e 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -446,6 +446,12 @@ org.apache.hadoop +hadoop-yarn-server-globalpolicygenerator +${project.version} + + + +org.apache.hadoop hadoop-yarn-services-core ${hadoop.version} http://git-wip-us.apache.org/repos/asf/hadoop/blob/bca8e9bf/hadoop-yarn-project/hadoop-yarn/bin/yarn -- diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn b/hadoop-yarn-project/hadoop-yarn/bin/yarn index 69afe6f..8061859 100755 --- a/hadoop-yarn-project/hadoop-yarn/bin/yarn +++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn @@ -39,6 +39,7 @@ function hadoop_usage hadoop_add_subcommand "container" client "prints 
container(s) report" hadoop_add_subcommand "daemonlog" admin "get/set the log level for each daemon" hadoop_add_subcommand "envvars" client "display computed Hadoop environment variables" + hadoop_add_subcommand "globalpolicygenerator" daemon "run the Global Policy Generator" hadoop_add_subcommand "jar " client "run a jar file" hadoop_add_subcommand "logs" client "dump container logs" hadoop_add_subcommand "node" admin "prints node report(s)" @@ -103,6 +104,10 @@ ${HADOOP_COMMON_HOME}/${HADOOP_COMMON_LIB_JARS_DIR}" echo "HADOOP_TOOLS_LIB_JARS_DIR='${HADOOP_TOOLS_LIB_JARS_DIR}'" exit 0 ;; +globalpolicygenerator) + HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true" + HADOOP_CLASSNAME='org.apache.hadoop.yarn.server.globalpolicygenerator.GlobalPolicyGenerator' +;; jar) HADOOP_CLASSNAME=org.apache.hadoop.util.RunJar ;; http://git-wip-us.apache.org/repos/asf/hadoop/blob/bca8e9bf/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd -- diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd index e1ac112..bebfd71 100644 --- a/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd +++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd @@ -134,6 +134,10 @@ if "%1" == "--loglevel" ( set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-server\yarn-server-router\target\classes ) + if exist %HADOOP_YARN_HOME%\yarn-server\yarn-server-globalpolicygenerator\target\classes ( +set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-server\yarn-server-globalpolicygenerator\target\classes + ) + if exist %HADOOP_YARN_HOME%\build\test\classes ( set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\build\test\classes ) @@ -155,7 +159,7 @@ if "%1" == "--loglevel" ( set yarncommands=resourcemanager nodemanager proxyserver rmadmin version jar ^ application applicationattempt container node queue logs daemonlog historyserver ^ - timelineserver timelinereader router classpath + timelineserver timelinereader router globalpolicygenerator classpath for %%i in ( %yarncommands% ) do ( if 
%yarn-command%
[31/50] [abbrv] hadoop git commit: HDDS-78. Add per volume level storage stats in SCM. Contributed by Shashikant Banerjee.
HDDS-78. Add per volume level storage stats in SCM. Contributed by Shashikant Banerjee. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0cf6e87f Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0cf6e87f Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0cf6e87f Branch: refs/heads/YARN-7402 Commit: 0cf6e87f9212af10eae39cdcb1fe60e6d8191772 Parents: f24c842 Author: Anu Engineer Authored: Sat May 26 11:06:22 2018 -0700 Committer: Anu Engineer Committed: Sat May 26 11:11:14 2018 -0700 -- .../placement/metrics/SCMNodeStat.java | 21 -- .../hdds/scm/node/SCMNodeStorageStatMXBean.java | 8 + .../hdds/scm/node/SCMNodeStorageStatMap.java| 230 +-- .../hdds/scm/node/StorageReportResult.java | 87 +++ .../scm/node/TestSCMNodeStorageStatMap.java | 141 +--- 5 files changed, 356 insertions(+), 131 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/0cf6e87f/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java -- diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java index 4fe72fc..3c871d3 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMNodeStat.java @@ -136,25 +136,4 @@ public class SCMNodeStat implements NodeStat { public int hashCode() { return Long.hashCode(capacity.get() ^ scmUsed.get() ^ remaining.get()); } - - - /** - * Truncate to 4 digits since uncontrolled precision is some times - * counter intuitive to what users expect. - * @param value - double. - * @return double. 
- */ - private double truncateDecimals(double value) { -final int multiplier = 1; -return (double) ((long) (value * multiplier)) / multiplier; - } - - /** - * get the scmUsed ratio - */ - public double getScmUsedratio() { -double scmUsedRatio = -truncateDecimals(getScmUsed().get() / (double) getCapacity().get()); -return scmUsedRatio; - } } http://git-wip-us.apache.org/repos/asf/hadoop/blob/0cf6e87f/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMXBean.java -- diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMXBean.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMXBean.java index f17a970..d81ff0f 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMXBean.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMXBean.java @@ -19,7 +19,9 @@ package org.apache.hadoop.hdds.scm.node; import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport; +import java.util.Set; import java.util.UUID; /** @@ -66,4 +68,10 @@ public interface SCMNodeStorageStatMXBean { * @return long */ long getTotalFreeSpace(); + + /** + * Returns the set of disks for a given Datanode. 
+ * @return set of storage volumes + */ + Set getStorageVolumes(UUID datanodeId); } http://git-wip-us.apache.org/repos/asf/hadoop/blob/0cf6e87f/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java -- diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java index 25cb357..f8ad2af 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java @@ -22,18 +22,18 @@ package org.apache.hadoop.hdds.scm.node; import com.google.common.base.Preconditions; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; -import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; +import org.apache.hadoop.hdds.protocol.proto. +StorageContainerDatanodeProtocolProtos.SCMStorageReport; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import
[44/50] [abbrv] hadoop git commit: YARN-8369. Javadoc build failed due to 'bad use of >'. (Takanobu Asanuma via wangda)
YARN-8369. Javadoc build failed due to 'bad use of >'. (Takanobu Asanuma via wangda) Change-Id: I79a42154e8f86ab1c3cc939b3745024b8eebe5f4 Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/17aa40f6 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/17aa40f6 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/17aa40f6 Branch: refs/heads/YARN-7402 Commit: 17aa40f669f197d43387d67dc00040d14cd00948 Parents: 3061bfc Author: Wangda Tan Authored: Tue May 29 09:27:36 2018 -0700 Committer: Wangda Tan Committed: Tue May 29 09:27:36 2018 -0700 -- .../apache/hadoop/yarn/util/resource/ResourceCalculator.java | 4 ++-- .../monitor/capacity/CapacitySchedulerPreemptionUtils.java | 8 2 files changed, 6 insertions(+), 6 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/17aa40f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java index 51078cd..27394f7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java @@ -260,10 +260,10 @@ public abstract class ResourceCalculator { /** * Check if resource has any major resource types (which are all NodeManagers - * included) has a >0 value. + * included) has a {@literal >} 0 value. 
* * @param resource resource - * @return returns true if any resource is >0 + * @return returns true if any resource is {@literal >} 0 */ public abstract boolean isAnyMajorResourceAboveZero(Resource resource); } http://git-wip-us.apache.org/repos/asf/hadoop/blob/17aa40f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionUtils.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionUtils.java index 5396d61..690eb02 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/CapacitySchedulerPreemptionUtils.java @@ -136,12 +136,12 @@ public class CapacitySchedulerPreemptionUtils { * @param conservativeDRF * should we do conservativeDRF preemption or not. * When true: - *stop preempt container when any major resource type <= 0 for to- - *preempt. + *stop preempt container when any major resource type + *{@literal <=} 0 for to-preempt. *This is default preemption behavior of intra-queue preemption * When false: - *stop preempt container when: all major resource type <= 0 for - *to-preempt. + *stop preempt container when: all major resource type + *{@literal <=} 0 for to-preempt. 
*This is default preemption behavior of inter-queue preemption * @return should we preempt rmContainer. If we should, deduct from * resourceToObtainByPartition - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[07/50] [abbrv] hadoop git commit: YARN-8348. Incorrect and missing AfterClass in HBase-tests to fix NPE failures. Contributed by Giovanni Matteo Fumarola.
YARN-8348. Incorrect and missing AfterClass in HBase-tests to fix NPE failures. Contributed by Giovanni Matteo Fumarola. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d7261561 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d7261561 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d7261561 Branch: refs/heads/YARN-7402 Commit: d72615611cfa6bd82756270d4b10136ec1e56741 Parents: e99e5bf Author: Inigo Goiri Authored: Wed May 23 14:43:59 2018 -0700 Committer: Inigo Goiri Committed: Wed May 23 14:43:59 2018 -0700 -- .../storage/TestHBaseTimelineStorageApps.java| 4 +++- .../storage/TestHBaseTimelineStorageDomain.java | 8 .../storage/TestHBaseTimelineStorageEntities.java| 4 +++- .../storage/TestHBaseTimelineStorageSchema.java | 8 .../storage/flow/TestHBaseStorageFlowActivity.java | 4 +++- .../storage/flow/TestHBaseStorageFlowRun.java| 4 +++- .../storage/flow/TestHBaseStorageFlowRunCompaction.java | 4 +++- 7 files changed, 31 insertions(+), 5 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/d7261561/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java index bc33427..0dee442 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageApps.java @@ -1936,6 +1936,8 @@ public class TestHBaseTimelineStorageApps { @AfterClass public static void tearDownAfterClass() throws Exception { -util.shutdownMiniCluster(); +if (util != null) { + util.shutdownMiniCluster(); +} } } http://git-wip-us.apache.org/repos/asf/hadoop/blob/d7261561/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageDomain.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageDomain.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageDomain.java index 2932e0c..1f59088 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageDomain.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageDomain.java @@ -32,6 +32,7 @@ import org.apache.hadoop.yarn.server.timelineservice.storage.common.ColumnRWHelp import org.apache.hadoop.yarn.server.timelineservice.storage.domain.DomainColumn; import org.apache.hadoop.yarn.server.timelineservice.storage.domain.DomainRowKey; import org.apache.hadoop.yarn.server.timelineservice.storage.domain.DomainTableRW; +import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -123,4 
+124,11 @@ public class TestHBaseTimelineStorageDomain { assertEquals("user1,user2 group1,group2", readers); assertEquals("writer1,writer2", writers); } + + @AfterClass + public static void tearDownAfterClass() throws Exception { +if (util != null) { + util.shutdownMiniCluster(); +} + } } http://git-wip-us.apache.org/repos/asf/hadoop/blob/d7261561/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase-tests/src/test/java/org/apache/hadoop/yarn/server/timelineservice/storage/TestHBaseTimelineStorageEntities.java
[36/50] [abbrv] hadoop git commit: YARN-4781. Support intra-queue preemption for fairness ordering policy. Contributed by Eric Payne.
YARN-4781. Support intra-queue preemption for fairness ordering policy. Contributed by Eric Payne. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7c343669 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7c343669 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7c343669 Branch: refs/heads/YARN-7402 Commit: 7c343669baf660df3b70d58987d6e68aec54d6fa Parents: 61df174 Author: Sunil G Authored: Mon May 28 16:32:53 2018 +0530 Committer: Sunil G Committed: Mon May 28 16:32:53 2018 +0530 -- .../FifoIntraQueuePreemptionPlugin.java | 37 ++- .../capacity/IntraQueueCandidatesSelector.java | 40 +++ .../monitor/capacity/TempAppPerPartition.java | 9 + .../AbstractComparatorOrderingPolicy.java | 2 - ...alCapacityPreemptionPolicyMockFramework.java | 12 +- ...yPreemptionPolicyIntraQueueFairOrdering.java | 276 +++ 6 files changed, 366 insertions(+), 10 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c343669/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java index 40f333f..12c178c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java @@ -34,6 +34,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.IntraQueueCandidatesSelector.TAFairOrderingComparator; import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.IntraQueueCandidatesSelector.TAPriorityComparator; import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy.IntraQueuePreemptionOrderPolicy; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; @@ -41,6 +42,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.SchedulingMode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.FairOrderingPolicy; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.policy.OrderingPolicy; import org.apache.hadoop.yarn.util.resource.ResourceCalculator; import org.apache.hadoop.yarn.util.resource.Resources; @@ -263,8 +266,17 @@ public class FifoIntraQueuePreemptionPlugin Resource queueReassignableResource, PriorityQueue orderedByPriority) { -Comparator reverseComp = Collections -.reverseOrder(new TAPriorityComparator()); +Comparator reverseComp; +OrderingPolicy queueOrderingPolicy = +tq.leafQueue.getOrderingPolicy(); +if (queueOrderingPolicy instanceof FairOrderingPolicy +&& (context.getIntraQueuePreemptionOrderPolicy() +== 
IntraQueuePreemptionOrderPolicy.USERLIMIT_FIRST)) { + reverseComp = Collections.reverseOrder( + new TAFairOrderingComparator(this.rc, clusterResource)); +} else { + reverseComp = Collections.reverseOrder(new TAPriorityComparator()); +} TreeSet orderedApps = new TreeSet<>(reverseComp); String partition = tq.partition; @@ -355,7 +367,16 @@ public class FifoIntraQueuePreemptionPlugin TempQueuePerPartition tq, Collection apps, Resource clusterResource, Map perUserAMUsed) { -TAPriorityComparator taComparator = new TAPriorityComparator(); +Comparator taComparator; +OrderingPolicy orderingPolicy = +tq.leafQueue.getOrderingPolicy(); +if (orderingPolicy instanceof FairOrderingPolicy +&&
[14/50] [abbrv] hadoop git commit: HDDS-45. Removal of old OzoneRestClient. Contributed by Lokesh Jain.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/774daa8d/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneRestWithMiniCluster.java -- diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneRestWithMiniCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneRestWithMiniCluster.java index 5b67657..a9b8175 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneRestWithMiniCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneRestWithMiniCluster.java @@ -23,23 +23,31 @@ import static org.apache.hadoop.fs.contract.ContractTestUtils.dataset; import static org.apache.hadoop.ozone.OzoneConsts.CHUNK_SIZE; import static org.junit.Assert.*; +import org.apache.commons.io.IOUtils; import org.apache.commons.lang.RandomStringUtils; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.web.client.OzoneRestClient; +import org.apache.hadoop.hdds.client.OzoneQuota; +import org.apache.hadoop.hdds.client.ReplicationFactor; +import org.apache.hadoop.hdds.client.ReplicationType; +import org.apache.hadoop.ozone.client.VolumeArgs; +import org.apache.hadoop.ozone.client.io.OzoneInputStream; +import org.apache.hadoop.ozone.client.io.OzoneOutputStream; +import org.apache.hadoop.ozone.client.protocol.ClientProtocol; +import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.rpc.RpcClient; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; -import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.web.client.OzoneBucket; -import org.apache.hadoop.ozone.web.client.OzoneVolume; -import 
org.apache.hadoop.ozone.web.request.OzoneQuota; import org.junit.rules.Timeout; +import java.io.IOException; +import java.io.InputStream; + /** * End-to-end testing of Ozone REST operations. */ @@ -52,7 +60,9 @@ public class TestOzoneRestWithMiniCluster { private static MiniOzoneCluster cluster; private static OzoneConfiguration conf; - private static OzoneRestClient ozoneClient; + private static ClientProtocol client; + private static ReplicationFactor replicationFactor = ReplicationFactor.ONE; + private static ReplicationType replicationType = ReplicationType.STAND_ALONE; @Rule public ExpectedException exception = ExpectedException.none(); @@ -62,180 +72,125 @@ public class TestOzoneRestWithMiniCluster { conf = new OzoneConfiguration(); cluster = MiniOzoneCluster.newBuilder(conf).build(); cluster.waitForClusterToBeReady(); -int port = cluster.getHddsDatanodes().get(0) -.getDatanodeDetails().getOzoneRestPort(); -ozoneClient = new OzoneRestClient( -String.format("http://localhost:%d", port)); -ozoneClient.setUserAuth(OzoneConsts.OZONE_SIMPLE_HDFS_USER); +client = new RpcClient(conf); } @AfterClass - public static void shutdown() throws InterruptedException { + public static void shutdown() throws InterruptedException, IOException { if (cluster != null) { cluster.shutdown(); } -IOUtils.cleanupWithLogger(null, ozoneClient); +client.close(); } @Test public void testCreateAndGetVolume() throws Exception { -String volumeName = nextId("volume"); -OzoneVolume volume = ozoneClient.createVolume(volumeName, "bilbo", "100TB"); -assertNotNull(volume); -assertEquals(volumeName, volume.getVolumeName()); -assertEquals(ozoneClient.getUserAuth(), volume.getCreatedby()); -assertEquals("bilbo", volume.getOwnerName()); -assertNotNull(volume.getQuota()); -assertEquals(OzoneQuota.parseQuota("100TB").sizeInBytes(), -volume.getQuota().sizeInBytes()); -volume = ozoneClient.getVolume(volumeName); -assertNotNull(volume); -assertEquals(volumeName, volume.getVolumeName()); 
-assertEquals(ozoneClient.getUserAuth(), volume.getCreatedby()); -assertEquals("bilbo", volume.getOwnerName()); -assertNotNull(volume.getQuota()); -assertEquals(OzoneQuota.parseQuota("100TB").sizeInBytes(), -volume.getQuota().sizeInBytes()); +createAndGetVolume(); } @Test public void testCreateAndGetBucket() throws Exception { -String volumeName = nextId("volume"); -String bucketName = nextId("bucket"); -OzoneVolume volume = ozoneClient.createVolume(volumeName, "bilbo", "100TB"); -assertNotNull(volume); -assertEquals(volumeName, volume.getVolumeName()); -assertEquals(ozoneClient.getUserAuth(), volume.getCreatedby()); -assertEquals("bilbo", volume.getOwnerName()); -
[08/50] [abbrv] hadoop git commit: YARN-8327. Fix TestAggregatedLogFormat#testReadAcontainerLogs1 on Windows. Contributed by Giovanni Matteo Fumarola.
YARN-8327. Fix TestAggregatedLogFormat#testReadAcontainerLogs1 on Windows. Contributed by Giovanni Matteo Fumarola. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f09dc730 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f09dc730 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f09dc730 Branch: refs/heads/YARN-7402 Commit: f09dc73001fd5f3319765fa997f4b0ca9e8f2aff Parents: d726156 Author: Inigo Goiri Authored: Wed May 23 15:59:30 2018 -0700 Committer: Inigo Goiri Committed: Wed May 23 15:59:30 2018 -0700 -- .../logaggregation/TestAggregatedLogFormat.java | 19 --- 1 file changed, 12 insertions(+), 7 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/f09dc730/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java index efbaa4c..f85445e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestAggregatedLogFormat.java @@ -254,13 +254,18 @@ public class TestAggregatedLogFormat { // Since we could not open the fileInputStream for stderr, this file is not // aggregated. String s = writer.toString(); -int expectedLength = -"LogType:stdout".length() -+ (logUploadedTime ? 
("\nLog Upload Time:" + Times.format(System - .currentTimeMillis())).length() : 0) -+ ("\nLogLength:" + numChars).length() -+ "\nLog Contents:\n".length() + numChars + "\n".length() -+ "\nEnd of LogType:stdout\n".length(); + +int expectedLength = "LogType:stdout".length() ++ (logUploadedTime +? (System.lineSeparator() + "Log Upload Time:" ++ Times.format(System.currentTimeMillis())).length() +: 0) ++ (System.lineSeparator() + "LogLength:" + numChars).length() ++ (System.lineSeparator() + "Log Contents:" + System.lineSeparator()) +.length() ++ numChars + ("\n").length() + ("End of LogType:stdout" ++ System.lineSeparator() + System.lineSeparator()).length(); + Assert.assertTrue("LogType not matched", s.contains("LogType:stdout")); Assert.assertTrue("log file:stderr should not be aggregated.", !s.contains("LogType:stderr")); Assert.assertTrue("log file:logs should not be aggregated.", !s.contains("LogType:logs")); - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[37/50] [abbrv] hadoop git commit: HDFS-13627. TestErasureCodingExerciseAPIs fails on Windows. Contributed by Anbang Hu.
HDFS-13627. TestErasureCodingExerciseAPIs fails on Windows. Contributed by Anbang Hu. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/91d7c74e Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/91d7c74e Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/91d7c74e Branch: refs/heads/YARN-7402 Commit: 91d7c74e6aa4850922f68bab490b585443e4fccb Parents: 7c34366 Author: Inigo Goiri Authored: Mon May 28 10:26:47 2018 -0700 Committer: Inigo Goiri Committed: Mon May 28 10:26:47 2018 -0700 -- .../org/apache/hadoop/hdfs/TestErasureCodingExerciseAPIs.java | 5 - 1 file changed, 4 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/91d7c74e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingExerciseAPIs.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingExerciseAPIs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingExerciseAPIs.java index 4335527..c63ba34 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingExerciseAPIs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestErasureCodingExerciseAPIs.java @@ -40,6 +40,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.DataOutputStream; +import java.io.File; import java.io.IOException; import java.nio.file.Paths; import java.security.NoSuchAlgorithmException; @@ -91,8 +92,10 @@ public class TestErasureCodingExerciseAPIs { // Set up java key store String testRootDir = Paths.get(new FileSystemTestHelper().getTestRootDir()) .toString(); +Path targetFile = new Path(new File(testRootDir).getAbsolutePath(), +"test.jks"); String keyProviderURI = JavaKeyStoreProvider.SCHEME_NAME + "://file" -+ new Path(testRootDir, "test.jks").toUri(); ++ targetFile.toUri(); 
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH, keyProviderURI); conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[40/50] [abbrv] hadoop git commit: HADOOP-15455. Incorrect debug message in KMSACL#hasAccess. Contributed by Yuen-Kuei Hsueh.
HADOOP-15455. Incorrect debug message in KMSACL#hasAccess. Contributed by Yuen-Kuei Hsueh. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/438ef495 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/438ef495 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/438ef495 Branch: refs/heads/YARN-7402 Commit: 438ef4951a38171f193eaf2631da31d0f4bc3c62 Parents: 8fdc993 Author: Wei-Chiu Chuang Authored: Mon May 28 17:32:32 2018 -0700 Committer: Wei-Chiu Chuang Committed: Mon May 28 17:32:32 2018 -0700 -- .../java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java| 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/438ef495/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java -- diff --git a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java index b02f34e..17faec2 100644 --- a/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java +++ b/hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSACLs.java @@ -247,9 +247,9 @@ public class KMSACLs implements Runnable, KeyACLs { if (blacklist == null) { LOG.debug("No blacklist for {}", type.toString()); } else if (access) { - LOG.debug("user is in {}" , blacklist.getAclString()); -} else { LOG.debug("user is not in {}" , blacklist.getAclString()); +} else { + LOG.debug("user is in {}" , blacklist.getAclString()); } } } - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[09/50] [abbrv] hadoop git commit: YARN-4599. Set OOM control for memory cgroups. (Miklos Szegedi via Haibo Chen)
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9964799/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupElasticMemoryController.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupElasticMemoryController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupElasticMemoryController.java new file mode 100644 index 000..118d172 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupElasticMemoryController.java @@ -0,0 +1,319 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.yarn.server.nodemanager.containermanager.linux.resources; + +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; +import org.junit.Test; + +import java.io.File; +import java.nio.charset.Charset; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; + +import static org.junit.Assert.assertTrue; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +/** + * Test for elastic non-strict memory controller based on cgroups. + */ +public class TestCGroupElasticMemoryController { + private YarnConfiguration conf = new YarnConfiguration(); + private File script = new File("target/" + + TestCGroupElasticMemoryController.class.getName()); + + /** + * Test that at least one memory type is requested. + * @throws YarnException on exception + */ + @Test(expected = YarnException.class) + public void testConstructorOff() + throws YarnException { +CGroupElasticMemoryController controller = +new CGroupElasticMemoryController( +conf, +null, +null, +false, +false, +1 +); + } + + /** + * Test that the OOM logic is pluggable. 
+ * @throws YarnException on exception + */ + @Test + public void testConstructorHandler() + throws YarnException { +conf.setClass(YarnConfiguration.NM_ELASTIC_MEMORY_CONTROL_OOM_HANDLER, +DummyRunnableWithContext.class, Runnable.class); +CGroupsHandler handler = mock(CGroupsHandler.class); +when(handler.getPathForCGroup(any(), any())).thenReturn(""); +CGroupElasticMemoryController controller = +new CGroupElasticMemoryController( +conf, +null, +handler, +true, +false, +1 +); + } + + /** + * Test that the handler is notified about multiple OOM events. + * @throws Exception on exception + */ + @Test + public void testMultipleOOMEvents() throws Exception { +conf.set(YarnConfiguration.NM_ELASTIC_MEMORY_CONTROL_OOM_LISTENER_PATH, +script.getAbsolutePath()); +try { + FileUtils.writeStringToFile(script, + "#!/bin/bash\nprintf oomevent;printf oomevent;\n", + Charset.defaultCharset(), false); + assertTrue("Could not set executable", + script.setExecutable(true)); + + CGroupsHandler cgroups = mock(CGroupsHandler.class); + when(cgroups.getPathForCGroup(any(), any())).thenReturn(""); + when(cgroups.getCGroupParam(any(), any(), any())) + .thenReturn("under_oom 0"); + + Runnable handler = mock(Runnable.class); + doNothing().when(handler).run(); + +
[22/50] [abbrv] hadoop git commit: HADOOP-15494. TestRawLocalFileSystemContract fails on Windows. Contributed by Anbang Hu.
HADOOP-15494. TestRawLocalFileSystemContract fails on Windows. Contributed by Anbang Hu. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bddfe796 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bddfe796 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bddfe796 Branch: refs/heads/YARN-7402 Commit: bddfe796f2f992fc1dcc8a1dd44d64ff2b3c9cf4 Parents: 86bc642 Author: Steve Loughran Authored: Fri May 25 11:12:47 2018 +0100 Committer: Steve Loughran Committed: Fri May 25 11:12:47 2018 +0100 -- .../java/org/apache/hadoop/fs/TestRawLocalFileSystemContract.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/bddfe796/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestRawLocalFileSystemContract.java -- diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestRawLocalFileSystemContract.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestRawLocalFileSystemContract.java index ebf9ea7..908e330 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestRawLocalFileSystemContract.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestRawLocalFileSystemContract.java @@ -42,7 +42,7 @@ public class TestRawLocalFileSystemContract extends FileSystemContractBaseTest { private static final Logger LOG = LoggerFactory.getLogger(TestRawLocalFileSystemContract.class); private final static Path TEST_BASE_DIR = - new Path(GenericTestUtils.getTempPath("")); + new Path(GenericTestUtils.getRandomizedTestDir().getAbsolutePath()); @Before public void setUp() throws Exception { - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[01/50] [abbrv] hadoop git commit: HADOOP-15457. Add Security-Related HTTP Response Header in WEBUIs. (kanwaljeets via rkanter) [Forced Update!]
Repository: hadoop Updated Branches: refs/heads/YARN-7402 db183f2ea -> c5bf22dc1 (forced update) HADOOP-15457. Add Security-Related HTTP Response Header in WEBUIs. (kanwaljeets via rkanter) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aa23d49f Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aa23d49f Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aa23d49f Branch: refs/heads/YARN-7402 Commit: aa23d49fc8b9c2537529dbdc13512000e2ab295a Parents: bc6d9d4 Author: Robert Kanter Authored: Wed May 23 10:23:17 2018 -0700 Committer: Robert Kanter Committed: Wed May 23 10:24:09 2018 -0700 -- .../org/apache/hadoop/http/HttpServer2.java | 79 +++- .../org/apache/hadoop/http/TestHttpServer.java | 61 +++ 2 files changed, 121 insertions(+), 19 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa23d49f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java -- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java index 47ca841..c273c78 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java @@ -34,6 +34,8 @@ import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Properties; +import java.util.regex.Matcher; +import java.util.regex.Pattern; import javax.servlet.Filter; import javax.servlet.FilterChain; @@ -172,10 +174,16 @@ public final class HttpServer2 implements FilterContainer { private final SignerSecretProvider secretProvider; private XFrameOption xFrameOption; private boolean xFrameOptionIsEnabled; - private static final String X_FRAME_VALUE = "xFrameOption"; - private static final String X_FRAME_ENABLED = "X_FRAME_ENABLED"; - 
- + public static final String HTTP_HEADER_PREFIX = "hadoop.http.header."; + private static final String HTTP_HEADER_REGEX = + "hadoop\\.http\\.header\\.([a-zA-Z\\-_]+)"; + static final String X_XSS_PROTECTION = + "X-XSS-Protection:1; mode=block"; + static final String X_CONTENT_TYPE_OPTIONS = + "X-Content-Type-Options:nosniff"; + private static final String X_FRAME_OPTIONS = "X-FRAME-OPTIONS"; + private static final Pattern PATTERN_HTTP_HEADER_REGEX = + Pattern.compile(HTTP_HEADER_REGEX); /** * Class to construct instances of HTTP server with specific options. */ @@ -574,10 +582,7 @@ public final class HttpServer2 implements FilterContainer { addDefaultApps(contexts, appDir, conf); webServer.setHandler(handlers); -Map xFrameParams = new HashMap<>(); -xFrameParams.put(X_FRAME_ENABLED, -String.valueOf(this.xFrameOptionIsEnabled)); -xFrameParams.put(X_FRAME_VALUE, this.xFrameOption.toString()); +Map xFrameParams = setHeaders(conf); addGlobalFilter("safety", QuotingInputFilter.class.getName(), xFrameParams); final FilterInitializer[] initializers = getFilterInitializers(conf); if (initializers != null) { @@ -1475,9 +1480,11 @@ public final class HttpServer2 implements FilterContainer { public static class QuotingInputFilter implements Filter { private FilterConfig config; +private Map headerMap; public static class RequestQuoter extends HttpServletRequestWrapper { private final HttpServletRequest rawRequest; + public RequestQuoter(HttpServletRequest rawRequest) { super(rawRequest); this.rawRequest = rawRequest; @@ -1566,6 +1573,7 @@ public final class HttpServer2 implements FilterContainer { @Override public void init(FilterConfig config) throws ServletException { this.config = config; + initHttpHeaderMap(); } @Override @@ -1593,11 +1601,7 @@ public final class HttpServer2 implements FilterContainer { } else if (mime.startsWith("application/xml")) { httpResponse.setContentType("text/xml; charset=utf-8"); } - - 
if(Boolean.valueOf(this.config.getInitParameter(X_FRAME_ENABLED))) { -httpResponse.addHeader("X-FRAME-OPTIONS", -this.config.getInitParameter(X_FRAME_VALUE)); - } + headerMap.forEach((k, v) -> httpResponse.addHeader(k, v)); chain.doFilter(quoted, httpResponse); } @@ -1613,14 +1617,25 @@ public final class HttpServer2 implements FilterContainer { return (mime == null) ? null : mime; } +private void initHttpHeaderMap() { + Enumeration params = this.config.getInitParameterNames(); +
[42/50] [abbrv] hadoop git commit: HADOOP-15497. TestTrash should use proper test path to avoid failing on Windows. Contributed by Anbang Hu.
HADOOP-15497. TestTrash should use proper test path to avoid failing on Windows. Contributed by Anbang Hu. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3c75f8e4 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3c75f8e4 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3c75f8e4 Branch: refs/heads/YARN-7402 Commit: 3c75f8e4933221fa60a87e86a3db5e4727530b6f Parents: 31ab960 Author: Inigo Goiri Authored: Tue May 29 09:11:08 2018 -0700 Committer: Inigo Goiri Committed: Tue May 29 09:11:08 2018 -0700 -- .../src/test/java/org/apache/hadoop/fs/TestTrash.java | 10 ++ 1 file changed, 6 insertions(+), 4 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/3c75f8e4/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java -- diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java index 12aed29..fa2d21f 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java @@ -49,9 +49,11 @@ import org.apache.hadoop.util.Time; */ public class TestTrash { - private final static Path TEST_DIR = new Path(GenericTestUtils.getTempPath( + private final static File BASE_PATH = new File(GenericTestUtils.getTempPath( "testTrash")); + private final static Path TEST_DIR = new Path(BASE_PATH.getAbsolutePath()); + @Before public void setUp() throws IOException { // ensure each test initiates a FileSystem instance, @@ -682,7 +684,7 @@ public class TestTrash { static class TestLFS extends LocalFileSystem { Path home; TestLFS() { - this(new Path(TEST_DIR, "user/test")); + this(TEST_DIR); } TestLFS(final Path home) { super(new RawLocalFileSystem() { @@ -809,8 +811,8 @@ public class TestTrash { */ public static void 
verifyTrashPermission(FileSystem fs, Configuration conf) throws IOException { -Path caseRoot = new Path( -GenericTestUtils.getTempPath("testTrashPermission")); +Path caseRoot = new Path(BASE_PATH.getPath(), +"testTrashPermission"); try (FileSystem fileSystem = fs){ Trash trash = new Trash(fileSystem, conf); FileSystemTestWrapper wrapper = - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[24/50] [abbrv] hadoop git commit: HADOOP-15473. Configure serialFilter in KeyProvider to avoid UnrecoverableKeyException caused by JDK-8189997. Contributed by Gabor Bota.
HADOOP-15473. Configure serialFilter in KeyProvider to avoid UnrecoverableKeyException caused by JDK-8189997. Contributed by Gabor Bota. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/02322de3 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/02322de3 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/02322de3 Branch: refs/heads/YARN-7402 Commit: 02322de3f95ba78a22c057037ef61aa3ab1d3824 Parents: 8d5509c Author: Xiao Chen Authored: Fri May 25 09:08:15 2018 -0700 Committer: Xiao Chen Committed: Fri May 25 09:10:51 2018 -0700 -- .../apache/hadoop/crypto/key/KeyProvider.java | 18 +++ .../fs/CommonConfigurationKeysPublic.java | 7 ++ .../src/main/resources/core-default.xml | 23 3 files changed, 48 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/02322de3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java -- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java index 5d670e5..050540b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/KeyProvider.java @@ -42,6 +42,8 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import javax.crypto.KeyGenerator; +import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_CRYPTO_JCEKS_KEY_SERIALFILTER; + /** * A provider of secret key material for Hadoop applications. Provides an * abstraction to separate key storage from users of encryption. It @@ -61,6 +63,14 @@ public abstract class KeyProvider { CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_DEFAULT_BITLENGTH_KEY; public static final int DEFAULT_BITLENGTH = CommonConfigurationKeysPublic. 
HADOOP_SECURITY_KEY_DEFAULT_BITLENGTH_DEFAULT; + public static final String JCEKS_KEY_SERIALFILTER_DEFAULT = + "java.lang.Enum;" + + "java.security.KeyRep;" + + "java.security.KeyRep$Type;" + + "javax.crypto.spec.SecretKeySpec;" + + "org.apache.hadoop.crypto.key.JavaKeyStoreProvider$KeyMetadata;" + + "!*"; + public static final String JCEKS_KEY_SERIAL_FILTER = "jceks.key.serialFilter"; private final Configuration conf; @@ -394,6 +404,14 @@ public abstract class KeyProvider { */ public KeyProvider(Configuration conf) { this.conf = new Configuration(conf); +// Added for HADOOP-15473. Configured serialFilter property fixes +// java.security.UnrecoverableKeyException in JDK 8u171. +if(System.getProperty(JCEKS_KEY_SERIAL_FILTER) == null) { + String serialFilter = + conf.get(HADOOP_SECURITY_CRYPTO_JCEKS_KEY_SERIALFILTER, + JCEKS_KEY_SERIALFILTER_DEFAULT); + System.setProperty(JCEKS_KEY_SERIAL_FILTER, serialFilter); +} } /** http://git-wip-us.apache.org/repos/asf/hadoop/blob/02322de3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java -- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java index 8837cfb..9e0ba20 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java @@ -662,6 +662,13 @@ public class CommonConfigurationKeysPublic { * * core-default.xml */ + public static final String HADOOP_SECURITY_CRYPTO_JCEKS_KEY_SERIALFILTER = + "hadoop.security.crypto.jceks.key.serialfilter"; + /** + * @see + * + * core-default.xml + */ public static final String HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_KEY = "hadoop.security.crypto.buffer.size"; /** Defalt value for 
HADOOP_SECURITY_CRYPTO_BUFFER_SIZE_KEY */ http://git-wip-us.apache.org/repos/asf/hadoop/blob/02322de3/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml -- diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml index fad2985..9564587 100644 ---
[13/50] [abbrv] hadoop git commit: YARN-8319. More YARN pages need to honor yarn.resourcemanager.display.per-user-apps. Contributed by Sunil G.
YARN-8319. More YARN pages need to honor yarn.resourcemanager.display.per-user-apps. Contributed by Sunil G. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c05b5d42 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c05b5d42 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c05b5d42 Branch: refs/heads/YARN-7402 Commit: c05b5d424b000bab766f57e88a07f2b4e9a56647 Parents: 4cc0c9b Author: Rohith Sharma K S Authored: Thu May 24 14:19:46 2018 +0530 Committer: Rohith Sharma K S Committed: Thu May 24 14:19:46 2018 +0530 -- .../hadoop/yarn/conf/YarnConfiguration.java | 11 +++- .../yarn/conf/TestYarnConfigurationFields.java | 2 + .../src/main/resources/yarn-default.xml | 2 +- .../nodemanager/webapp/NMWebServices.java | 63 +- .../webapp/TestNMWebServicesApps.java | 68 +--- .../server/resourcemanager/ClientRMService.java | 10 +-- .../resourcemanager/webapp/RMWebServices.java | 8 +-- .../reader/TimelineReaderWebServices.java | 33 ++ 8 files changed, 175 insertions(+), 22 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/c05b5d42/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index 6d08831..004a59f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -121,6 +121,10 @@ public class YarnConfiguration extends Configuration { new DeprecationDelta(RM_ZK_RETRY_INTERVAL_MS, CommonConfigurationKeys.ZK_RETRY_INTERVAL_MS), }); +Configuration.addDeprecations(new DeprecationDelta[] { +new 
DeprecationDelta("yarn.resourcemanager.display.per-user-apps", +FILTER_ENTITY_LIST_BY_USER) +}); } //Configurations @@ -3569,11 +3573,16 @@ public class YarnConfiguration extends Configuration { public static final String NM_SCRIPT_BASED_NODE_LABELS_PROVIDER_SCRIPT_OPTS = NM_SCRIPT_BASED_NODE_LABELS_PROVIDER_PREFIX + "opts"; - /* + /** * Support to view apps for given user in secure cluster. + * @deprecated This field is deprecated for {@link #FILTER_ENTITY_LIST_BY_USER} */ + @Deprecated public static final String DISPLAY_APPS_FOR_LOGGED_IN_USER = RM_PREFIX + "display.per-user-apps"; + + public static final String FILTER_ENTITY_LIST_BY_USER = + "yarn.webapp.filter-entity-list-by-user"; public static final boolean DEFAULT_DISPLAY_APPS_FOR_LOGGED_IN_USER = false; http://git-wip-us.apache.org/repos/asf/hadoop/blob/c05b5d42/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java index f4d1ac0..b9ba543 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/test/java/org/apache/hadoop/yarn/conf/TestYarnConfigurationFields.java @@ -182,6 +182,8 @@ public class TestYarnConfigurationFields extends TestConfigurationFieldsBase { // Ignore deprecated properties configurationPrefixToSkipCompare .add(YarnConfiguration.YARN_CLIENT_APP_SUBMISSION_POLL_INTERVAL_MS); +configurationPrefixToSkipCompare +.add(YarnConfiguration.DISPLAY_APPS_FOR_LOGGED_IN_USER); // Allocate for usage xmlPropsToSkipCompare = new HashSet(); 
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c05b5d42/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml index da44ccb..c82474c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml +++
[39/50] [abbrv] hadoop git commit: HADOOP-15498. TestHadoopArchiveLogs (#testGenerateScript, #testPrepareWorkingDir) fails on Windows. Contributed by Anbang Hu.
HADOOP-15498. TestHadoopArchiveLogs (#testGenerateScript, #testPrepareWorkingDir) fails on Windows. Contributed by Anbang Hu. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8fdc993a Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8fdc993a Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8fdc993a Branch: refs/heads/YARN-7402 Commit: 8fdc993a993728c65084d7dc3ac469059cb1f603 Parents: 9dbf4f0 Author: Inigo Goiri Authored: Mon May 28 16:45:42 2018 -0700 Committer: Inigo Goiri Committed: Mon May 28 16:45:42 2018 -0700 -- .../org/apache/hadoop/tools/TestHadoopArchiveLogs.java | 12 1 file changed, 8 insertions(+), 4 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fdc993a/hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogs.java -- diff --git a/hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogs.java b/hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogs.java index 2ddd4c5..a1b662c 100644 --- a/hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogs.java +++ b/hadoop-tools/hadoop-archive-logs/src/test/java/org/apache/hadoop/tools/TestHadoopArchiveLogs.java @@ -25,6 +25,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.util.Shell; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; @@ -278,7 +279,7 @@ public class TestHadoopArchiveLogs { hal.generateScript(localScript); Assert.assertTrue(localScript.exists()); String script = IOUtils.toString(localScript.toURI()); -String[] lines = script.split(System.lineSeparator()); 
+String[] lines = script.split("\n"); Assert.assertEquals(22, lines.length); Assert.assertEquals("#!/bin/bash", lines[0]); Assert.assertEquals("set -e", lines[1]); @@ -368,7 +369,8 @@ public class TestHadoopArchiveLogs { Assert.assertTrue(dirPrepared); Assert.assertTrue(fs.exists(workingDir)); Assert.assertEquals( -new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL, true), +new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL, +!Shell.WINDOWS), fs.getFileStatus(workingDir).getPermission()); // Throw a file in the dir Path dummyFile = new Path(workingDir, "dummy.txt"); @@ -381,7 +383,8 @@ public class TestHadoopArchiveLogs { Assert.assertTrue(fs.exists(workingDir)); Assert.assertTrue(fs.exists(dummyFile)); Assert.assertEquals( -new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL, true), +new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL, +!Shell.WINDOWS), fs.getFileStatus(workingDir).getPermission()); // -force is true and the dir exists, so it will recreate it and the dummy // won't exist anymore @@ -390,7 +393,8 @@ public class TestHadoopArchiveLogs { Assert.assertTrue(dirPrepared); Assert.assertTrue(fs.exists(workingDir)); Assert.assertEquals( -new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL, true), +new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL, +!Shell.WINDOWS), fs.getFileStatus(workingDir).getPermission()); Assert.assertFalse(fs.exists(dummyFile)); } - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[20/50] [abbrv] hadoop git commit: YARN-8357. Fixed NPE when YARN service is saved and not deployed. Contributed by Chandni Singh
YARN-8357. Fixed NPE when YARN service is saved and not deployed. Contributed by Chandni Singh Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d9852eb5 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d9852eb5 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d9852eb5 Branch: refs/heads/YARN-7402 Commit: d9852eb5897a25323ab0302c2c0decb61d310e5e Parents: 7ff5a40 Author: Eric Yang Authored: Thu May 24 16:32:13 2018 -0400 Committer: Eric Yang Committed: Thu May 24 16:32:13 2018 -0400 -- .../java/org/apache/hadoop/yarn/service/client/ServiceClient.java | 1 + 1 file changed, 1 insertion(+) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/d9852eb5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java index 93a74e3..0ab3322 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java @@ -1198,6 +1198,7 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes, ServiceApiUtil.validateNameFormat(serviceName, getConfig()); Service appSpec = new Service(); appSpec.setName(serviceName); +appSpec.setState(ServiceState.STOPPED); ApplicationId currentAppId = getAppId(serviceName); if 
(currentAppId == null) { LOG.info("Service {} does not have an application ID", serviceName); - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[18/50] [abbrv] hadoop git commit: HDDS-80. Remove SendContainerCommand from SCM. Contributed by Nanda Kumar.
HDDS-80. Remove SendContainerCommand from SCM. Contributed by Nanda Kumar. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2d19e7d0 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2d19e7d0 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2d19e7d0 Branch: refs/heads/YARN-7402 Commit: 2d19e7d08f031341078a36fee74860c58de02993 Parents: c9b63de Author: Xiaoyu Yao Authored: Thu May 24 11:10:30 2018 -0700 Committer: Xiaoyu Yao Committed: Thu May 24 11:10:30 2018 -0700 -- .../statemachine/DatanodeStateMachine.java | 3 - .../commandhandler/ContainerReportHandler.java | 114 --- .../states/endpoint/HeartbeatEndpointTask.java | 5 - .../protocol/commands/SendContainerCommand.java | 80 - .../StorageContainerDatanodeProtocol.proto | 16 ++- .../container/replication/InProgressPool.java | 57 -- .../scm/server/SCMDatanodeProtocolServer.java | 7 -- 7 files changed, 7 insertions(+), 275 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d19e7d0/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java -- diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java index a16bfdc..a8fe494 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java @@ -26,8 +26,6 @@ import org.apache.hadoop.ozone.container.common.statemachine.commandhandler import org.apache.hadoop.ozone.container.common.statemachine.commandhandler .CommandDispatcher; import org.apache.hadoop.ozone.container.common.statemachine.commandhandler 
-.ContainerReportHandler; -import org.apache.hadoop.ozone.container.common.statemachine.commandhandler .DeleteBlocksCommandHandler; import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; import org.apache.hadoop.ozone.protocol.commands.SCMCommand; @@ -88,7 +86,6 @@ public class DatanodeStateMachine implements Closeable { // When we add new handlers just adding a new handler here should do the // trick. commandDispatcher = CommandDispatcher.newBuilder() -.addHandler(new ContainerReportHandler()) .addHandler(new CloseContainerHandler()) .addHandler(new DeleteBlocksCommandHandler( container.getContainerManager(), conf)) http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d19e7d0/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ContainerReportHandler.java -- diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ContainerReportHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ContainerReportHandler.java deleted file mode 100644 index fbea290..000 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ContainerReportHandler.java +++ /dev/null @@ -1,114 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; - -import org.apache.hadoop.hdds.protocol.proto -.StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto; -import org.apache.hadoop.hdds.protocol.proto -.StorageContainerDatanodeProtocolProtos.SCMCmdType; -import org.apache.hadoop.ozone.container.common.statemachine -
[30/50] [abbrv] hadoop git commit: YARN-8213. Add Capacity Scheduler performance metrics. (Weiwei Yang via wangda)
YARN-8213. Add Capacity Scheduler performance metrics. (Weiwei Yang via wangda) Change-Id: Ieea6f3eeb83c90cd74233fea896f0fcd0f325d5f Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f24c842d Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f24c842d Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f24c842d Branch: refs/heads/YARN-7402 Commit: f24c842d52e166e8566337ef93c96438f1c870d8 Parents: 8605a38 Author: Wangda Tan Authored: Fri May 25 21:53:20 2018 -0700 Committer: Wangda Tan Committed: Fri May 25 21:53:20 2018 -0700 -- .../server/resourcemanager/ResourceManager.java | 1 + .../scheduler/AbstractYarnScheduler.java| 5 + .../scheduler/ResourceScheduler.java| 5 + .../scheduler/capacity/CapacityScheduler.java | 31 - .../capacity/CapacitySchedulerMetrics.java | 119 +++ .../TestCapacitySchedulerMetrics.java | 110 + 6 files changed, 269 insertions(+), 2 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/f24c842d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java index 05745ec..c533111 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java @@ -1216,6 +1216,7 @@ public class ResourceManager extends CompositeService 
implements Recoverable { void reinitialize(boolean initialize) { ClusterMetrics.destroy(); QueueMetrics.clearQueueMetrics(); +getResourceScheduler().resetSchedulerMetrics(); if (initialize) { resetRMContext(); createAndInitActiveServices(true); http://git-wip-us.apache.org/repos/asf/hadoop/blob/f24c842d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java index b2747f7..18c7b4e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java @@ -1464,4 +1464,9 @@ public abstract class AbstractYarnScheduler SchedulingRequest schedulingRequest, SchedulerNode schedulerNode) { return false; } + + @Override + public void resetSchedulerMetrics() { +// reset scheduler metrics + } } http://git-wip-us.apache.org/repos/asf/hadoop/blob/f24c842d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java index 5a56ac7..dcb6edd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java +++
[03/50] [abbrv] hadoop git commit: HDFS-13587. TestQuorumJournalManager fails on Windows. Contributed by Anbang Hu.
HDFS-13587. TestQuorumJournalManager fails on Windows. Contributed by Anbang Hu. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c13dea87 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c13dea87 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c13dea87 Branch: refs/heads/YARN-7402 Commit: c13dea87d9de7a9872fc8b0c939b41b1666a61e5 Parents: 51ce02b Author: Inigo Goiri Authored: Wed May 23 11:36:03 2018 -0700 Committer: Inigo Goiri Committed: Wed May 23 11:36:03 2018 -0700 -- .../org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java | 5 + .../hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java | 3 ++- 2 files changed, 7 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/c13dea87/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java index 2314e22..f936d75 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/MiniJournalCluster.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.qjournal.client.QuorumJournalManager; import org.apache.hadoop.hdfs.qjournal.server.JournalNode; +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.net.NetUtils; import com.google.common.base.Joiner; @@ -50,6 +51,10 @@ public class MiniJournalCluster { private int numJournalNodes = 3; private boolean format = true; private final Configuration conf; + +static { + DefaultMetricsSystem.setMiniClusterMode(true); +} public 
Builder(Configuration conf) { this.conf = conf; http://git-wip-us.apache.org/repos/asf/hadoop/blob/c13dea87/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java index 34a0348..69856ae 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/client/TestQuorumJournalManager.java @@ -93,7 +93,8 @@ public class TestQuorumJournalManager { conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 0); cluster = new MiniJournalCluster.Builder(conf) - .build(); +.baseDir(GenericTestUtils.getRandomizedTestDir().getAbsolutePath()) +.build(); cluster.waitActive(); qjm = createSpyingQJM(); - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[28/50] [abbrv] hadoop git commit: HDDS-113. Rest and Rpc Client should verify resource name using HddsClientUtils. Contributed by Lokesh Jain.
HDDS-113. Rest and Rpc Client should verify resource name using HddsClientUtils. Contributed by Lokesh Jain. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2a9652e6 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2a9652e6 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2a9652e6 Branch: refs/heads/YARN-7402 Commit: 2a9652e69650973f6158b60ff131215827738db6 Parents: 13d2528 Author: Anu Engineer Authored: Fri May 25 15:40:46 2018 -0700 Committer: Anu Engineer Committed: Fri May 25 15:45:50 2018 -0700 -- .../hadoop/hdds/scm/client/HddsClientUtils.java | 23 + .../apache/hadoop/ozone/client/ObjectStore.java | 9 .../apache/hadoop/ozone/client/OzoneBucket.java | 24 + .../apache/hadoop/ozone/client/OzoneVolume.java | 18 +-- .../hadoop/ozone/client/rest/RestClient.java| 52 .../hadoop/ozone/client/rpc/RpcClient.java | 46 +++-- 6 files changed, 64 insertions(+), 108 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a9652e6/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java -- diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java index bc5f8d6..a6813eb 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java @@ -170,6 +170,29 @@ public final class HddsClientUtils { } /** + * verifies that bucket / volume name is a valid DNS name. + * + * @param resourceNames Array of bucket / volume names to be verified. + */ + public static void verifyResourceName(String... resourceNames) { +for (String resourceName : resourceNames) { + HddsClientUtils.verifyResourceName(resourceName); +} + } + + /** + * Checks that object parameters passed as reference is not null. 
+ * + * @param references Array of object references to be checked. + * @param <T> + */ + public static <T> void checkNotNull(T... references) { +for (T ref: references) { + Preconditions.checkNotNull(ref); +} + } + + /** * Returns the cache value to be used for list calls. * @param conf Configuration object * @return list cache size http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a9652e6/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java -- diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java index d8b3011..c5f0689 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java @@ -63,8 +63,6 @@ public class ObjectStore { * @throws IOException */ public void createVolume(String volumeName) throws IOException { -Preconditions.checkNotNull(volumeName); -HddsClientUtils.verifyResourceName(volumeName); proxy.createVolume(volumeName); } @@ -76,9 +74,6 @@ public class ObjectStore { * @throws IOException */ public void createVolume(String volumeName, VolumeArgs volumeArgs) throws IOException { -Preconditions.checkNotNull(volumeName); -Preconditions.checkNotNull(volumeArgs); -HddsClientUtils.verifyResourceName(volumeName); proxy.createVolume(volumeName, volumeArgs); } @@ -89,8 +84,6 @@ public class ObjectStore { * @throws IOException */ public OzoneVolume getVolume(String volumeName) throws IOException { -Preconditions.checkNotNull(volumeName); -HddsClientUtils.verifyResourceName(volumeName); OzoneVolume volume = proxy.getVolumeDetails(volumeName); return volume; } @@ -150,8 +143,6 @@ public class ObjectStore { * @throws IOException */ public void deleteVolume(String volumeName) throws IOException { -Preconditions.checkNotNull(volumeName); -HddsClientUtils.verifyResourceName(volumeName); proxy.deleteVolume(volumeName); }
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a9652e6/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java -- diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java index 5df0254..2f3cff6 100644 ---
[12/50] [abbrv] hadoop git commit: YARN-8346. Upgrading to 3.1 kills running containers with error 'Opportunistic container queue is full'. Contributed by Jason Lowe.
YARN-8346. Upgrading to 3.1 kills running containers with error 'Opportunistic container queue is full'. Contributed by Jason Lowe. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4cc0c9b0 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4cc0c9b0 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4cc0c9b0 Branch: refs/heads/YARN-7402 Commit: 4cc0c9b0baa93f5a1c0623eee353874e858a7caa Parents: 7a87add Author: Rohith Sharma K S Authored: Thu May 24 12:23:47 2018 +0530 Committer: Rohith Sharma K S Committed: Thu May 24 12:23:47 2018 +0530 -- .../yarn/security/ContainerTokenIdentifier.java | 4 ++-- .../yarn/security/TestYARNTokenIdentifier.java | 25 2 files changed, 27 insertions(+), 2 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/4cc0c9b0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java index 37c74b8..8dea65f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java @@ -292,7 +292,7 @@ public class ContainerTokenIdentifier extends TokenIdentifier { */ public ContainerType getContainerType(){ if (!proto.hasContainerType()) { - return null; + return ContainerType.TASK; } return convertFromProtoFormat(proto.getContainerType()); } @@ -303,7 +303,7 @@ public class ContainerTokenIdentifier extends TokenIdentifier { */ public ExecutionType getExecutionType(){ if (!proto.hasExecutionType()) { - return null; + return 
ExecutionType.GUARANTEED; } return convertFromProtoFormat(proto.getExecutionType()); } http://git-wip-us.apache.org/repos/asf/hadoop/blob/4cc0c9b0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestYARNTokenIdentifier.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestYARNTokenIdentifier.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestYARNTokenIdentifier.java index 51fbe9a..8109b5e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestYARNTokenIdentifier.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/security/TestYARNTokenIdentifier.java @@ -37,6 +37,7 @@ import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.impl.pb.LogAggregationContextPBImpl; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager; +import org.apache.hadoop.yarn.proto.YarnSecurityTokenProtos.ContainerTokenIdentifierProto; import org.apache.hadoop.yarn.proto.YarnSecurityTokenProtos.YARNDelegationTokenIdentifierProto; import org.apache.hadoop.yarn.security.client.ClientToAMTokenIdentifier; import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier; @@ -170,6 +171,30 @@ public class TestYARNTokenIdentifier { } @Test + public void testContainerTokenIdentifierProtoMissingFields() + throws IOException { +ContainerTokenIdentifierProto.Builder builder = +ContainerTokenIdentifierProto.newBuilder(); +ContainerTokenIdentifierProto proto = builder.build(); +Assert.assertFalse(proto.hasContainerType()); +Assert.assertFalse(proto.hasExecutionType()); +Assert.assertFalse(proto.hasNodeLabelExpression()); + +byte[] tokenData = proto.toByteArray(); +DataInputBuffer dib = new DataInputBuffer(); 
+dib.reset(tokenData, tokenData.length); +ContainerTokenIdentifier tid = new ContainerTokenIdentifier(); +tid.readFields(dib); + +Assert.assertEquals("container type", +ContainerType.TASK, tid.getContainerType()); +Assert.assertEquals("execution type", +ExecutionType.GUARANTEED, tid.getExecutionType()); +Assert.assertEquals("node label expression", +CommonNodeLabelsManager.NO_LABEL, tid.getNodeLabelExpression()); + } + + @Test public void testContainerTokenIdentifier() throws IOException {
[05/50] [abbrv] hadoop git commit: HDFS-13493. Reduce the HttpServer2 thread count on DataNodes. Contributed by Erik Krogen.
HDFS-13493. Reduce the HttpServer2 thread count on DataNodes. Contributed by Erik Krogen. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cddbbe5f Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cddbbe5f Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cddbbe5f Branch: refs/heads/YARN-7402 Commit: cddbbe5f690e4617413f6e986adc6fa900629f03 Parents: e30938a Author: Inigo Goiri Authored: Wed May 23 12:12:08 2018 -0700 Committer: Inigo Goiri Committed: Wed May 23 12:12:08 2018 -0700 -- .../hdfs/server/datanode/web/DatanodeHttpServer.java | 14 +- 1 file changed, 13 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/cddbbe5f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java index 0ce327a..4349c26 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java @@ -89,6 +89,13 @@ public class DatanodeHttpServer implements Closeable { private InetSocketAddress httpsAddress; static final Log LOG = LogFactory.getLog(DatanodeHttpServer.class); + // HttpServer threads are only used for the web UI and basic servlets, so + // set them to the minimum possible + private static final int HTTP_SELECTOR_THREADS = 1; + private static final int HTTP_ACCEPTOR_THREADS = 1; + private static final int HTTP_MAX_THREADS = + HTTP_SELECTOR_THREADS + HTTP_ACCEPTOR_THREADS + 1; + public DatanodeHttpServer(final Configuration conf, final DataNode datanode, final ServerSocketChannel 
externalHttpChannel) @@ -97,7 +104,12 @@ public class DatanodeHttpServer implements Closeable { this.conf = conf; Configuration confForInfoServer = new Configuration(conf); -confForInfoServer.setInt(HttpServer2.HTTP_MAX_THREADS_KEY, 10); +confForInfoServer.setInt(HttpServer2.HTTP_MAX_THREADS_KEY, +HTTP_MAX_THREADS); +confForInfoServer.setInt(HttpServer2.HTTP_SELECTOR_COUNT_KEY, +HTTP_SELECTOR_THREADS); +confForInfoServer.setInt(HttpServer2.HTTP_ACCEPTOR_COUNT_KEY, +HTTP_ACCEPTOR_THREADS); int proxyPort = confForInfoServer.getInt(DFS_DATANODE_HTTP_INTERNAL_PROXY_PORT, 0); HttpServer2.Builder builder = new HttpServer2.Builder() - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[35/50] [abbrv] hadoop git commit: HADOOP-15449. Increase default timeout of ZK session to avoid frequent NameNode failover
HADOOP-15449. Increase default timeout of ZK session to avoid frequent NameNode failover Signed-off-by: Akira Ajisaka Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/61df174e Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/61df174e Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/61df174e Branch: refs/heads/YARN-7402 Commit: 61df174e8b3d582183306cabfa2347c8b96322ff Parents: 04757e5 Author: Karthik Palanisamy Authored: Mon May 28 19:41:07 2018 +0900 Committer: Akira Ajisaka Committed: Mon May 28 19:41:07 2018 +0900 -- .../src/main/java/org/apache/hadoop/ha/ZKFailoverController.java | 2 +- .../hadoop-common/src/main/resources/core-default.xml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/61df174e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java -- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java index a8c19ab..9295288 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java @@ -63,7 +63,7 @@ public abstract class ZKFailoverController { public static final String ZK_QUORUM_KEY = "ha.zookeeper.quorum"; private static final String ZK_SESSION_TIMEOUT_KEY = "ha.zookeeper.session-timeout.ms"; - private static final int ZK_SESSION_TIMEOUT_DEFAULT = 5*1000; + private static final int ZK_SESSION_TIMEOUT_DEFAULT = 10*1000; private static final String ZK_PARENT_ZNODE_KEY = "ha.zookeeper.parent-znode"; public static final String ZK_ACL_KEY = "ha.zookeeper.acl"; private static final String ZK_ACL_DEFAULT = "world:anyone:rwcda"; 
http://git-wip-us.apache.org/repos/asf/hadoop/blob/61df174e/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml -- diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml index 9564587..75acf48 100644 --- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml +++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml @@ -2168,7 +2168,7 @@ ha.zookeeper.session-timeout.ms - 5000 + 10000 The session timeout to use when the ZKFC connects to ZooKeeper. Setting this value to a lower value implies that server crashes - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[43/50] [abbrv] hadoop git commit: YARN-8339. Service AM should localize static/archive resource types to container working directory instead of 'resources'. (Suma Shivaprasad via wangda)
YARN-8339. Service AM should localize static/archive resource types to container working directory instead of 'resources'. (Suma Shivaprasad via wangda) Change-Id: I9f8e8f621650347f6c2f9e3420edee9eb2f356a4 Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3061bfcd Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3061bfcd Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3061bfcd Branch: refs/heads/YARN-7402 Commit: 3061bfcde53210d2032df3814243498b27a997b7 Parents: 3c75f8e Author: Wangda Tan Authored: Tue May 29 09:23:11 2018 -0700 Committer: Wangda Tan Committed: Tue May 29 09:23:11 2018 -0700 -- .../org/apache/hadoop/yarn/service/provider/ProviderUtils.java | 3 +-- .../apache/hadoop/yarn/service/provider/TestProviderUtils.java | 6 +++--- 2 files changed, 4 insertions(+), 5 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/3061bfcd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java index 1ad5fd8..ac90992 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/provider/ProviderUtils.java @@ -298,8 +298,7 @@ public class ProviderUtils implements YarnServiceConstants { destFile = new 
Path(staticFile.getDestFile()); } - String symlink = APP_RESOURCES_DIR + "/" + destFile.getName(); - addLocalResource(launcher, symlink, localResource, destFile); + addLocalResource(launcher, destFile.getName(), localResource, destFile); } } http://git-wip-us.apache.org/repos/asf/hadoop/blob/3061bfcd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/provider/TestProviderUtils.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/provider/TestProviderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/provider/TestProviderUtils.java index 6e8bc43..5d794d2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/provider/TestProviderUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/test/java/org/apache/hadoop/yarn/service/provider/TestProviderUtils.java @@ -154,11 +154,11 @@ public class TestProviderUtils { ProviderUtils.handleStaticFilesForLocalization(launcher, sfs, compLaunchCtx); - Mockito.verify(launcher).addLocalResource(Mockito.eq("resources/destFile1"), +Mockito.verify(launcher).addLocalResource(Mockito.eq("destFile1"), any(LocalResource.class)); Mockito.verify(launcher).addLocalResource( -Mockito.eq("resources/destFile_2"), any(LocalResource.class)); +Mockito.eq("destFile_2"), any(LocalResource.class)); Mockito.verify(launcher).addLocalResource( -Mockito.eq("resources/sourceFile4"), any(LocalResource.class)); +Mockito.eq("sourceFile4"), any(LocalResource.class)); } } - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, 
e-mail: common-commits-h...@hadoop.apache.org
[04/50] [abbrv] hadoop git commit: YARN-8336. Fix potential connection leak in SchedConfCLI and YarnWebServiceUtils. Contributed by Giovanni Matteo Fumarola.
YARN-8336. Fix potential connection leak in SchedConfCLI and YarnWebServiceUtils. Contributed by Giovanni Matteo Fumarola. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e30938af Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e30938af Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e30938af Branch: refs/heads/YARN-7402 Commit: e30938af1270e079587e7bc06b755f9e93e660a5 Parents: c13dea8 Author: Inigo Goiri Authored: Wed May 23 11:55:31 2018 -0700 Committer: Inigo Goiri Committed: Wed May 23 11:55:31 2018 -0700 -- .../hadoop/yarn/client/cli/SchedConfCLI.java| 42 .../yarn/webapp/util/YarnWebServiceUtils.java | 17 +--- 2 files changed, 38 insertions(+), 21 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/e30938af/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java index 11bfdd7..a5f3b80 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/SchedConfCLI.java @@ -132,25 +132,35 @@ public class SchedConfCLI extends Configured implements Tool { } Client webServiceClient = Client.create(); -WebResource webResource = webServiceClient.resource(WebAppUtils. 
-getRMWebAppURLWithScheme(getConf())); -ClientResponse response = webResource.path("ws").path("v1").path("cluster") -.path("scheduler-conf").accept(MediaType.APPLICATION_JSON) -.entity(YarnWebServiceUtils.toJson(updateInfo, -SchedConfUpdateInfo.class), MediaType.APPLICATION_JSON) -.put(ClientResponse.class); -if (response != null) { - if (response.getStatus() == Status.OK.getStatusCode()) { -System.out.println("Configuration changed successfully."); -return 0; +WebResource webResource = webServiceClient +.resource(WebAppUtils.getRMWebAppURLWithScheme(getConf())); +ClientResponse response = null; + +try { + response = + webResource.path("ws").path("v1").path("cluster") + .path("scheduler-conf").accept(MediaType.APPLICATION_JSON) + .entity(YarnWebServiceUtils.toJson(updateInfo, + SchedConfUpdateInfo.class), MediaType.APPLICATION_JSON) + .put(ClientResponse.class); + if (response != null) { +if (response.getStatus() == Status.OK.getStatusCode()) { + System.out.println("Configuration changed successfully."); + return 0; +} else { + System.err.println("Configuration change unsuccessful: " + + response.getEntity(String.class)); +} } else { -System.err.println("Configuration change unsuccessful: " -+ response.getEntity(String.class)); +System.err.println("Configuration change unsuccessful: null response"); } -} else { - System.err.println("Configuration change unsuccessful: null response"); + return -1; +} finally { + if (response != null) { +response.close(); + } + webServiceClient.destroy(); } -return -1; } @VisibleForTesting http://git-wip-us.apache.org/repos/asf/hadoop/blob/e30938af/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/YarnWebServiceUtils.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/YarnWebServiceUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/YarnWebServiceUtils.java 
index 1cf1e97..e7bca2c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/YarnWebServiceUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/util/YarnWebServiceUtils.java @@ -58,11 +58,18 @@ public final class YarnWebServiceUtils { WebResource webResource = webServiceClient.resource(webAppAddress); -ClientResponse response = webResource.path("ws").path("v1") -.path("cluster").path("nodes") -.path(nodeId).accept(MediaType.APPLICATION_JSON) -.get(ClientResponse.class); -return
[06/50] [abbrv] hadoop git commit: YARN-8344. Missing nm.stop() in TestNodeManagerResync to fix testKillContainersOnResync. Contributed by Giovanni Matteo Fumarola.
YARN-8344. Missing nm.stop() in TestNodeManagerResync to fix testKillContainersOnResync. Contributed by Giovanni Matteo Fumarola. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e99e5bf1 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e99e5bf1 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e99e5bf1 Branch: refs/heads/YARN-7402 Commit: e99e5bf104e9664bc1b43a2639d87355d47a77e2 Parents: cddbbe5 Author: Inigo Goiri Authored: Wed May 23 14:15:26 2018 -0700 Committer: Inigo Goiri Committed: Wed May 23 14:15:26 2018 -0700 -- .../nodemanager/TestNodeManagerResync.java | 87 +++- 1 file changed, 48 insertions(+), 39 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/e99e5bf1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java index 97e9922..cf33775 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java @@ -150,7 +150,6 @@ public class TestNodeManagerResync { testContainerPreservationOnResyncImpl(nm, true); } - @SuppressWarnings("unchecked") protected void testContainerPreservationOnResyncImpl(TestNodeManager1 nm, boolean isWorkPreservingRestartEnabled) throws IOException, YarnException, InterruptedException { @@ -186,32 +185,35 
@@ public class TestNodeManagerResync { } } - @SuppressWarnings("unchecked") + @SuppressWarnings("resource") @Test(timeout=1) public void testNMshutdownWhenResyncThrowException() throws IOException, InterruptedException, YarnException { NodeManager nm = new TestNodeManager3(); YarnConfiguration conf = createNMConfig(); -nm.init(conf); -nm.start(); -Assert.assertEquals(1, ((TestNodeManager3) nm).getNMRegistrationCount()); -nm.getNMDispatcher().getEventHandler() -.handle(new NodeManagerEvent(NodeManagerEventType.RESYNC)); - -synchronized (isNMShutdownCalled) { - while (isNMShutdownCalled.get() == false) { -try { - isNMShutdownCalled.wait(); -} catch (InterruptedException e) { +try { + nm.init(conf); + nm.start(); + Assert.assertEquals(1, ((TestNodeManager3) nm).getNMRegistrationCount()); + nm.getNMDispatcher().getEventHandler() + .handle(new NodeManagerEvent(NodeManagerEventType.RESYNC)); + + synchronized (isNMShutdownCalled) { +while (!isNMShutdownCalled.get()) { + try { +isNMShutdownCalled.wait(); + } catch (InterruptedException e) { + } } } -} -Assert.assertTrue("NM shutdown not called.",isNMShutdownCalled.get()); -nm.stop(); + Assert.assertTrue("NM shutdown not called.", isNMShutdownCalled.get()); +} finally { + nm.stop(); +} } - @SuppressWarnings("unchecked") + @SuppressWarnings("resource") @Test(timeout=6) public void testContainerResourceIncreaseIsSynchronizedWithRMResync() throws IOException, InterruptedException, YarnException { @@ -219,28 +221,32 @@ public class TestNodeManagerResync { YarnConfiguration conf = createNMConfig(); conf.setBoolean( YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, true); -nm.init(conf); -nm.start(); -// Start a container and make sure it is in RUNNING state -((TestNodeManager4)nm).startContainer(); -// Simulate a container resource increase in a separate thread -((TestNodeManager4)nm).updateContainerResource(); -// Simulate RM restart by sending a RESYNC event -LOG.info("Sending out RESYNC event"); 
-nm.getNMDispatcher().getEventHandler().handle( -new NodeManagerEvent(NodeManagerEventType.RESYNC)); try { - syncBarrier.await(); -} catch (BrokenBarrierException e) { - e.printStackTrace(); + nm.init(conf); + nm.start(); + // Start a container and make sure it is in RUNNING state + ((TestNodeManager4) nm).startContainer(); + // Simulate a container resource increase in a separate thread +
[29/50] [abbrv] hadoop git commit: HDFS-13620. Randomize the test directory path for TestHDFSFileSystemContract. Contributed by Anbang Hu.
HDFS-13620. Randomize the test directory path for TestHDFSFileSystemContract. Contributed by Anbang Hu. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8605a385 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8605a385 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8605a385 Branch: refs/heads/YARN-7402 Commit: 8605a38514b4f7a2a549c7ecf8e1421e61bb4d67 Parents: 2a9652e Author: Inigo Goiri Authored: Fri May 25 19:43:33 2018 -0700 Committer: Inigo Goiri Committed: Fri May 25 19:43:33 2018 -0700 -- .../org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java | 6 +- 1 file changed, 5 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/8605a385/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java index 50d1e75..6da46de 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdfs; +import java.io.File; import java.io.IOException; import org.apache.hadoop.conf.Configuration; @@ -25,6 +26,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.FileSystemContractBaseTest; import org.apache.hadoop.fs.Path; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.test.GenericTestUtils; import org.junit.After; import org.junit.Before; import org.junit.Test; @@ -39,7 +41,9 @@ public class TestHDFSFileSystemContract extends FileSystemContractBaseTest { Configuration conf = new HdfsConfiguration(); 
conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, FileSystemContractBaseTest.TEST_UMASK); -cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); +File basedir = GenericTestUtils.getRandomizedTestDir(); +cluster = new MiniDFSCluster.Builder(conf, basedir).numDataNodes(2) +.build(); fs = cluster.getFileSystem(); defaultWorkingDirectory = "/user/" + UserGroupInformation.getCurrentUser().getShortUserName(); - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[27/50] [abbrv] hadoop git commit: HDFS-13619. TestAuditLoggerWithCommands fails on Windows. Contributed by Anbang Hu.
HDFS-13619. TestAuditLoggerWithCommands fails on Windows. Contributed by Anbang Hu. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/13d25289 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/13d25289 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/13d25289 Branch: refs/heads/YARN-7402 Commit: 13d25289076b39daf481fb1ee15939dbfe4a6b23 Parents: 8733012 Author: Inigo Goiri Authored: Fri May 25 13:32:34 2018 -0700 Committer: Inigo Goiri Committed: Fri May 25 13:32:34 2018 -0700 -- .../hdfs/server/namenode/TestAuditLoggerWithCommands.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/13d25289/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java index 41ee03f..222a1de 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLoggerWithCommands.java @@ -1264,8 +1264,9 @@ public class TestAuditLoggerWithCommands { } private int verifyAuditLogs(String pattern) { -int length = auditlog.getOutput().split("\n").length; -String lastAudit = auditlog.getOutput().split("\n")[length - 1]; +int length = auditlog.getOutput().split(System.lineSeparator()).length; +String lastAudit = auditlog.getOutput() +.split(System.lineSeparator())[length - 1]; assertTrue("Unexpected log!", lastAudit.matches(pattern)); return length; } - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: 
common-commits-help@hadoop.apache.org
[49/50] [abbrv] hadoop git commit: YARN-7707. [GPG] Policy generator framework. Contributed by Young Chen
YARN-7707. [GPG] Policy generator framework. Contributed by Young Chen Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f5da8ca6 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f5da8ca6 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f5da8ca6 Branch: refs/heads/YARN-7402 Commit: f5da8ca6f04b7db40fccfd00cc4ff8ca1b2da74b Parents: 46a4a94 Author: Botong Huang Authored: Fri Mar 23 17:07:10 2018 -0700 Committer: Botong Huang Committed: Tue May 29 10:48:40 2018 -0700 -- .../hadoop/yarn/conf/YarnConfiguration.java | 36 +- .../src/main/resources/yarn-default.xml | 40 +++ .../utils/FederationStateStoreFacade.java | 13 + .../pom.xml | 18 + .../globalpolicygenerator/GPGContext.java | 4 + .../globalpolicygenerator/GPGContextImpl.java | 10 + .../globalpolicygenerator/GPGPolicyFacade.java | 220 .../server/globalpolicygenerator/GPGUtils.java | 80 + .../GlobalPolicyGenerator.java | 17 + .../policygenerator/GlobalPolicy.java | 76 + .../policygenerator/NoOpGlobalPolicy.java | 36 ++ .../policygenerator/PolicyGenerator.java| 261 ++ .../UniformWeightedLocalityGlobalPolicy.java| 71 .../policygenerator/package-info.java | 24 ++ .../TestGPGPolicyFacade.java| 202 +++ .../policygenerator/TestPolicyGenerator.java| 338 +++ .../src/test/resources/schedulerInfo1.json | 134 .../src/test/resources/schedulerInfo2.json | 196 +++ 18 files changed, 1775 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5da8ca6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index 7c78e0d..b224818 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -3326,7 +3326,7 @@ public class YarnConfiguration extends Configuration { public static final boolean DEFAULT_ROUTER_WEBAPP_PARTIAL_RESULTS_ENABLED = false; - private static final String FEDERATION_GPG_PREFIX = + public static final String FEDERATION_GPG_PREFIX = FEDERATION_PREFIX + "gpg."; // The number of threads to use for the GPG scheduled executor service @@ -3344,6 +3344,40 @@ public class YarnConfiguration extends Configuration { FEDERATION_GPG_PREFIX + "subcluster.heartbeat.expiration-ms"; public static final long DEFAULT_GPG_SUBCLUSTER_EXPIRATION_MS = 180; + public static final String FEDERATION_GPG_POLICY_PREFIX = + FEDERATION_GPG_PREFIX + "policy.generator."; + + /** The interval at which the policy generator runs, default is one hour. */ + public static final String GPG_POLICY_GENERATOR_INTERVAL_MS = + FEDERATION_GPG_POLICY_PREFIX + "interval-ms"; + public static final long DEFAULT_GPG_POLICY_GENERATOR_INTERVAL_MS = -1; + + /** + * The configured policy generator class, runs NoOpGlobalPolicy by + * default. + */ + public static final String GPG_GLOBAL_POLICY_CLASS = + FEDERATION_GPG_POLICY_PREFIX + "class"; + public static final String DEFAULT_GPG_GLOBAL_POLICY_CLASS = + "org.apache.hadoop.yarn.server.globalpolicygenerator.policygenerator." + + "NoOpGlobalPolicy"; + + /** + * Whether or not the policy generator is running in read only (won't modify + * policies), default is false. + */ + public static final String GPG_POLICY_GENERATOR_READONLY = + FEDERATION_GPG_POLICY_PREFIX + "readonly"; + public static final boolean DEFAULT_GPG_POLICY_GENERATOR_READONLY = + false; + + /** + * Which sub-clusters the policy generator should blacklist. 
+ */ + public static final String GPG_POLICY_GENERATOR_BLACKLIST = + FEDERATION_GPG_POLICY_PREFIX + "blacklist"; + + // Other Configs http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5da8ca6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-
[47/50] [abbrv] hadoop git commit: YARN-7402. [GPG] Fix potential connection leak in GPGUtils. Contributed by Giovanni Matteo Fumarola.
YARN-7402. [GPG] Fix potential connection leak in GPGUtils. Contributed by Giovanni Matteo Fumarola. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c5bf22dc Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c5bf22dc Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c5bf22dc Branch: refs/heads/YARN-7402 Commit: c5bf22dc13b5bbe57b45fe81dd2d912af3b87602 Parents: f5da8ca Author: Botong Huang Authored: Wed May 23 12:45:32 2018 -0700 Committer: Botong Huang Committed: Tue May 29 10:48:40 2018 -0700 -- .../server/globalpolicygenerator/GPGUtils.java | 31 +--- 1 file changed, 20 insertions(+), 11 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5bf22dc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGUtils.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGUtils.java index 429bec4..31cee1c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGUtils.java @@ -18,21 +18,22 @@ package org.apache.hadoop.yarn.server.globalpolicygenerator; +import static javax.servlet.http.HttpServletResponse.SC_OK; + import java.util.HashMap; import java.util.Map; import java.util.Set; -import javax.servlet.http.HttpServletResponse; import javax.ws.rs.core.MediaType; import 
org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterIdInfo; import com.sun.jersey.api.client.Client; import com.sun.jersey.api.client.ClientResponse; import com.sun.jersey.api.client.WebResource; -import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; -import org.apache.hadoop.yarn.server.federation.store.records.SubClusterIdInfo; /** * GPGUtils contains utility functions for the GPG. @@ -53,15 +54,23 @@ public final class GPGUtils { T obj = null; WebResource webResource = client.resource(webAddr); -ClientResponse response = webResource.path("ws/v1/cluster").path(path) -.accept(MediaType.APPLICATION_XML).get(ClientResponse.class); -if (response.getStatus() == HttpServletResponse.SC_OK) { - obj = response.getEntity(returnType); -} else { - throw new YarnRuntimeException("Bad response from remote web service: " - + response.getStatus()); +ClientResponse response = null; +try { + response = webResource.path("ws/v1/cluster").path(path) + .accept(MediaType.APPLICATION_XML).get(ClientResponse.class); + if (response.getStatus() == SC_OK) { +obj = response.getEntity(returnType); + } else { +throw new YarnRuntimeException( +"Bad response from remote web service: " + response.getStatus()); + } + return obj; +} finally { + if (response != null) { +response.close(); + } + client.destroy(); } -return obj; } /** - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[38/50] [abbrv] hadoop git commit: HDFS-13591. TestDFSShell#testSetrepLow fails on Windows. Contributed by Anbang Hu.
HDFS-13591. TestDFSShell#testSetrepLow fails on Windows. Contributed by Anbang Hu. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9dbf4f01 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9dbf4f01 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9dbf4f01 Branch: refs/heads/YARN-7402 Commit: 9dbf4f01665d5480a70395a24519cbab5d4db0c5 Parents: 91d7c74 Author: Inigo Goiri Authored: Mon May 28 16:34:02 2018 -0700 Committer: Inigo Goiri Committed: Mon May 28 16:34:02 2018 -0700 -- .../test/java/org/apache/hadoop/hdfs/TestDFSShell.java| 10 +- 1 file changed, 5 insertions(+), 5 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/9dbf4f01/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java index e82863a..c352dc9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java @@ -2829,11 +2829,11 @@ public class TestDFSShell { System.setErr(origErr); } - assertEquals("Error message is not the expected error message", - "setrep: Requested replication factor of 1 is less than " - + "the required minimum of 2 for /tmp/TestDFSShell-" - + "testSetrepLow/testFileForSetrepLow\n", - bao.toString()); + assertTrue("Error message is not the expected error message" + + bao.toString(), bao.toString().startsWith( + "setrep: Requested replication factor of 1 is less than " + + "the required minimum of 2 for /tmp/TestDFSShell-" + + "testSetrepLow/testFileForSetrepLow")); } finally { shell.close(); cluster.shutdown(); - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: 
common-commits-help@hadoop.apache.org
hadoop git commit: YARN-6648. [GPG] Add SubClusterCleaner in Global Policy Generator. (botong)
Repository: hadoop Updated Branches: refs/heads/YARN-7402 1702dfa7f -> fd03fd45d YARN-6648. [GPG] Add SubClusterCleaner in Global Policy Generator. (botong) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fd03fd45 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fd03fd45 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fd03fd45 Branch: refs/heads/YARN-7402 Commit: fd03fd45df84b45aefee347a6c1f89a05cd7fe68 Parents: 1702dfa Author: Botong Huang <bot...@apache.org> Authored: Thu Feb 1 14:43:48 2018 -0800 Committer: Botong Huang <bot...@apache.org> Committed: Thu Feb 1 14:43:48 2018 -0800 -- .../dev-support/findbugs-exclude.xml| 5 + .../hadoop/yarn/conf/YarnConfiguration.java | 18 +++ .../src/main/resources/yarn-default.xml | 24 .../store/impl/MemoryFederationStateStore.java | 13 ++ .../utils/FederationStateStoreFacade.java | 41 ++- .../GlobalPolicyGenerator.java | 92 ++- .../subclustercleaner/SubClusterCleaner.java| 109 + .../subclustercleaner/package-info.java | 19 +++ .../TestSubClusterCleaner.java | 118 +++ 9 files changed, 409 insertions(+), 30 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd03fd45/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml -- diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml index 6a10312..d4ab8f5 100644 --- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml +++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml @@ -380,6 +380,11 @@ + + + + + http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd03fd45/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index 271b666..eabe413 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -3160,6 +3160,24 @@ public class YarnConfiguration extends Configuration { public static final boolean DEFAULT_ROUTER_WEBAPP_PARTIAL_RESULTS_ENABLED = false; + private static final String FEDERATION_GPG_PREFIX = + FEDERATION_PREFIX + "gpg."; + + // The number of threads to use for the GPG scheduled executor service + public static final String GPG_SCHEDULED_EXECUTOR_THREADS = + FEDERATION_GPG_PREFIX + "scheduled.executor.threads"; + public static final int DEFAULT_GPG_SCHEDULED_EXECUTOR_THREADS = 10; + + // The interval at which the subcluster cleaner runs, -1 means disabled + public static final String GPG_SUBCLUSTER_CLEANER_INTERVAL_MS = + FEDERATION_GPG_PREFIX + "subcluster.cleaner.interval-ms"; + public static final long DEFAULT_GPG_SUBCLUSTER_CLEANER_INTERVAL_MS = -1; + + // The expiration time for a subcluster heartbeat, default is 30 minutes + public static final String GPG_SUBCLUSTER_EXPIRATION_MS = + FEDERATION_GPG_PREFIX + "subcluster.heartbeat.expiration-ms"; + public static final long DEFAULT_GPG_SUBCLUSTER_EXPIRATION_MS = 180; + // Other Configs http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd03fd45/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml index 017799a..899c210 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml @@ -3403,6 +3403,30 @@ + The number of threads to use for the GPG scheduled executor service. + +yarn.federation.gpg.scheduled.executor.threads +10 + + + + + The interval at which the subcluster cleaner runs, -1 means disabled. + +yar
[39/50] [abbrv] hadoop git commit: YARN-8592. [UI2] rmip:port/ui2 endpoint shows a blank page in windows OS and Chrome browser. Contributed by Akhil PB.
YARN-8592. [UI2] rmip:port/ui2 endpoint shows a blank page in windows OS and Chrome browser. Contributed by Akhil PB. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/97870ec1 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/97870ec1 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/97870ec1 Branch: refs/heads/YARN-7402 Commit: 97870ec1f6e40ee86f29411d71b6c687bbed Parents: 1ea8116 Author: Sunil G Authored: Thu Aug 2 16:10:54 2018 +0530 Committer: Sunil G Committed: Thu Aug 2 16:10:54 2018 +0530 -- .../src/main/webapp/app/utils/date-utils.js | 14 ++ 1 file changed, 10 insertions(+), 4 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/97870ec1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/date-utils.js -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/date-utils.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/date-utils.js index 6a9780c..4abdc72 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/date-utils.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/date-utils.js @@ -16,12 +16,18 @@ * limitations under the License. 
*/ - const defaultTz = "America/Los_Angeles"; const getDefaultTimezone = () => { - return moment.tz.guess() || defaultTz; + let timezone = defaultTz; + try { +timezone = moment.tz.guess(); + } catch (e) { +console.log(e); + } + return timezone || defaultTz; }; -export const convertTimestampWithTz = (timestamp, format = "YYYY/MM/DD") => - moment.tz(parseInt(timestamp), getDefaultTimezone()).format(format); +export const convertTimestampWithTz = (timestamp, format = "YYYY/MM/DD") => { + return moment.tz(parseInt(timestamp), getDefaultTimezone()).format(format); +}; - To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org For additional commands, e-mail: common-commits-help@hadoop.apache.org
[47/50] [abbrv] hadoop git commit: Updating GPG module pom version post rebase.
Updating GPG module pom version post rebase. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3213acd0 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3213acd0 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3213acd0 Branch: refs/heads/YARN-7402 Commit: 3213acd04980520f37dd1fa47c18c4fd7a3ca339 Parents: f83fc85 Author: Subru Krishnan Authored: Wed May 30 12:59:22 2018 -0700 Committer: Botong Huang Committed: Thu Aug 2 09:59:48 2018 -0700 -- .../hadoop-yarn-server-globalpolicygenerator/pom.xml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/3213acd0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/pom.xml -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/pom.xml index 9398b0b..c137c9e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/pom.xml @@ -19,12 +19,12 @@ hadoop-yarn-server org.apache.hadoop -3.1.0-SNAPSHOT +3.2.0-SNAPSHOT 4.0.0 org.apache.hadoop hadoop-yarn-server-globalpolicygenerator - 3.1.0-SNAPSHOT + 3.2.0-SNAPSHOT hadoop-yarn-server-globalpolicygenerator - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[45/50] [abbrv] hadoop git commit: YARN-3660. [GPG] Federation Global Policy Generator (service hook only). (Contributed by Botong Huang via curino)
YARN-3660. [GPG] Federation Global Policy Generator (service hook only). (Contributed by Botong Huang via curino) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/48a83794 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/48a83794 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/48a83794 Branch: refs/heads/YARN-7402 Commit: 48a83794a7f7b6edf815f09cce8cfc054a97 Parents: 7526815 Author: Carlo Curino Authored: Thu Jan 18 17:21:06 2018 -0800 Committer: Botong Huang Committed: Thu Aug 2 09:59:48 2018 -0700 -- hadoop-project/pom.xml | 6 + hadoop-yarn-project/hadoop-yarn/bin/yarn| 5 + hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd| 55 +--- .../hadoop-yarn/conf/yarn-env.sh| 12 ++ .../pom.xml | 98 + .../globalpolicygenerator/GPGContext.java | 31 + .../globalpolicygenerator/GPGContextImpl.java | 41 ++ .../GlobalPolicyGenerator.java | 136 +++ .../globalpolicygenerator/package-info.java | 19 +++ .../TestGlobalPolicyGenerator.java | 38 ++ .../hadoop-yarn/hadoop-yarn-server/pom.xml | 1 + hadoop-yarn-project/pom.xml | 4 + 12 files changed, 424 insertions(+), 22 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/48a83794/hadoop-project/pom.xml -- diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index 387a3da..ede6af4 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -446,6 +446,12 @@ org.apache.hadoop +hadoop-yarn-server-globalpolicygenerator +${project.version} + + + +org.apache.hadoop hadoop-yarn-services-core ${hadoop.version} http://git-wip-us.apache.org/repos/asf/hadoop/blob/48a83794/hadoop-yarn-project/hadoop-yarn/bin/yarn -- diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn b/hadoop-yarn-project/hadoop-yarn/bin/yarn index 69afe6f..8061859 100755 --- a/hadoop-yarn-project/hadoop-yarn/bin/yarn +++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn @@ -39,6 +39,7 @@ function hadoop_usage hadoop_add_subcommand "container" client "prints container(s) 
report" hadoop_add_subcommand "daemonlog" admin "get/set the log level for each daemon" hadoop_add_subcommand "envvars" client "display computed Hadoop environment variables" + hadoop_add_subcommand "globalpolicygenerator" daemon "run the Global Policy Generator" hadoop_add_subcommand "jar " client "run a jar file" hadoop_add_subcommand "logs" client "dump container logs" hadoop_add_subcommand "node" admin "prints node report(s)" @@ -103,6 +104,10 @@ ${HADOOP_COMMON_HOME}/${HADOOP_COMMON_LIB_JARS_DIR}" echo "HADOOP_TOOLS_LIB_JARS_DIR='${HADOOP_TOOLS_LIB_JARS_DIR}'" exit 0 ;; +globalpolicygenerator) + HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true" + HADOOP_CLASSNAME='org.apache.hadoop.yarn.server.globalpolicygenerator.GlobalPolicyGenerator' +;; jar) HADOOP_CLASSNAME=org.apache.hadoop.util.RunJar ;; http://git-wip-us.apache.org/repos/asf/hadoop/blob/48a83794/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd -- diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd index e1ac112..bebfd71 100644 --- a/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd +++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd @@ -134,6 +134,10 @@ if "%1" == "--loglevel" ( set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-server\yarn-server-router\target\classes ) + if exist %HADOOP_YARN_HOME%\yarn-server\yarn-server-globalpolicygenerator\target\classes ( +set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\yarn-server\yarn-server-globalpolicygenerator\target\classes + ) + if exist %HADOOP_YARN_HOME%\build\test\classes ( set CLASSPATH=%CLASSPATH%;%HADOOP_YARN_HOME%\build\test\classes ) @@ -155,7 +159,7 @@ if "%1" == "--loglevel" ( set yarncommands=resourcemanager nodemanager proxyserver rmadmin version jar ^ application applicationattempt container node queue logs daemonlog historyserver ^ - timelineserver timelinereader router classpath + timelineserver timelinereader router globalpolicygenerator classpath for %%i in ( %yarncommands% ) do ( if %yarn-command%
[26/50] [abbrv] hadoop git commit: HDFS-13322 fuse dfs - uid persists when switching between ticket caches. Contributed by Istvan Fajth.
HDFS-13322 fuse dfs - uid persists when switching between ticket caches. Contributed by Istvan Fajth. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/40f9b0c5 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/40f9b0c5 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/40f9b0c5 Branch: refs/heads/YARN-7402 Commit: 40f9b0c5c13f40921b6976589543a04efa489f93 Parents: c835fc0 Author: Aaron Fabbri Authored: Tue Jul 31 15:21:38 2018 -0700 Committer: Aaron Fabbri Committed: Tue Jul 31 18:44:49 2018 -0700 -- .../src/main/native/fuse-dfs/fuse_connect.c| 17 + 1 file changed, 13 insertions(+), 4 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/40f9b0c5/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_connect.c -- diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_connect.c b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_connect.c index 6ee4ad5..f08917a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_connect.c +++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/src/main/native/fuse-dfs/fuse_connect.c @@ -192,7 +192,7 @@ int fuseConnectInit(const char *nnUri, int port) } /** - * Compare two libhdfs connections by username + * Compare two libhdfs connections by username and Kerberos ticket cache path * * @param aThe first libhdfs connection * @param bThe second libhdfs connection @@ -201,22 +201,26 @@ int fuseConnectInit(const char *nnUri, int port) */ static int hdfsConnCompare(const struct hdfsConn *a, const struct hdfsConn *b) { - return strcmp(a->usrname, b->usrname); + int rc = strcmp(a->usrname, b->usrname); + if (rc) return rc; + return gHdfsAuthConf == AUTH_CONF_KERBEROS && strcmp(a->kpath, b->kpath); } /** * Find a libhdfs connection by username * * @param usrname The username to look up + * @param kpath The Kerberos ticket cache file 
path * * @returnThe connection, or NULL if none could be found */ -static struct hdfsConn* hdfsConnFind(const char *usrname) +static struct hdfsConn* hdfsConnFind(const char *usrname, const char *kpath) { struct hdfsConn exemplar; memset(, 0, sizeof(exemplar)); exemplar.usrname = (char*)usrname; + exemplar.kpath = (char*)kpath; return RB_FIND(hdfsConnTree, , ); } @@ -542,8 +546,13 @@ static int fuseConnect(const char *usrname, struct fuse_context *ctx, int ret; struct hdfsConn* conn; + char kpath[PATH_MAX] = { 0 }; + if (gHdfsAuthConf == AUTH_CONF_KERBEROS) { +findKerbTicketCachePath(ctx, kpath, sizeof(kpath)); + } + pthread_mutex_lock(); - conn = hdfsConnFind(usrname); + conn = hdfsConnFind(usrname, kpath); if (!conn) { ret = fuseNewConnect(usrname, ctx, ); if (ret) { - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[13/50] [abbrv] hadoop git commit: HDDS-302. Fix javadoc and add implementation details in ContainerStateMachine. Contributed by Shashikant Banerjee.
HDDS-302. Fix javadoc and add implementation details in ContainerStateMachine. Contributed by Shashikant Banerjee. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/952dc2fd Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/952dc2fd Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/952dc2fd Branch: refs/heads/YARN-7402 Commit: 952dc2fd557f9aaf0f144ee32d0b7731a84bad73 Parents: 3108d27 Author: Mukul Kumar Singh Authored: Mon Jul 30 18:45:58 2018 +0530 Committer: Mukul Kumar Singh Committed: Mon Jul 30 18:45:58 2018 +0530 -- .../hadoop/hdds/scm/XceiverClientRatis.java | 30 ++ .../java/org/apache/hadoop/hdds/HddsUtils.java | 33 .../server/ratis/ContainerStateMachine.java | 14 - 3 files changed, 49 insertions(+), 28 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/952dc2fd/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java -- diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java index 0effa8f..2541415 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hdds.scm; import com.google.common.base.Preconditions; +import org.apache.hadoop.hdds.HddsUtils; import org.apache.ratis.shaded.com.google.protobuf .InvalidProtocolBufferException; import org.apache.hadoop.conf.Configuration; @@ -183,34 +184,9 @@ public final class XceiverClientRatis extends XceiverClientSpi { return Objects.requireNonNull(client.get(), "client is null"); } - private boolean isReadOnly(ContainerCommandRequestProto proto) { -switch (proto.getCmdType()) { -case ReadContainer: -case ReadChunk: -case ListKey: -case GetKey: -case GetSmallFile: -case ListContainer: -case 
ListChunk: - return true; -case CloseContainer: -case WriteChunk: -case UpdateContainer: -case CompactChunk: -case CreateContainer: -case DeleteChunk: -case DeleteContainer: -case DeleteKey: -case PutKey: -case PutSmallFile: -default: - return false; -} - } - private RaftClientReply sendRequest(ContainerCommandRequestProto request) throws IOException { -boolean isReadOnlyRequest = isReadOnly(request); +boolean isReadOnlyRequest = HddsUtils.isReadOnly(request); ByteString byteString = request.toByteString(); LOG.debug("sendCommand {} {}", isReadOnlyRequest, request); final RaftClientReply reply = isReadOnlyRequest ? @@ -222,7 +198,7 @@ public final class XceiverClientRatis extends XceiverClientSpi { private CompletableFuture sendRequestAsync( ContainerCommandRequestProto request) throws IOException { -boolean isReadOnlyRequest = isReadOnly(request); +boolean isReadOnlyRequest = HddsUtils.isReadOnly(request); ByteString byteString = request.toByteString(); LOG.debug("sendCommandAsync {} {}", isReadOnlyRequest, request); return isReadOnlyRequest ? 
getClient().sendReadOnlyAsync(() -> byteString) : http://git-wip-us.apache.org/repos/asf/hadoop/blob/952dc2fd/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java -- diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java index 48c6dce..33bf90c 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java @@ -24,6 +24,7 @@ import com.google.common.net.HostAndPort; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.net.DNS; import org.apache.hadoop.net.NetUtils; @@ -315,4 +316,36 @@ public final class HddsUtils { return name; } + /** + * Checks if the container command is read only or not. + * @param proto ContainerCommand Request proto + * @return True if its readOnly , false otherwise. + */ + public static boolean isReadOnly( + ContainerProtos.ContainerCommandRequestProto proto) { +switch (proto.getCmdType()) { +case ReadContainer: +case ReadChunk: +case ListKey: +case
[21/50] [abbrv] hadoop git commit: YARN-8175. Add support for Node Labels in SLS. Contributed by Abhishek Modi.
YARN-8175. Add support for Node Labels in SLS. Contributed by Abhishek Modi. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9fea5c9e Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9fea5c9e Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9fea5c9e Branch: refs/heads/YARN-7402 Commit: 9fea5c9ee76bd36f273ae93afef5f3ef3c477a53 Parents: b28bdc7 Author: Inigo Goiri Authored: Tue Jul 31 09:36:34 2018 -0700 Committer: Inigo Goiri Committed: Tue Jul 31 09:36:34 2018 -0700 -- .../org/apache/hadoop/yarn/sls/SLSRunner.java | 93 +++- .../hadoop/yarn/sls/appmaster/AMSimulator.java | 9 +- .../yarn/sls/appmaster/MRAMSimulator.java | 5 +- .../yarn/sls/appmaster/StreamAMSimulator.java | 5 +- .../hadoop/yarn/sls/conf/SLSConfiguration.java | 1 + .../yarn/sls/nodemanager/NMSimulator.java | 13 ++- .../apache/hadoop/yarn/sls/utils/SLSUtils.java | 58 .../yarn/sls/appmaster/TestAMSimulator.java | 35 +++- .../hadoop/yarn/sls/utils/TestSLSUtils.java | 64 ++ .../test/resources/nodes-with-resources.json| 8 +- .../hadoop/yarn/client/cli/RMAdminCLI.java | 71 +-- .../yarn/client/util/YarnClientUtils.java | 77 12 files changed, 301 insertions(+), 138 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/9fea5c9e/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java -- diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java index e859732..1e83e40 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/SLSRunner.java @@ -60,6 +60,7 @@ import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; import org.apache.hadoop.yarn.api.records.ExecutionType; import org.apache.hadoop.yarn.api.records.NodeId; +import 
org.apache.hadoop.yarn.api.records.NodeLabel; import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.api.records.ReservationId; import org.apache.hadoop.yarn.api.records.Resource; @@ -298,30 +299,20 @@ public class SLSRunner extends Configured implements Tool { SLSConfiguration.NM_RESOURCE_UTILIZATION_RATIO, SLSConfiguration.NM_RESOURCE_UTILIZATION_RATIO_DEFAULT); // nm information (fetch from topology file, or from sls/rumen json file) -Map nodeResourceMap = new HashMap<>(); -Set nodeSet; +Set nodeSet = null; if (nodeFile.isEmpty()) { for (String inputTrace : inputTraces) { switch (inputType) { case SLS: nodeSet = SLSUtils.parseNodesFromSLSTrace(inputTrace); - for (String node : nodeSet) { -nodeResourceMap.put(node, null); - } break; case RUMEN: nodeSet = SLSUtils.parseNodesFromRumenTrace(inputTrace); - for (String node : nodeSet) { -nodeResourceMap.put(node, null); - } break; case SYNTH: stjp = new SynthTraceJobProducer(getConf(), new Path(inputTraces[0])); nodeSet = SLSUtils.generateNodes(stjp.getNumNodes(), stjp.getNumNodes()/stjp.getNodesPerRack()); - for (String node : nodeSet) { -nodeResourceMap.put(node, null); - } break; default: throw new YarnException("Input configuration not recognized, " @@ -329,11 +320,11 @@ public class SLSRunner extends Configured implements Tool { } } } else { - nodeResourceMap = SLSUtils.parseNodesFromNodeFile(nodeFile, + nodeSet = SLSUtils.parseNodesFromNodeFile(nodeFile, nodeManagerResource); } -if (nodeResourceMap.size() == 0) { +if (nodeSet == null || nodeSet.isEmpty()) { throw new YarnException("No node! Please configure nodes."); } @@ -344,20 +335,21 @@ public class SLSRunner extends Configured implements Tool { SLSConfiguration.RUNNER_POOL_SIZE_DEFAULT); ExecutorService executorService = Executors. 
newFixedThreadPool(threadPoolSize); -for (Map.Entry entry : nodeResourceMap.entrySet()) { +for (NodeDetails nodeDetails : nodeSet) { executorService.submit(new Runnable() { @Override public void run() { try { // we randomize the heartbeat start time from zero to 1 interval NMSimulator nm = new NMSimulator(); Resource nmResource = nodeManagerResource; -String hostName = entry.getKey(); -
[37/50] [abbrv] hadoop git commit: HDDS-310. VolumeSet shutdown hook fails on datanode restart. Contributed by Bharat Viswanadham.
HDDS-310. VolumeSet shutdown hook fails on datanode restart. Contributed by Bharat Viswanadham. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/41da2050 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/41da2050 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/41da2050 Branch: refs/heads/YARN-7402 Commit: 41da2050bdec14709a86fa8a5cf7da82415fd989 Parents: 735b492 Author: Nanda kumar Authored: Thu Aug 2 11:35:22 2018 +0530 Committer: Nanda kumar Committed: Thu Aug 2 11:35:22 2018 +0530 -- .../ozone/container/common/volume/VolumeSet.java | 15 +-- 1 file changed, 13 insertions(+), 2 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/41da2050/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java -- diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java index 4a1487b..06f48fc 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java @@ -167,7 +167,7 @@ public class VolumeSet { // Ensure volume threads are stopped and scm df is saved during shutdown. shutdownHook = () -> { - shutdown(); + saveVolumeSetUsed(); }; ShutdownHookManager.get().addShutdownHook(shutdownHook, SHUTDOWN_HOOK_PRIORITY); @@ -303,7 +303,11 @@ public class VolumeSet { return choosingPolicy.chooseVolume(getVolumesList(), containerSize); } - public void shutdown() { + /** + * This method calls shutdown on each volume to shut down the volume usage + * thread and write scmUsed on each volume.
+ */ + private void saveVolumeSetUsed() { for (HddsVolume hddsVolume : volumeMap.values()) { try { hddsVolume.shutdown(); } catch (Exception ex) { ex); } } + } + /** + * Shuts down the volume set: calls + * {@link VolumeSet#saveVolumeSetUsed} and removes the shutdown hook. + */ + public void shutdown() { +saveVolumeSetUsed(); if (shutdownHook != null) { ShutdownHookManager.get().removeShutdownHook(shutdownHook); } - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[49/50] [abbrv] hadoop git commit: YARN-7707. [GPG] Policy generator framework. Contributed by Young Chen
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6800cf70/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/resources/schedulerInfo2.json -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/resources/schedulerInfo2.json b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/resources/schedulerInfo2.json new file mode 100644 index 000..2ff879e --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/test/resources/schedulerInfo2.json @@ -0,0 +1,196 @@ + { + "type": "capacityScheduler", + "capacity": 100.0, + "usedCapacity": 0.0, + "maxCapacity": 100.0, + "queueName": "root", + "queues": { +"queue": [ + { +"type": "capacitySchedulerLeafQueueInfo", +"capacity": 100.0, +"usedCapacity": 0.0, +"maxCapacity": 100.0, +"absoluteCapacity": 100.0, +"absoluteMaxCapacity": 100.0, +"absoluteUsedCapacity": 0.0, +"numApplications": 484, +"queueName": "default", +"state": "RUNNING", +"resourcesUsed": { + "memory": 0, + "vCores": 0 +}, +"hideReservationQueues": false, +"nodeLabels": [ + "*" +], +"numActiveApplications": 484, +"numPendingApplications": 0, +"numContainers": 0, +"maxApplications": 1, +"maxApplicationsPerUser": 1, +"userLimit": 100, +"users": { + "user": [ +{ + "username": "Default", + "resourcesUsed": { +"memory": 0, +"vCores": 0 + }, + "numPendingApplications": 0, + "numActiveApplications": 468, + "AMResourceUsed": { +"memory": 30191616, +"vCores": 468 + }, + "userResourceLimit": { +"memory": 31490048, +"vCores": 7612 + } +} + ] +}, +"userLimitFactor": 1.0, +"AMResourceLimit": { + "memory": 31490048, + "vCores": 7612 +}, +"usedAMResource": { + "memory": 30388224, + "vCores": 532 +}, +"userAMResourceLimit": { + "memory": 31490048, + "vCores": 7612 +}, +"preemptionDisabled": true + }, + { +"type": "capacitySchedulerLeafQueueInfo", +"capacity": 
100.0, +"usedCapacity": 0.0, +"maxCapacity": 100.0, +"absoluteCapacity": 100.0, +"absoluteMaxCapacity": 100.0, +"absoluteUsedCapacity": 0.0, +"numApplications": 484, +"queueName": "default2", +"state": "RUNNING", +"resourcesUsed": { + "memory": 0, + "vCores": 0 +}, +"hideReservationQueues": false, +"nodeLabels": [ + "*" +], +"numActiveApplications": 484, +"numPendingApplications": 0, +"numContainers": 0, +"maxApplications": 1, +"maxApplicationsPerUser": 1, +"userLimit": 100, +"users": { + "user": [ +{ + "username": "Default", + "resourcesUsed": { +"memory": 0, +"vCores": 0 + }, + "numPendingApplications": 0, + "numActiveApplications": 468, + "AMResourceUsed": { +"memory": 30191616, +"vCores": 468 + }, + "userResourceLimit": { +"memory": 31490048, +"vCores": 7612 + } +} + ] +}, +"userLimitFactor": 1.0, +"AMResourceLimit": { + "memory": 31490048, + "vCores": 7612 +}, +"usedAMResource": { + "memory": 30388224, + "vCores": 532 +}, +"userAMResourceLimit": { + "memory": 31490048, + "vCores": 7612 +}, +"preemptionDisabled": true + } +] + }, + "health": { +"lastrun": 1517951638085, +"operationsInfo": { + "entry": { +"key":
[50/50] [abbrv] hadoop git commit: YARN-7707. [GPG] Policy generator framework. Contributed by Young Chen
YARN-7707. [GPG] Policy generator framework. Contributed by Young Chen Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6800cf70 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6800cf70 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6800cf70 Branch: refs/heads/YARN-7402 Commit: 6800cf7015d81cc0085ad0f9159e246842e72187 Parents: f833e1b Author: Botong Huang Authored: Fri Mar 23 17:07:10 2018 -0700 Committer: Botong Huang Committed: Thu Aug 2 09:59:48 2018 -0700 -- .../hadoop/yarn/conf/YarnConfiguration.java | 36 +- .../src/main/resources/yarn-default.xml | 40 +++ .../utils/FederationStateStoreFacade.java | 13 + .../pom.xml | 18 + .../globalpolicygenerator/GPGContext.java | 4 + .../globalpolicygenerator/GPGContextImpl.java | 10 + .../globalpolicygenerator/GPGPolicyFacade.java | 220 .../server/globalpolicygenerator/GPGUtils.java | 80 + .../GlobalPolicyGenerator.java | 17 + .../policygenerator/GlobalPolicy.java | 76 + .../policygenerator/NoOpGlobalPolicy.java | 36 ++ .../policygenerator/PolicyGenerator.java| 261 ++ .../UniformWeightedLocalityGlobalPolicy.java| 71 .../policygenerator/package-info.java | 24 ++ .../TestGPGPolicyFacade.java| 202 +++ .../policygenerator/TestPolicyGenerator.java| 338 +++ .../src/test/resources/schedulerInfo1.json | 134 .../src/test/resources/schedulerInfo2.json | 196 +++ 18 files changed, 1775 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/6800cf70/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index ec88411..61535fc 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -3342,7 +3342,7 @@ public class YarnConfiguration extends Configuration { public static final boolean DEFAULT_ROUTER_WEBAPP_PARTIAL_RESULTS_ENABLED = false; - private static final String FEDERATION_GPG_PREFIX = + public static final String FEDERATION_GPG_PREFIX = FEDERATION_PREFIX + "gpg."; // The number of threads to use for the GPG scheduled executor service @@ -3360,6 +3360,40 @@ public class YarnConfiguration extends Configuration { FEDERATION_GPG_PREFIX + "subcluster.heartbeat.expiration-ms"; public static final long DEFAULT_GPG_SUBCLUSTER_EXPIRATION_MS = 180; + public static final String FEDERATION_GPG_POLICY_PREFIX = + FEDERATION_GPG_PREFIX + "policy.generator."; + + /** The interval at which the policy generator runs, default is one hour. */ + public static final String GPG_POLICY_GENERATOR_INTERVAL_MS = + FEDERATION_GPG_POLICY_PREFIX + "interval-ms"; + public static final long DEFAULT_GPG_POLICY_GENERATOR_INTERVAL_MS = -1; + + /** + * The configured policy generator class, runs NoOpGlobalPolicy by + * default. + */ + public static final String GPG_GLOBAL_POLICY_CLASS = + FEDERATION_GPG_POLICY_PREFIX + "class"; + public static final String DEFAULT_GPG_GLOBAL_POLICY_CLASS = + "org.apache.hadoop.yarn.server.globalpolicygenerator.policygenerator." + + "NoOpGlobalPolicy"; + + /** + * Whether or not the policy generator is running in read only (won't modify + * policies), default is false. + */ + public static final String GPG_POLICY_GENERATOR_READONLY = + FEDERATION_GPG_POLICY_PREFIX + "readonly"; + public static final boolean DEFAULT_GPG_POLICY_GENERATOR_READONLY = + false; + + /** + * Which sub-clusters the policy generator should blacklist. 
+ */ + public static final String GPG_POLICY_GENERATOR_BLACKLIST = + FEDERATION_GPG_POLICY_PREFIX + "blacklist"; + + // Other Configs http://git-wip-us.apache.org/repos/asf/hadoop/blob/6800cf70/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-
[11/50] [abbrv] hadoop git commit: YARN-8591. [ATSv2] NPE while checking for entity acl in non-secure cluster. Contributed by Rohith Sharma K S.
YARN-8591. [ATSv2] NPE while checking for entity acl in non-secure cluster. Contributed by Rohith Sharma K S. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/63e08ec0 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/63e08ec0 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/63e08ec0 Branch: refs/heads/YARN-7402 Commit: 63e08ec071852640babea9e39780327a0907712a Parents: 0857f11 Author: Sunil G Authored: Mon Jul 30 14:48:04 2018 +0530 Committer: Sunil G Committed: Mon Jul 30 14:48:04 2018 +0530 -- .../server/timelineservice/reader/TimelineReaderWebServices.java | 3 ++- .../reader/TestTimelineReaderWebServicesBasicAcl.java| 4 2 files changed, 6 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/63e08ec0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java index 7f96bfb..b10b705 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/main/java/org/apache/hadoop/yarn/server/timelineservice/reader/TimelineReaderWebServices.java @@ -3532,7 +3532,8 @@ public class TimelineReaderWebServices { static boolean checkAccess(TimelineReaderManager readerManager, UserGroupInformation ugi, String entityUser) { if 
(isDisplayEntityPerUserFilterEnabled(readerManager.getConfig())) { - if (!validateAuthUserWithEntityUser(readerManager, ugi, entityUser)) { + if (ugi != null && !validateAuthUserWithEntityUser(readerManager, ugi, + entityUser)) { String userName = ugi.getShortUserName(); String msg = "User " + userName + " is not allowed to read TimelineService V2 data."; http://git-wip-us.apache.org/repos/asf/hadoop/blob/63e08ec0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesBasicAcl.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesBasicAcl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesBasicAcl.java index 4239bf0..6651457 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesBasicAcl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice/src/test/java/org/apache/hadoop/yarn/server/timelineservice/reader/TestTimelineReaderWebServicesBasicAcl.java @@ -88,6 +88,10 @@ public class TestTimelineReaderWebServicesBasicAcl { Assert.assertFalse(TimelineReaderWebServices .validateAuthUserWithEntityUser(manager, null, user1)); +// true because ugi is null +Assert.assertTrue( +TimelineReaderWebServices.checkAccess(manager, null, user1)); + // incoming ugi is admin asking for entity owner user1 Assert.assertTrue( TimelineReaderWebServices.checkAccess(manager, adminUgi, user1)); - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: 
common-commits-h...@hadoop.apache.org
[46/50] [abbrv] hadoop git commit: fix build after rebase
fix build after rebase Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b8e71808 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b8e71808 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b8e71808 Branch: refs/heads/YARN-7402 Commit: b8e718082bb6a6a361deb64cd142019b09e5b3d5 Parents: 3213acd Author: Botong Huang Authored: Fri Jul 13 21:29:19 2018 -0700 Committer: Botong Huang Committed: Thu Aug 2 09:59:48 2018 -0700 -- .../yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java | 2 +- .../globalpolicygenerator/subclustercleaner/SubClusterCleaner.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8e71808/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java index 88b9f2b..1ae07f3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GlobalPolicyGenerator.java @@ -22,7 +22,7 @@ import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; -import org.apache.commons.lang.time.DurationFormatUtils; +import 
org.apache.commons.lang3.time.DurationFormatUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.service.CompositeService; http://git-wip-us.apache.org/repos/asf/hadoop/blob/b8e71808/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/subclustercleaner/SubClusterCleaner.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/subclustercleaner/SubClusterCleaner.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/subclustercleaner/SubClusterCleaner.java index dad5121..6410a6d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/subclustercleaner/SubClusterCleaner.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/subclustercleaner/SubClusterCleaner.java @@ -21,7 +21,7 @@ package org.apache.hadoop.yarn.server.globalpolicygenerator.subclustercleaner; import java.util.Date; import java.util.Map; -import org.apache.commons.lang.time.DurationFormatUtils; +import org.apache.commons.lang3.time.DurationFormatUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[40/50] [abbrv] hadoop git commit: HDDS-304. Process ContainerAction from datanode heartbeat in SCM. Contributed by Nanda Kumar.
HDDS-304. Process ContainerAction from datanode heartbeat in SCM. Contributed by Nanda Kumar. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7c368575 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7c368575 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7c368575 Branch: refs/heads/YARN-7402 Commit: 7c368575a319f5ba98019418166524bac982086f Parents: 97870ec Author: Mukul Kumar Singh Authored: Thu Aug 2 17:34:17 2018 +0530 Committer: Mukul Kumar Singh Committed: Thu Aug 2 17:34:17 2018 +0530 -- .../scm/container/ContainerActionsHandler.java | 60 + .../hadoop/hdds/scm/events/SCMEvents.java | 16 - .../server/SCMDatanodeHeartbeatDispatcher.java | 22 +++ .../scm/server/StorageContainerManager.java | 3 + .../container/TestContainerActionsHandler.java | 68 5 files changed, 168 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c368575/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerActionsHandler.java -- diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerActionsHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerActionsHandler.java new file mode 100644 index 000..ce399eb --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerActionsHandler.java @@ -0,0 +1,60 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.scm.container; + +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto +.StorageContainerDatanodeProtocolProtos.ContainerAction; +import org.apache.hadoop.hdds.scm.events.SCMEvents; +import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher +.ContainerActionsFromDatanode; +import org.apache.hadoop.hdds.server.events.EventHandler; +import org.apache.hadoop.hdds.server.events.EventPublisher; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Handles container reports from datanode. + */ +public class ContainerActionsHandler implements +EventHandler { + + private static final Logger LOG = LoggerFactory.getLogger( + ContainerActionsHandler.class); + + @Override + public void onMessage( + ContainerActionsFromDatanode containerReportFromDatanode, + EventPublisher publisher) { +DatanodeDetails dd = containerReportFromDatanode.getDatanodeDetails(); +for (ContainerAction action : containerReportFromDatanode.getReport() +.getContainerActionsList()) { + ContainerID containerId = ContainerID.valueof(action.getContainerID()); + switch (action.getAction()) { + case CLOSE: +LOG.debug("Closing container {} in datanode {} because the" + +" container is {}.", containerId, dd, action.getReason()); +publisher.fireEvent(SCMEvents.CLOSE_CONTAINER, containerId); +break; + default: +LOG.warn("Invalid action {} with reason {}, from datanode {}. 
", +action.getAction(), action.getReason(), dd); } +} + } +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c368575/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java -- diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java index ad1702b..d49dd4f 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/events/SCMEvents.java @@ -20,9 +20,16 @@ package org.apache.hadoop.hdds.scm.events; import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import
[23/50] [abbrv] hadoop git commit: YARN-8418. App local logs could leaked if log aggregation fails to initialize for the app. (Bibin A Chundatt via wangda)
YARN-8418. App local logs could leaked if log aggregation fails to initialize for the app. (Bibin A Chundatt via wangda) Change-Id: I29a23ca4b219b48c92e7975cd44cddb8b0e04104 Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4b540bbf Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4b540bbf Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4b540bbf Branch: refs/heads/YARN-7402 Commit: 4b540bbfcf02d828052999215c6135603d98f5db Parents: 8aa93a5 Author: Wangda Tan Authored: Tue Jul 31 12:07:51 2018 -0700 Committer: Wangda Tan Committed: Tue Jul 31 12:08:00 2018 -0700 -- .../LogAggregationFileController.java | 7 ++ .../nodemanager/NodeStatusUpdaterImpl.java | 1 + .../containermanager/ContainerManager.java | 1 + .../containermanager/ContainerManagerImpl.java | 13 ++- .../logaggregation/AppLogAggregator.java| 8 ++ .../logaggregation/AppLogAggregatorImpl.java| 15 .../logaggregation/LogAggregationService.java | 83 .../containermanager/loghandler/LogHandler.java | 7 ++ .../loghandler/NonAggregatingLogHandler.java| 9 +++ .../loghandler/event/LogHandlerEventType.java | 4 +- .../event/LogHandlerTokenUpdatedEvent.java | 26 ++ .../nodemanager/DummyContainerManager.java | 7 ++ .../TestLogAggregationService.java | 34 +--- 13 files changed, 187 insertions(+), 28 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b540bbf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java index b047b1c..6b3c9a4 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java @@ -43,11 +43,14 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.token.SecretManager; import org.apache.hadoop.yarn.api.records.ApplicationAccessType; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.logaggregation.LogAggregationUtils; import org.apache.hadoop.yarn.webapp.View.ViewContext; @@ -365,6 +368,10 @@ public abstract class LogAggregationFileController { } }); } catch (Exception e) { + if (e instanceof RemoteException) { +throw new YarnRuntimeException(((RemoteException) e) +.unwrapRemoteException(SecretManager.InvalidToken.class)); + } throw new YarnRuntimeException(e); } } http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b540bbf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java index 
8154723..faf7adb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeStatusUpdaterImpl.java @@ -1135,6 +1135,7 @@ public class NodeStatusUpdaterImpl extends AbstractService implements if (systemCredentials != null && !systemCredentials.isEmpty()) { ((NMContext)
[38/50] [abbrv] hadoop git commit: YARN-8594. [UI2] Display current logged in user. Contributed by Akhil PB.
YARN-8594. [UI2] Display current logged in user. Contributed by Akhil PB. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1ea81169 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1ea81169 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1ea81169 Branch: refs/heads/YARN-7402 Commit: 1ea81169bad5bd6433348ef8e5e7ac12c5a9cb5e Parents: 41da205 Author: Sunil G Authored: Thu Aug 2 12:41:06 2018 +0530 Committer: Sunil G Committed: Thu Aug 2 12:41:06 2018 +0530 -- .../webapp/app/adapters/cluster-user-info.js| 29 + .../main/webapp/app/controllers/application.js | 10 - .../main/webapp/app/models/cluster-user-info.js | 24 +++ .../src/main/webapp/app/routes/application.js | 6 ++- .../webapp/app/serializers/cluster-user-info.js | 43 .../src/main/webapp/app/styles/app.scss | 12 +- .../main/webapp/app/templates/application.hbs | 15 +-- 7 files changed, 132 insertions(+), 7 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ea81169/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/cluster-user-info.js -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/cluster-user-info.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/cluster-user-info.js new file mode 100644 index 000..a49c0f5 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/cluster-user-info.js @@ -0,0 +1,29 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import RESTAbstractAdapter from './restabstract'; + +export default RESTAbstractAdapter.extend({ + address: "rmWebAddress", + restNameSpace: "cluster", + serverName: "RM", + + pathForType(/*modelName*/) { +return 'userinfo'; + } +}); \ No newline at end of file http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ea81169/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/application.js -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/application.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/application.js index 986b1fd..75b072a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/application.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/application.js @@ -58,5 +58,13 @@ export default Ember.Controller.extend({ return true; } return false; - }.property('currentPath') + }.property('currentPath'), + + clusterInfo: function() { +return this.model.clusterInfo.get('firstObject'); + }.property('model.clusterInfo'), + + userInfo: function() { +return this.model.userInfo.get('firstObject'); + }.property('model.userInfo'), }); http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ea81169/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/cluster-user-info.js -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/cluster-user-info.js 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/cluster-user-info.js new file mode 100644 index 000..c2867f8 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/cluster-user-info.js @@ -0,0 +1,24 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may
[22/50] [abbrv] hadoop git commit: YARN-8605. TestDominantResourceFairnessPolicy.testModWhileSorting is flaky. (Wilfred Spiegelenburg via Haibo Chen)
YARN-8605. TestDominantResourceFairnessPolicy.testModWhileSorting is flaky. (Wilfred Spiegelenburg via Haibo Chen) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8aa93a57 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8aa93a57 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8aa93a57 Branch: refs/heads/YARN-7402 Commit: 8aa93a575e896c609b97ddab58853b1eb95f0dee Parents: 9fea5c9 Author: Haibo Chen Authored: Tue Jul 31 11:32:40 2018 -0700 Committer: Haibo Chen Committed: Tue Jul 31 11:32:40 2018 -0700 -- .../TestDominantResourceFairnessPolicy.java | 38 +++- 1 file changed, 12 insertions(+), 26 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/8aa93a57/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java index 55b7163..c963e0d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/TestDominantResourceFairnessPolicy.java @@ -24,7 +24,6 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import 
java.util.ArrayList; -import java.util.Collections; import java.util.Comparator; import java.util.List; import java.util.Map; @@ -458,33 +457,20 @@ public class TestDominantResourceFairnessPolicy { } Comparator DRFComparator = createComparator(10, 5); -// To simulate unallocated resource changes -Thread modThread = modificationThread(schedulableList); -modThread.start(); +/* + * The old sort should fail, but timing it makes testing to flaky. + * TimSort which is used does not handle the concurrent modification of + * objects it is sorting. This is the test that should fail: + * modThread.start(); + * try { + *Collections.sort(schedulableList, DRFComparator); + * } catch (IllegalArgumentException iae) { + *// failed sort + * } + */ -// This should fail: make sure that we do test correctly -// TimSort which is used does not handle the concurrent modification of -// objects it is sorting. -try { - Collections.sort(schedulableList, DRFComparator); - fail("Sorting should have failed and did not"); -} catch (IllegalArgumentException iae) { - assertEquals(iae.getMessage(), "Comparison method violates its general contract!"); -} -try { - modThread.join(); -} catch (InterruptedException ie) { - fail("ModThread join failed: " + ie.getMessage()); -} - -// clean up and try again using TreeSet which should work -schedulableList.clear(); -for (int i=0; i<1; i++) { - schedulableList.add( - (FakeSchedulable)createSchedulable((i%10)*100, (i%3)*2)); -} TreeSet sortedSchedulable = new TreeSet<>(DRFComparator); -modThread = modificationThread(schedulableList); +Thread modThread = modificationThread(schedulableList); modThread.start(); sortedSchedulable.addAll(schedulableList); try { - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[18/50] [abbrv] hadoop git commit: YARN-7974. Allow updating application tracking url after registration. Contributed by Jonathan Hung
YARN-7974. Allow updating application tracking url after registration. Contributed by Jonathan Hung Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3e06a5dc Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3e06a5dc Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3e06a5dc Branch: refs/heads/YARN-7402 Commit: 3e06a5dcea8224ba71aec284df23b47d536bb06d Parents: ee53602 Author: Jonathan Hung Authored: Mon Jul 30 17:41:01 2018 -0700 Committer: Jonathan Hung Committed: Mon Jul 30 17:44:18 2018 -0700 -- .../api/protocolrecords/AllocateRequest.java| 47 +++- .../src/main/proto/yarn_service_protos.proto| 1 + .../hadoop/yarn/client/api/AMRMClient.java | 11 +++ .../yarn/client/api/async/AMRMClientAsync.java | 11 +++ .../api/async/impl/AMRMClientAsyncImpl.java | 5 ++ .../yarn/client/api/impl/AMRMClientImpl.java| 11 +++ .../yarn/client/api/impl/TestAMRMClient.java| 77 .../impl/pb/AllocateRequestPBImpl.java | 27 ++- .../resourcemanager/DefaultAMSProcessor.java| 2 +- .../rmapp/attempt/RMAppAttemptImpl.java | 20 + .../event/RMAppAttemptStatusupdateEvent.java| 11 +++ .../TestApplicationMasterService.java | 34 + .../server/resourcemanager/TestRMRestart.java | 45 13 files changed, 298 insertions(+), 4 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e06a5dc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java index eee50e3..799088b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java @@ -73,7 +73,21 @@ public abstract class AllocateRequest { .releaseList(containersToBeReleased) .resourceBlacklistRequest(resourceBlacklistRequest).build(); } - + + @Public + @Unstable + public static AllocateRequest newInstance(int responseID, float appProgress, + List resourceAsk, + List containersToBeReleased, + ResourceBlacklistRequest resourceBlacklistRequest, + String trackingUrl) { +return AllocateRequest.newBuilder().responseId(responseID) +.progress(appProgress).askList(resourceAsk) +.releaseList(containersToBeReleased) +.resourceBlacklistRequest(resourceBlacklistRequest) +.trackingUrl(trackingUrl).build(); + } + @Public @Unstable public static AllocateRequest newInstance(int responseID, float appProgress, @@ -240,6 +254,22 @@ public abstract class AllocateRequest { List schedulingRequests) { } + /** + * Get the tracking url update for this heartbeat. + * @return tracking url to update this application with + */ + @Public + @Unstable + public abstract String getTrackingUrl(); + + /** + * Set the new tracking url for this application. + * @param trackingUrl the new tracking url + */ + @Public + @Unstable + public abstract void setTrackingUrl(String trackingUrl); + @Public @Unstable public static AllocateRequestBuilder newBuilder() { @@ -356,6 +386,19 @@ public abstract class AllocateRequest { } /** + * Set the trackingUrl of the request. + * @see AllocateRequest#setTrackingUrl(String) + * @param trackingUrl new tracking url + * @return {@link AllocateRequestBuilder} + */ +@Public +@Unstable +public AllocateRequestBuilder trackingUrl(String trackingUrl) { + allocateRequest.setTrackingUrl(trackingUrl); + return this; +} + +/** * Return generated {@link AllocateRequest} object. 
* @return {@link AllocateRequest} */ @@ -365,4 +408,4 @@ public abstract class AllocateRequest { return allocateRequest; } } -} \ No newline at end of file +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e06a5dc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto index 92a65ad..acd452d 100644 ---
[43/50] [abbrv] hadoop git commit: YARN-8263. DockerClient still touches hadoop.tmp.dir. Contributed by Craig Condit
YARN-8263. DockerClient still touches hadoop.tmp.dir. Contributed by Craig Condit Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7526815e Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7526815e Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7526815e Branch: refs/heads/YARN-7402 Commit: 7526815e3234ca352854ecfb142a13f1a188d5bd Parents: 5033d7d Author: Jason Lowe Authored: Thu Aug 2 10:43:48 2018 -0500 Committer: Jason Lowe Committed: Thu Aug 2 10:43:48 2018 -0500 -- .../nodemanager/LinuxContainerExecutor.java | 6 +-- .../runtime/DockerLinuxContainerRuntime.java| 17 +++ .../linux/runtime/docker/DockerClient.java | 53 .../linux/runtime/docker/DockerCommand.java | 6 +-- .../runtime/docker/DockerCommandExecutor.java | 15 ++ .../runtime/docker/DockerInspectCommand.java| 3 +- .../linux/runtime/docker/DockerRmCommand.java | 3 +- .../linux/runtime/docker/TestDockerClient.java | 2 +- .../docker/TestDockerCommandExecutor.java | 20 9 files changed, 30 insertions(+), 95 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/7526815e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java index 4253f2f..f75ead2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java @@ -944,12 +944,12 @@ public class LinuxContainerExecutor extends ContainerExecutor { PrivilegedOperationExecutor privOpExecutor = PrivilegedOperationExecutor.getInstance(super.getConf()); if (DockerCommandExecutor.isRemovable( - DockerCommandExecutor.getContainerStatus(containerId, - super.getConf(), privOpExecutor, nmContext))) { + DockerCommandExecutor.getContainerStatus(containerId, privOpExecutor, + nmContext))) { LOG.info("Removing Docker container : " + containerId); DockerRmCommand dockerRmCommand = new DockerRmCommand(containerId); DockerCommandExecutor.executeDockerCommand(dockerRmCommand, containerId, -null, super.getConf(), privOpExecutor, false, nmContext); +null, privOpExecutor, false, nmContext); } } catch (ContainerExecutionException e) { LOG.warn("Unable to remove docker container: " + containerId); http://git-wip-us.apache.org/repos/asf/hadoop/blob/7526815e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java index 88e6c91..5d6f61e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java @@ -298,7 +298,7 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime { throws ContainerExecutionException { this.nmContext = nmContext; this.conf = conf; -dockerClient = new DockerClient(conf); +dockerClient = new DockerClient(); allowedNetworks.clear(); defaultROMounts.clear(); defaultRWMounts.clear(); @@ -973,7 +973,7 @@ public class DockerLinuxContainerRuntime implements LinuxContainerRuntime { String
[44/50] [abbrv] hadoop git commit: YARN-7402. [GPG] Fix potential connection leak in GPGUtils. Contributed by Giovanni Matteo Fumarola.
YARN-7402. [GPG] Fix potential connection leak in GPGUtils. Contributed by Giovanni Matteo Fumarola. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f83fc85b Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f83fc85b Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f83fc85b Branch: refs/heads/YARN-7402 Commit: f83fc85bae757f4cd54156eccb8bec692aaf6a21 Parents: 6800cf7 Author: Botong Huang Authored: Wed May 23 12:45:32 2018 -0700 Committer: Botong Huang Committed: Thu Aug 2 09:59:48 2018 -0700 -- .../server/globalpolicygenerator/GPGUtils.java | 31 +--- 1 file changed, 20 insertions(+), 11 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/f83fc85b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGUtils.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGUtils.java index 429bec4..31cee1c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-globalpolicygenerator/src/main/java/org/apache/hadoop/yarn/server/globalpolicygenerator/GPGUtils.java @@ -18,21 +18,22 @@ package org.apache.hadoop.yarn.server.globalpolicygenerator; +import static javax.servlet.http.HttpServletResponse.SC_OK; + import java.util.HashMap; import java.util.Map; import java.util.Set; -import javax.servlet.http.HttpServletResponse; import javax.ws.rs.core.MediaType; import 
org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; +import org.apache.hadoop.yarn.server.federation.store.records.SubClusterIdInfo; import com.sun.jersey.api.client.Client; import com.sun.jersey.api.client.ClientResponse; import com.sun.jersey.api.client.WebResource; -import org.apache.hadoop.yarn.server.federation.store.records.SubClusterId; -import org.apache.hadoop.yarn.server.federation.store.records.SubClusterIdInfo; /** * GPGUtils contains utility functions for the GPG. @@ -53,15 +54,23 @@ public final class GPGUtils { T obj = null; WebResource webResource = client.resource(webAddr); -ClientResponse response = webResource.path("ws/v1/cluster").path(path) -.accept(MediaType.APPLICATION_XML).get(ClientResponse.class); -if (response.getStatus() == HttpServletResponse.SC_OK) { - obj = response.getEntity(returnType); -} else { - throw new YarnRuntimeException("Bad response from remote web service: " - + response.getStatus()); +ClientResponse response = null; +try { + response = webResource.path("ws/v1/cluster").path(path) + .accept(MediaType.APPLICATION_XML).get(ClientResponse.class); + if (response.getStatus() == SC_OK) { +obj = response.getEntity(returnType); + } else { +throw new YarnRuntimeException( +"Bad response from remote web service: " + response.getStatus()); + } + return obj; +} finally { + if (response != null) { +response.close(); + } + client.destroy(); } -return obj; } /** - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[30/50] [abbrv] hadoop git commit: YARN-8606. Opportunistic scheduling does not work post RM failover. Contributed by Bibin A Chundatt.
YARN-8606. Opportunistic scheduling does not work post RM failover. Contributed by Bibin A Chundatt. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a48a0cc7 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a48a0cc7 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a48a0cc7 Branch: refs/heads/YARN-7402 Commit: a48a0cc7fd8e7ac1c07b260e6078077824f27c35 Parents: 5cc8e99 Author: Sunil G Authored: Wed Aug 1 12:17:18 2018 +0530 Committer: Sunil G Committed: Wed Aug 1 12:17:18 2018 +0530 -- ...pportunisticContainerAllocatorAMService.java | 4 +- .../server/resourcemanager/ResourceManager.java | 37 ++-- .../yarn/server/resourcemanager/TestRMHA.java | 44 3 files changed, 72 insertions(+), 13 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/a48a0cc7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java index 9b13627..15c2a89 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/OpportunisticContainerAllocatorAMService.java @@ -18,6 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager; +import 
com.google.common.annotations.VisibleForTesting; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -417,7 +418,8 @@ public class OpportunisticContainerAllocatorAMService return nodeMonitor.getThresholdCalculator(); } - private synchronized List getLeastLoadedNodes() { + @VisibleForTesting + synchronized List getLeastLoadedNodes() { long currTime = System.currentTimeMillis(); if ((currTime - lastCacheUpdateTime > cacheRefreshInterval) || (cachedNodes == null)) { http://git-wip-us.apache.org/repos/asf/hadoop/blob/a48a0cc7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java index 0b7e87c..f14d440 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java @@ -757,9 +757,11 @@ public class ResourceManager extends CompositeService implements Recoverable { } masterService = createApplicationMasterService(); + createAndRegisterOpportunisticDispatcher(masterService); addService(masterService) ; rmContext.setApplicationMasterService(masterService); + applicationACLsManager = new ApplicationACLsManager(conf); queueACLsManager = createQueueACLsManager(scheduler, conf); @@ -807,6 +809,23 @@ public class ResourceManager extends CompositeService 
implements Recoverable { super.serviceInit(conf); } +private void createAndRegisterOpportunisticDispatcher( +ApplicationMasterService service) { + if (!isOpportunisticSchedulingEnabled(conf)) { +return; + } + EventDispatcher oppContainerAllocEventDispatcher = new EventDispatcher( + (OpportunisticContainerAllocatorAMService) service, + OpportunisticContainerAllocatorAMService.class.getName()); + // Add an event dispatcher for the + // OpportunisticContainerAllocatorAMService to handle node + // additions, updates and removals. Since
[41/50] [abbrv] hadoop git commit: HDDS-290. putKey is failing with KEY_ALLOCATION_ERROR. Contributed by Xiaoyu Yao.
HDDS-290. putKey is failing with KEY_ALLOCATION_ERROR. Contributed by Xiaoyu Yao. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e83719c8 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e83719c8 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e83719c8 Branch: refs/heads/YARN-7402 Commit: e83719c830dd4927c8eef26062c56c0d62b2f04f Parents: 7c36857 Author: Nanda kumar Authored: Thu Aug 2 19:02:25 2018 +0530 Committer: Nanda kumar Committed: Thu Aug 2 19:02:25 2018 +0530 -- .../src/main/compose/ozone/docker-config| 1 + .../acceptance/ozonefs/ozonesinglenode.robot| 49 .../apache/hadoop/ozone/web/ozShell/Shell.java | 4 ++ .../ozone/web/ozShell/keys/PutKeyHandler.java | 16 +-- 4 files changed, 66 insertions(+), 4 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/e83719c8/hadoop-dist/src/main/compose/ozone/docker-config -- diff --git a/hadoop-dist/src/main/compose/ozone/docker-config b/hadoop-dist/src/main/compose/ozone/docker-config index 50abb18..1b75c01 100644 --- a/hadoop-dist/src/main/compose/ozone/docker-config +++ b/hadoop-dist/src/main/compose/ozone/docker-config @@ -22,6 +22,7 @@ OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata OZONE-SITE.XML_ozone.handler.type=distributed OZONE-SITE.XML_ozone.scm.client.address=scm +OZONE-SITE.XML_ozone.replication=1 HDFS-SITE.XML_rpc.metrics.quantile.enable=true HDFS-SITE.XML_rpc.metrics.percentiles.intervals=60,300 LOG4J.PROPERTIES_log4j.rootLogger=INFO, stdout http://git-wip-us.apache.org/repos/asf/hadoop/blob/e83719c8/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/ozonesinglenode.robot -- diff --git a/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/ozonesinglenode.robot b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/ozonesinglenode.robot new file mode 100644 index 000..b844cee --- /dev/null +++ 
b/hadoop-ozone/acceptance-test/src/test/acceptance/ozonefs/ozonesinglenode.robot @@ -0,0 +1,49 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +*** Settings *** +Documentation Ozonefs Single Node Test +Library OperatingSystem +Suite Setup Startup Ozone cluster with size 1 +Suite Teardown Teardown Ozone cluster +Resource../commonlib.robot + +*** Variables *** +${COMPOSEFILE} ${CURDIR}/docker-compose.yaml +${PROJECTDIR} ${CURDIR}/../../../../../.. 
+ + +*** Test Cases *** +Create volume and bucket +Execute on datanodeozone oz -createVolume http://ozoneManager/fstest -user bilbo -quota 100TB -root +Execute on datanodeozone oz -createBucket http://ozoneManager/fstest/bucket1 + +Check volume from ozonefs +${result} = Execute on hadooplasthdfs dfs -ls o3://bucket1.fstest/ + +Create directory from ozonefs +Execute on hadooplasthdfs dfs -mkdir -p o3://bucket1.fstest/testdir/deep +${result} = Execute on ozoneManager ozone oz -listKey o3://ozoneManager/fstest/bucket1 | grep -v WARN | jq -r '.[].keyName' +Should contain${result} testdir/deep +Test key handling +Execute on datanodeozone oz -putKey o3://ozoneManager/fstest/bucket1/key1 -file NOTICE.txt -replicationFactor 1 +Execute on datanoderm -f NOTICE.txt.1 +Execute on datanodeozone oz -getKey o3://ozoneManager/fstest/bucket1/key1 -file NOTICE.txt.1 +Execute on datanodels -l NOTICE.txt.1 +${result} = Execute on datanodeozone oz -infoKey o3://ozoneManager/fstest/bucket1/key1 | grep -Ev 'Removed|WARN|DEBUG|ERROR|INFO|TRACE' | jq -r '. |
[03/50] [abbrv] hadoop git commit: YARN-8596. Allow SQLFederationStateStore to submit the same app in the same subcluster. Contributed by Giovanni Matteo Fumarola.
YARN-8596. Allow SQLFederationStateStore to submit the same app in the same subcluster. Contributed by Giovanni Matteo Fumarola. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/79091cf7 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/79091cf7 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/79091cf7 Branch: refs/heads/YARN-7402 Commit: 79091cf76f6e966f64ac1d65e43e95782695e678 Parents: 2cccf40 Author: Inigo Goiri Authored: Fri Jul 27 15:23:57 2018 -0700 Committer: Inigo Goiri Committed: Fri Jul 27 15:23:57 2018 -0700 -- .../store/impl/SQLFederationStateStore.java | 14 +++--- .../store/impl/FederationStateStoreBaseTest.java | 19 +++ 2 files changed, 26 insertions(+), 7 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/79091cf7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java index e62dcaf..273118a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/federation/store/impl/SQLFederationStateStore.java @@ -564,13 +564,13 @@ public class SQLFederationStateStore implements FederationStateStore { // Check the ROWCOUNT value, if it is equal to 0 it means the call // did not add a new application into FederationStateStore if 
(cstmt.getInt(4) == 0) { - String errMsg = "The application " + appId - + " was not insert into the StateStore"; - FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); -} -// Check the ROWCOUNT value, if it is different from 1 it means the call -// had a wrong behavior. Maybe the database is not set correctly. -if (cstmt.getInt(4) != 1) { + LOG.info( + "The application {} was not inserted in the StateStore because it" + + " was already present in SubCluster {}", + appId, subClusterHome); +} else if (cstmt.getInt(4) != 1) { + // Check the ROWCOUNT value, if it is different from 1 it means the + // call had a wrong behavior. Maybe the database is not set correctly. String errMsg = "Wrong behavior during the insertion of SubCluster " + subClusterId; FederationStateStoreUtils.logAndThrowStoreException(LOG, errMsg); http://git-wip-us.apache.org/repos/asf/hadoop/blob/79091cf7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/FederationStateStoreBaseTest.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/FederationStateStoreBaseTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/FederationStateStoreBaseTest.java index 15cc0f0..b17f870 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/FederationStateStoreBaseTest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/server/federation/store/impl/FederationStateStoreBaseTest.java @@ -282,6 +282,25 @@ public abstract class FederationStateStoreBaseTest { } @Test + public void testAddApplicationHomeSubClusterAppAlreadyExistsInTheSameSC() 
+ throws Exception { +ApplicationId appId = ApplicationId.newInstance(1, 1); +SubClusterId subClusterId1 = SubClusterId.newInstance("SC1"); +addApplicationHomeSC(appId, subClusterId1); + +ApplicationHomeSubCluster ahsc2 = +ApplicationHomeSubCluster.newInstance(appId, subClusterId1); + +AddApplicationHomeSubClusterResponse response = +stateStore.addApplicationHomeSubCluster( +
[05/50] [abbrv] hadoop git commit: HADOOP-15636. Follow-up from HADOOP-14918; restoring test under new name. Contributed by Gabor Bota.
HADOOP-15636. Follow-up from HADOOP-14918; restoring test under new name. Contributed by Gabor Bota. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/59adeb8d Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/59adeb8d Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/59adeb8d Branch: refs/heads/YARN-7402 Commit: 59adeb8d7f2f04bc56d37b2a2e65596fee6e4894 Parents: ed9d60e Author: Sean Mackrory Authored: Thu Jul 26 10:25:47 2018 -0600 Committer: Sean Mackrory Committed: Fri Jul 27 18:23:29 2018 -0600 -- .../s3a/s3guard/ITestDynamoDBMetadataStore.java | 649 +++ 1 file changed, 649 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/59adeb8d/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStore.java -- diff --git a/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStore.java b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStore.java new file mode 100644 index 000..a597858 --- /dev/null +++ b/hadoop-tools/hadoop-aws/src/test/java/org/apache/hadoop/fs/s3a/s3guard/ITestDynamoDBMetadataStore.java @@ -0,0 +1,649 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.s3a.s3guard; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.net.URI; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; + +import com.amazonaws.services.dynamodbv2.document.DynamoDB; +import com.amazonaws.services.dynamodbv2.document.Item; +import com.amazonaws.services.dynamodbv2.document.PrimaryKey; +import com.amazonaws.services.dynamodbv2.document.Table; +import com.amazonaws.services.dynamodbv2.model.ProvisionedThroughputDescription; +import com.amazonaws.services.dynamodbv2.model.ResourceNotFoundException; +import com.amazonaws.services.dynamodbv2.model.TableDescription; + +import com.google.common.collect.Lists; +import org.apache.commons.collections.CollectionUtils; +import org.apache.hadoop.fs.contract.s3a.S3AContract; +import org.apache.hadoop.fs.s3a.Constants; +import org.apache.hadoop.fs.s3a.Tristate; + +import org.apache.hadoop.io.IOUtils; +import org.junit.AfterClass; +import org.junit.Assume; +import org.junit.BeforeClass; +import org.junit.Test; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.s3a.S3AFileStatus; +import org.apache.hadoop.fs.s3a.S3AFileSystem; +import org.apache.hadoop.security.UserGroupInformation; + +import static org.apache.hadoop.fs.s3a.Constants.*; +import static org.apache.hadoop.fs.s3a.S3ATestUtils.*; +import static org.apache.hadoop.fs.s3a.s3guard.PathMetadataDynamoDBTranslation.*; +import static org.apache.hadoop.fs.s3a.s3guard.DynamoDBMetadataStore.*; +import static org.apache.hadoop.test.LambdaTestUtils.*; + +/** + * Test that {@link DynamoDBMetadataStore} implements {@link MetadataStore}. + * + * In this integration test, we use a real AWS DynamoDB. 
A + * {@link DynamoDBMetadataStore} object is created in the @BeforeClass method, + * and shared for all test in the @BeforeClass method. You will be charged + * bills for AWS S3 or DynamoDB when you run these tests. + * + * According to the base class, every test case will have independent contract + * to create a new {@link S3AFileSystem} instance and initializes it. + * A table will be created and shared between the tests, + */ +public class ITestDynamoDBMetadataStore extends MetadataStoreTestBase { + private static final Logger LOG = + LoggerFactory.getLogger(ITestDynamoDBMetadataStore.class); + public static final PrimaryKey + VERSION_MARKER_PRIMARY_KEY = createVersionMarkerPrimaryKey( + DynamoDBMetadataStore.VERSION_MARKER); + + private S3AFileSystem
[33/50] [abbrv] hadoop git commit: YARN-8600. RegistryDNS hang when remote lookup does not reply. Contributed by Eric Yang
YARN-8600. RegistryDNS hang when remote lookup does not reply. Contributed by Eric Yang Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/603a5747 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/603a5747 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/603a5747 Branch: refs/heads/YARN-7402 Commit: 603a57476ce0bf9514f0432a235f29432ca4c323 Parents: 67c65da Author: Shane Kumpf Authored: Wed Aug 1 12:22:01 2018 -0600 Committer: Shane Kumpf Committed: Wed Aug 1 12:22:01 2018 -0600 -- .../hadoop/registry/server/dns/LookupTask.java | 39 .../hadoop/registry/server/dns/RegistryDNS.java | 21 --- .../registry/server/dns/TestRegistryDNS.java| 8 3 files changed, 63 insertions(+), 5 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/603a5747/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/LookupTask.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/LookupTask.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/LookupTask.java new file mode 100644 index 000..c2fc4a9 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/LookupTask.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.registry.server.dns; + +import java.util.concurrent.Callable; + +import org.xbill.DNS.Lookup; +import org.xbill.DNS.Name; +import org.xbill.DNS.Record; + +public class LookupTask implements Callable { + + private Name name; + private int type; + + public LookupTask(Name name, int type) { +this.name = name; +this.type = type; + } + + @Override + public Record[] call() throws Exception { +return new Lookup(name, type).run(); + } +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/603a5747/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java index 0022843..52e49a3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/server/dns/RegistryDNS.java @@ -99,9 +99,13 @@ import java.util.Properties; import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; import 
java.util.concurrent.ThreadFactory; import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantReadWriteLock; @@ -941,7 +945,7 @@ public class RegistryDNS extends AbstractService implements DNSOperations, * @param portlocal port. * @throws IOException if the UDP processing fails. */ - private void serveNIOUDP(DatagramChannel channel, + private synchronized void serveNIOUDP(DatagramChannel channel, InetAddress addr, int port) throws Exception { SocketAddress remoteAddress = null; try { @@ -1177,13 +1181,20 @@ public class RegistryDNS extends AbstractService implements DNSOperations, * @return DNS records */ protected Record[]
[36/50] [abbrv] hadoop git commit: YARN-8593. Add RM web service endpoint to get user information. Contributed by Akhil PB.
YARN-8593. Add RM web service endpoint to get user information. Contributed by Akhil PB. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/735b4925 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/735b4925 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/735b4925 Branch: refs/heads/YARN-7402 Commit: 735b4925569541fb8e65dc0c668ccc2aa2ffb30b Parents: 23f3942 Author: Sunil G Authored: Thu Aug 2 08:34:09 2018 +0530 Committer: Sunil G Committed: Thu Aug 2 08:34:09 2018 +0530 -- .../server/resourcemanager/ResourceManager.java | 4 ++ .../resourcemanager/webapp/RMWSConsts.java | 3 + .../webapp/RMWebServiceProtocol.java| 10 +++ .../resourcemanager/webapp/RMWebServices.java | 12 .../webapp/dao/ClusterUserInfo.java | 64 .../webapp/TestRMWebServices.java | 21 +++ .../webapp/DefaultRequestInterceptorREST.java | 8 +++ .../webapp/FederationInterceptorREST.java | 6 ++ .../server/router/webapp/RouterWebServices.java | 12 .../webapp/MockRESTRequestInterceptor.java | 6 ++ .../PassThroughRESTRequestInterceptor.java | 6 ++ 11 files changed, 152 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/735b4925/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java index f14d440..bb85b67 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java @@ -222,6 +222,10 @@ public class ResourceManager extends CompositeService implements Recoverable { return clusterTimeStamp; } + public String getRMLoginUser() { +return rmLoginUGI.getShortUserName(); + } + @VisibleForTesting protected static void setClusterTimeStamp(long timestamp) { clusterTimeStamp = timestamp; http://git-wip-us.apache.org/repos/asf/hadoop/blob/735b4925/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java index 9822878..a3fd2a9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWSConsts.java @@ -36,6 +36,9 @@ public final class RMWSConsts { /** Path for {@code RMWebServiceProtocol#getClusterInfo}. */ public static final String INFO = "/info"; + /** Path for {@code RMWebServiceProtocol#getClusterUserInfo}. */ + public static final String CLUSTER_USER_INFO = "/userinfo"; + /** Path for {@code RMWebServiceProtocol#getClusterMetricsInfo}. 
*/ public static final String METRICS = "/metrics"; http://git-wip-us.apache.org/repos/asf/hadoop/blob/735b4925/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServiceProtocol.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServiceProtocol.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServiceProtocol.java index 85ea07d..a310853 100644 ---
[08/50] [abbrv] hadoop git commit: HDDS-273. DeleteLog entries should be purged only after corresponding DNs commit the transaction. Contributed by Lokesh Jain.
HDDS-273. DeleteLog entries should be purged only after corresponding DNs commit the transaction. Contributed by Lokesh Jain. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/feb795b5 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/feb795b5 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/feb795b5 Branch: refs/heads/YARN-7402 Commit: feb795b58d2a3c20bdbddea1638a83f6637d3fc9 Parents: 6b038f8 Author: Mukul Kumar Singh Authored: Sun Jul 29 01:02:24 2018 +0530 Committer: Mukul Kumar Singh Committed: Sun Jul 29 01:02:24 2018 +0530 -- .../DeleteBlocksCommandHandler.java | 12 +- .../StorageContainerDatanodeProtocol.proto | 4 +- .../hadoop/hdds/scm/block/BlockManagerImpl.java | 2 +- .../block/DatanodeDeletedBlockTransactions.java | 47 ++-- .../hadoop/hdds/scm/block/DeletedBlockLog.java | 23 +- .../hdds/scm/block/DeletedBlockLogImpl.java | 123 ++ .../scm/server/SCMDatanodeProtocolServer.java | 19 +- .../hdds/scm/block/TestDeletedBlockLog.java | 232 ++- 8 files changed, 256 insertions(+), 206 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/feb795b5/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java -- diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java index 9640f93..b0d4cbc 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java @@ -113,8 +113,8 @@ public class 
DeleteBlocksCommandHandler implements CommandHandler { DeleteBlockTransactionResult.Builder txResultBuilder = DeleteBlockTransactionResult.newBuilder(); txResultBuilder.setTxID(entry.getTxID()); +long containerId = entry.getContainerID(); try { - long containerId = entry.getContainerID(); Container cont = containerSet.getContainer(containerId); if (cont == null) { throw new StorageContainerException("Unable to find the container " @@ -126,7 +126,8 @@ public class DeleteBlocksCommandHandler implements CommandHandler { KeyValueContainerData containerData = (KeyValueContainerData) cont.getContainerData(); deleteKeyValueContainerBlocks(containerData, entry); -txResultBuilder.setSuccess(true); +txResultBuilder.setContainerID(containerId) +.setSuccess(true); break; default: LOG.error( @@ -136,9 +137,12 @@ public class DeleteBlocksCommandHandler implements CommandHandler { } catch (IOException e) { LOG.warn("Failed to delete blocks for container={}, TXID={}", entry.getContainerID(), entry.getTxID(), e); - txResultBuilder.setSuccess(false); + txResultBuilder.setContainerID(containerId) + .setSuccess(false); } -resultBuilder.addResults(txResultBuilder.build()); +resultBuilder.addResults(txResultBuilder.build()) +.setDnId(context.getParent().getDatanodeDetails() +.getUuid().toString()); }); ContainerBlocksDeletionACKProto blockDeletionACK = resultBuilder.build(); http://git-wip-us.apache.org/repos/asf/hadoop/blob/feb795b5/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto -- diff --git a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto index d89567b..0c52efb 100644 --- a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto +++ b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto @@ -229,9 +229,11 @@ message DeletedBlocksTransaction { message 
ContainerBlocksDeletionACKProto { message DeleteBlockTransactionResult { required int64 txID = 1; -required bool success = 2; +required int64 containerID = 2; +required bool success = 3; } repeated DeleteBlockTransactionResult results = 1; + required string dnId = 2; } // SendACK response
[07/50] [abbrv] hadoop git commit: HDDS-246. Datanode should throw BlockNotCommittedException for uncommitted blocks to Ozone Client. Contributed by Shashikant Banerjee.
HDDS-246. Datanode should throw BlockNotCommittedException for uncommitted blocks to Ozone Client. Contributed by Shashikant Banerjee. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6b038f82 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6b038f82 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6b038f82 Branch: refs/heads/YARN-7402 Commit: 6b038f82da8fa8c1c4f1e1bf448eacc6dd523044 Parents: 3d58684 Author: Mukul Kumar Singh Authored: Sat Jul 28 22:04:11 2018 +0530 Committer: Mukul Kumar Singh Committed: Sat Jul 28 22:04:11 2018 +0530 -- .../main/proto/DatanodeContainerProtocol.proto | 1 + .../common/impl/OpenContainerBlockMap.java | 12 ++ .../container/keyvalue/KeyValueHandler.java | 12 -- .../ozone/scm/TestCommittedBlockLengthAPI.java | 45 +++- 4 files changed, 57 insertions(+), 13 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b038f82/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto -- diff --git a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto index a3c4467..6969fa6 100644 --- a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto +++ b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto @@ -138,6 +138,7 @@ enum Result { CONTAINER_FILES_CREATE_ERROR = 32; CONTAINER_CHECKSUM_ERROR = 33; UNKNOWN_CONTAINER_TYPE = 34; + BLOCK_NOT_COMMITTED = 35; } /** http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b038f82/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java -- diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java index 6a93c9d..8e2667d 100644 --- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java @@ -129,6 +129,18 @@ public class OpenContainerBlockMap { -> blocks.removeAndGetSize(blockID.getLocalID()) == 0? null: blocks); } + /** + * Returns true if the block exists in the map, false otherwise + * + * @param blockID + * @return True, if it exists, false otherwise + */ + public boolean checkIfBlockExists(BlockID blockID) { +KeyDataMap keyDataMap = containers.get(blockID.getContainerID()); +return keyDataMap == null ? false : +keyDataMap.get(blockID.getLocalID()) != null; + } + @VisibleForTesting KeyDataMap getKeyDataMap(long containerId) { return containers.get(containerId); http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b038f82/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java -- diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java index b08e128..0b26a14 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java @@ -91,6 +91,8 @@ import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos .Result.GET_SMALL_FILE_ERROR; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos .Result.PUT_SMALL_FILE_ERROR; +import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos +.Result.BLOCK_NOT_COMMITTED; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos .Stage; @@ -494,10 +496,14 @@ public class KeyValueHandler extends Handler { long 
blockLength; try { - BlockID blockID = BlockID.getFromProtobuf( - request.getGetCommittedBlockLength().getBlockID()); + BlockID blockID = BlockID + .getFromProtobuf(request.getGetCommittedBlockLength().getBlockID()); + // Check if it really exists in the openContainerBlockMap + if (openContainerBlockMap.checkIfBlockExists(blockID)) { +String msg = "Block " + blockID + " is not committed yet."; +throw new StorageContainerException(msg, BLOCK_NOT_COMMITTED); + }
[06/50] [abbrv] hadoop git commit: YARN-8558. NM recovery level db not cleaned up properly on container finish. Contributed by Bibin A Chundatt.
YARN-8558. NM recovery level db not cleaned up properly on container finish. Contributed by Bibin A Chundatt. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3d586841 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3d586841 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3d586841 Branch: refs/heads/YARN-7402 Commit: 3d586841aba99c7df98b2b4d3e48ec0144bad086 Parents: 59adeb8 Author: bibinchundatt Authored: Sat Jul 28 20:52:39 2018 +0530 Committer: bibinchundatt Committed: Sat Jul 28 20:52:39 2018 +0530 -- .../recovery/NMLeveldbStateStoreService.java | 14 ++ .../recovery/TestNMLeveldbStateStoreService.java | 7 +++ 2 files changed, 17 insertions(+), 4 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d586841/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java index 44f5e18..67f642d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java @@ -143,9 +143,9 @@ public class NMLeveldbStateStoreService extends NMStateStoreService { NM_TOKENS_KEY_PREFIX + PREV_MASTER_KEY_SUFFIX; private static final String CONTAINER_TOKENS_KEY_PREFIX = "ContainerTokens/"; - 
private static final String CONTAINER_TOKENS_CURRENT_MASTER_KEY = + private static final String CONTAINER_TOKEN_SECRETMANAGER_CURRENT_MASTER_KEY = CONTAINER_TOKENS_KEY_PREFIX + CURRENT_MASTER_KEY_SUFFIX; - private static final String CONTAINER_TOKENS_PREV_MASTER_KEY = + private static final String CONTAINER_TOKEN_SECRETMANAGER_PREV_MASTER_KEY = CONTAINER_TOKENS_KEY_PREFIX + PREV_MASTER_KEY_SUFFIX; private static final String LOG_DELETER_KEY_PREFIX = "LogDeleters/"; @@ -658,6 +658,12 @@ public class NMLeveldbStateStoreService extends NMStateStoreService { batch.delete(bytes(keyPrefix + CONTAINER_KILLED_KEY_SUFFIX)); batch.delete(bytes(keyPrefix + CONTAINER_EXIT_CODE_KEY_SUFFIX)); batch.delete(bytes(keyPrefix + CONTAINER_UPDATE_TOKEN_SUFFIX)); +batch.delete(bytes(keyPrefix + CONTAINER_START_TIME_KEY_SUFFIX)); +batch.delete(bytes(keyPrefix + CONTAINER_LOG_DIR_KEY_SUFFIX)); +batch.delete(bytes(keyPrefix + CONTAINER_VERSION_KEY_SUFFIX)); +batch.delete(bytes(keyPrefix + CONTAINER_REMAIN_RETRIES_KEY_SUFFIX)); +batch.delete(bytes(keyPrefix + CONTAINER_RESTART_TIMES_SUFFIX)); +batch.delete(bytes(keyPrefix + CONTAINER_WORK_DIR_KEY_SUFFIX)); List unknownKeysForContainer = containerUnknownKeySuffixes .removeAll(containerId); for (String unknownKeySuffix : unknownKeysForContainer) { @@ -1169,13 +1175,13 @@ public class NMLeveldbStateStoreService extends NMStateStoreService { @Override public void storeContainerTokenCurrentMasterKey(MasterKey key) throws IOException { -storeMasterKey(CONTAINER_TOKENS_CURRENT_MASTER_KEY, key); +storeMasterKey(CONTAINER_TOKEN_SECRETMANAGER_CURRENT_MASTER_KEY, key); } @Override public void storeContainerTokenPreviousMasterKey(MasterKey key) throws IOException { -storeMasterKey(CONTAINER_TOKENS_PREV_MASTER_KEY, key); +storeMasterKey(CONTAINER_TOKEN_SECRETMANAGER_PREV_MASTER_KEY, key); } @Override 
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d586841/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/recovery/TestNMLeveldbStateStoreService.java
[32/50] [abbrv] hadoop git commit: YARN-8403. Change the log level for fail to download resource from INFO to ERROR. Contributed by Eric Yang
YARN-8403. Change the log level for fail to download resource from INFO to ERROR. Contributed by Eric Yang Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/67c65da2 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/67c65da2 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/67c65da2 Branch: refs/heads/YARN-7402 Commit: 67c65da261464a0dccb63dc27668109a52e05714 Parents: d920b9d Author: Billie Rinaldi Authored: Wed Aug 1 08:51:18 2018 -0700 Committer: Billie Rinaldi Committed: Wed Aug 1 08:51:40 2018 -0700 -- .../localizer/ResourceLocalizationService.java | 16 +++- .../localizer/TestResourceLocalizationService.java | 3 +++ 2 files changed, 14 insertions(+), 5 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/67c65da2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java index 4ca6720..3834ece 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java @@ -969,11 +969,17 @@ public class ResourceLocalizationService extends CompositeService .getDU(new File(local.toUri(); 
assoc.getResource().unlock(); } catch (ExecutionException e) { - LOG.info("Failed to download resource " + assoc.getResource(), - e.getCause()); - LocalResourceRequest req = assoc.getResource().getRequest(); - publicRsrc.handle(new ResourceFailedLocalizationEvent(req, - e.getMessage())); + String user = assoc.getContext().getUser(); + ApplicationId applicationId = assoc.getContext().getContainerId().getApplicationAttemptId().getApplicationId(); + LocalResourcesTracker tracker = +getLocalResourcesTracker(LocalResourceVisibility.APPLICATION, user, applicationId); + final String diagnostics = "Failed to download resource " + + assoc.getResource() + " " + e.getCause(); + tracker.handle(new ResourceFailedLocalizationEvent( + assoc.getResource().getRequest(), diagnostics)); + publicRsrc.handle(new ResourceFailedLocalizationEvent( + assoc.getResource().getRequest(), diagnostics)); + LOG.error(diagnostics); assoc.getResource().unlock(); } catch (CancellationException e) { // ignore; shutting down http://git-wip-us.apache.org/repos/asf/hadoop/blob/67c65da2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java index 4d03f15..2b9148e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java @@ -2398,6 +2398,9 @@ public class TestResourceLocalizationService { // Waiting for resource to change into FAILED state. Assert.assertTrue(waitForResourceState(lr, spyService, req,
[01/50] [abbrv] hadoop git commit: YARN-8566. Add diagnostic message for unschedulable containers (snemeth via rkanter) [Forced Update!]
Repository: hadoop Updated Branches: refs/heads/YARN-7402 3e1c46077 -> b8e718082 (forced update) YARN-8566. Add diagnostic message for unschedulable containers (snemeth via rkanter) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fecbac49 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fecbac49 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fecbac49 Branch: refs/heads/YARN-7402 Commit: fecbac499e2ae6b3334773a997d454a518f43e01 Parents: b429f19 Author: Robert Kanter Authored: Fri Jul 27 14:32:34 2018 -0700 Committer: Robert Kanter Committed: Fri Jul 27 14:32:34 2018 -0700 -- .../src/site/markdown/ResourceManagerRest.md| 285 +++ 1 file changed, 285 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/fecbac49/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md index a30677c..24c2319 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceManagerRest.md @@ -2326,6 +2326,291 @@ Response Body: ``` +Containers for an Application Attempt API +- + +With Containers for an Application Attempt API you can obtain the list of containers, which belongs to an Application Attempt. + +### URI + + * http://rm-http-address:port/ws/v1/cluster/apps/{appid}/appattempts/{appAttemptId}/containers + +### HTTP Operations Supported + + * GET + +### Query Parameters Supported + + None + +### Elements of the *containers* object + +When you make a request for the list of containers, the information will be returned as an array of container objects. 
+ +containers: + +| Item | Data Type | Description | +|: |: |: | +| containers | array of app container objects(JSON)/zero or more container objects(XML) | The collection of app container objects | + +### Elements of the *container* object + +| Item | Data Type | Description | +|: |: |: | +| containerId | string | The container id | +| allocatedMB | long | The amount of memory allocated for the container in MB | +| allocatedVCores | int | The amount of virtual cores allocated for the container | +| assignedNodeId | string | The node id of the node the attempt ran on | +| priority | int | Allocated priority of the container | +| startedTime | long | The start time of the attempt (in ms since epoch) | +| finishedTime | long | The finish time of the attempt (in ms since epoch) 0 if not finished | +| elapsedTime | long | The elapsed time in ms since the startedTime | +| logUrl | string | The web URL that can be used to check the log for the container | +| containerExitStatus | int | Final exit status of the container | +| containerState | string | State of the container, can be NEW, RUNNING, or COMPLETE | +| nodeHttpAddress | string | The node http address of the node the attempt ran on || +| nodeId | string | The node id of the node the attempt ran on | +| allocatedResources |array of resource(JSON)/zero or more resource objects(XML) | Allocated resources for the container | + +### Elements of the *resource* object +| Item | Data Type | Description | +|: |: |: | +| memory | int | The maximum memory for the container | +| vCores | int | The maximum number of vcores for the container | + +**JSON response** + +HTTP Request: + + GET http://rm-http-address:port/ws/v1/cluster/apps/{appid}/appattempts/{appAttemptId}/containers + +Response Header: + + HTTP/1.1 200 OK + Content-Type: application/json + Transfer-Encoding: chunked + Server: Jetty(6.1.26) + +Response Body: + +```json +{ + "containers" : { +"container": [ + { + "containerId": "container_1531404209605_0008_01_01", 
+ "allocatedMB": "1536", + "allocatedVCores": "1", + "assignedNodeId": "host.domain.com:37814", + "priority": "0", + "startedTime": "1531405909444", + "finishedTime": "0", + "elapsedTime": "4112", + "logUrl": "http://host.domain.com:8042/node/containerlogs/container_1531404209605_0008_01_01/systest;, + "containerExitStatus": "0", + "containerState": "RUNNING", + "nodeHttpAddress": "http://host.domain.com:8042;, + "nodeId": "host.domain.com:37814", + "allocatedResources": [ + { +"key": "memory-mb", +"value": "1536" + }, + { +"key": "vcores", +
[14/50] [abbrv] hadoop git commit: HDDS-287. Add Close ContainerAction to Datanode#StateContext when the container gets full. Contributed by Nanda kumar.
HDDS-287. Add Close ContainerAction to Datanode#StateContext when the container gets full. Contributed by Nanda kumar. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3517a478 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3517a478 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3517a478 Branch: refs/heads/YARN-7402 Commit: 3517a47897457c11096ab57a4cb0b096a838a3ec Parents: 952dc2f Author: Nanda kumar Authored: Mon Jul 30 21:18:42 2018 +0530 Committer: Nanda kumar Committed: Mon Jul 30 21:18:42 2018 +0530 -- .../container/common/impl/HddsDispatcher.java | 63 +++- .../statemachine/DatanodeStateMachine.java | 2 +- .../common/statemachine/StateContext.java | 14 +- .../container/ozoneimpl/OzoneContainer.java | 6 +- .../common/impl/TestHddsDispatcher.java | 152 +++ .../container/common/impl/package-info.java | 22 +++ .../common/interfaces/TestHandler.java | 4 +- .../container/ozoneimpl/TestOzoneContainer.java | 2 +- .../ozone/container/common/TestEndPoint.java| 12 +- .../common/impl/TestCloseContainerHandler.java | 2 +- .../container/metrics/TestContainerMetrics.java | 2 +- .../container/ozoneimpl/TestOzoneContainer.java | 2 +- .../container/server/TestContainerServer.java | 2 +- .../genesis/BenchMarkDatanodeDispatcher.java| 6 +- 14 files changed, 270 insertions(+), 21 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/3517a478/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java -- diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java index 3d418e5..ee232db 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java +++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java @@ -21,12 +21,21 @@ package org.apache.hadoop.ozone.container.common.impl; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.collect.Maps; +import org.apache.hadoop.conf.StorageUnit; +import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto +.StorageContainerDatanodeProtocolProtos.ContainerInfo; +import org.apache.hadoop.hdds.protocol.proto +.StorageContainerDatanodeProtocolProtos.ContainerAction; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; import org.apache.hadoop.ozone.container.common.interfaces.Container; import org.apache.hadoop.ozone.container.common.interfaces.Handler; +import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.ozone.container.common.volume.VolumeSet; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos @@ -35,11 +44,14 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos .ContainerCommandResponseProto; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos .ContainerType; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos +.ContainerLifeCycleState; import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.Map; +import java.util.Optional; /** * Ozone Container dispatcher takes a call from the netty server and routes it 
@@ -53,6 +65,8 @@ public class HddsDispatcher implements ContainerDispatcher { private final Configuration conf; private final ContainerSet containerSet; private final VolumeSet volumeSet; + private final StateContext context; + private final float containerCloseThreshold; private String scmID; private ContainerMetrics metrics; @@ -61,10 +75,11 @@ public class HddsDispatcher implements ContainerDispatcher { * XceiverServerHandler. */ public HddsDispatcher(Configuration config, ContainerSet contSet, - VolumeSet volumes) { + VolumeSet volumes, StateContext context) { this.conf = config; this.containerSet = contSet;
[02/50] [abbrv] hadoop git commit: YARN-8517. getContainer and getContainers ResourceManager REST API methods are not documented (snemeth via rkanter)
YARN-8517. getContainer and getContainers ResourceManager REST API methods are not documented (snemeth via rkanter) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2cccf406 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2cccf406 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2cccf406 Branch: refs/heads/YARN-7402 Commit: 2cccf4061cc4021c48e29879700dbc94f832b7d1 Parents: fecbac4 Author: Robert Kanter Authored: Fri Jul 27 14:35:03 2018 -0700 Committer: Robert Kanter Committed: Fri Jul 27 14:35:03 2018 -0700 -- .../InvalidResourceRequestException.java| 36 ++ .../resourcemanager/DefaultAMSProcessor.java| 23 +- .../scheduler/SchedulerUtils.java | 55 +- .../scheduler/TestSchedulerUtils.java | 630 ++- 4 files changed, 430 insertions(+), 314 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cccf406/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/InvalidResourceRequestException.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/InvalidResourceRequestException.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/InvalidResourceRequestException.java index f4fd2fa..1ea9eef 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/InvalidResourceRequestException.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/InvalidResourceRequestException.java @@ -30,19 +30,55 @@ import org.apache.hadoop.yarn.api.records.ResourceRequest; * */ public class InvalidResourceRequestException extends YarnException { + public static final String LESS_THAN_ZERO_RESOURCE_MESSAGE_TEMPLATE = + "Invalid resource request! Cannot allocate containers as " + + "requested resource is less than 0! 
" + + "Requested resource type=[%s], " + "Requested resource=%s"; + + public static final String GREATER_THAN_MAX_RESOURCE_MESSAGE_TEMPLATE = + "Invalid resource request! Cannot allocate containers as " + + "requested resource is greater than " + + "maximum allowed allocation. " + + "Requested resource type=[%s], " + + "Requested resource=%s, maximum allowed allocation=%s, " + + "please note that maximum allowed allocation is calculated " + + "by scheduler based on maximum resource of registered " + + "NodeManagers, which might be less than configured " + + "maximum allocation=%s"; + + public static final String UNKNOWN_REASON_MESSAGE_TEMPLATE = + "Invalid resource request! " + + "Cannot allocate containers for an unknown reason! " + + "Requested resource type=[%s], Requested resource=%s"; + + public enum InvalidResourceType { +LESS_THAN_ZERO, GREATER_THEN_MAX_ALLOCATION, UNKNOWN; + } private static final long serialVersionUID = 13498237L; + private final InvalidResourceType invalidResourceType; public InvalidResourceRequestException(Throwable cause) { super(cause); +this.invalidResourceType = InvalidResourceType.UNKNOWN; } public InvalidResourceRequestException(String message) { +this(message, InvalidResourceType.UNKNOWN); + } + + public InvalidResourceRequestException(String message, + InvalidResourceType invalidResourceType) { super(message); +this.invalidResourceType = invalidResourceType; } public InvalidResourceRequestException(String message, Throwable cause) { super(message, cause); +this.invalidResourceType = InvalidResourceType.UNKNOWN; } + public InvalidResourceType getInvalidResourceType() { +return invalidResourceType; + } } http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cccf406/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java -- diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/DefaultAMSProcessor.java index 71558a7..43f73e4 100644 ---
[25/50] [abbrv] hadoop git commit: HDDS-271. Create a block iterator to iterate blocks in a container. Contributed by Bharat Viswanadham.
HDDS-271. Create a block iterator to iterate blocks in a container. Contributed by Bharat Viswanadham. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c835fc08 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c835fc08 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c835fc08 Branch: refs/heads/YARN-7402 Commit: c835fc08adf556d2f848f2f241155cbfe3375695 Parents: c7ebcd7 Author: Bharat Viswanadham Authored: Tue Jul 31 16:26:09 2018 -0700 Committer: Bharat Viswanadham Committed: Tue Jul 31 16:26:09 2018 -0700 -- .../apache/hadoop/utils/MetaStoreIterator.java | 2 +- .../common/interfaces/BlockIterator.java| 57 .../keyvalue/KeyValueBlockIterator.java | 148 ++ .../keyvalue/TestKeyValueBlockIterator.java | 275 +++ 4 files changed, 481 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/c835fc08/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetaStoreIterator.java -- diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetaStoreIterator.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetaStoreIterator.java index 758d194..52d0a3e 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetaStoreIterator.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetaStoreIterator.java @@ -24,7 +24,7 @@ import java.util.Iterator; * Iterator for MetaDataStore DB. * @param */ -interface MetaStoreIterator extends Iterator { +public interface MetaStoreIterator extends Iterator { /** * seek to first entry. 
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c835fc08/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/BlockIterator.java -- diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/BlockIterator.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/BlockIterator.java new file mode 100644 index 000..f6931e3 --- /dev/null +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/BlockIterator.java @@ -0,0 +1,57 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.container.common.interfaces; + + +import java.io.IOException; +import java.util.NoSuchElementException; + +/** + * Block Iterator for container. Each container type need to implement this + * interface. + * @param + */ +public interface BlockIterator { + + /** + * This checks if iterator has next element. If it has returns true, + * otherwise false. + * @return boolean + */ + boolean hasNext() throws IOException; + + /** + * Seek to first entry. + */ + void seekToFirst(); + + /** + * Seek to last entry. 
+ */ + void seekToLast(); + + /** + * Get next block in the container. + * @return next block or null if there are no blocks + * @throws IOException + */ + T nextBlock() throws IOException, NoSuchElementException; + + +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/c835fc08/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java -- diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java new file mode 100644 index 000..f800223 --- /dev/null +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java @@ -0,0 +1,148 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license
[28/50] [abbrv] hadoop git commit: HDDS-226. Client should update block length in OM while committing the key. Contributed by Shashikant Banerjee.
HDDS-226. Client should update block length in OM while committing the key. Contributed by Shashikant Banerjee. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f4db753b Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f4db753b Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f4db753b Branch: refs/heads/YARN-7402 Commit: f4db753bb6b4648c583722dbe8108973c23ba06f Parents: 6310c0d Author: Mukul Kumar Singh Authored: Wed Aug 1 09:02:43 2018 +0530 Committer: Mukul Kumar Singh Committed: Wed Aug 1 09:03:00 2018 +0530 -- .../ozone/client/io/ChunkGroupOutputStream.java | 22 +++- .../hadoop/ozone/om/helpers/OmKeyArgs.java | 26 --- .../hadoop/ozone/om/helpers/OmKeyInfo.java | 29 ++-- .../ozone/om/helpers/OmKeyLocationInfo.java | 6 +++- ...neManagerProtocolClientSideTranslatorPB.java | 8 - .../src/main/proto/OzoneManagerProtocol.proto | 1 + .../ozone/client/rpc/TestOzoneRpcClient.java| 35 .../hadoop/ozone/om/TestOmBlockVersioning.java | 13 +++- .../apache/hadoop/ozone/om/KeyManagerImpl.java | 4 +++ ...neManagerProtocolServerSideTranslatorPB.java | 5 ++- 10 files changed, 138 insertions(+), 11 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4db753b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java -- diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java index 9443317..83b4dfd 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java @@ -76,7 +76,7 @@ public class ChunkGroupOutputStream extends OutputStream { private final int chunkSize; private final String requestID; private boolean closed; - + private List locationInfoList; 
/** * A constructor for testing purpose only. */ @@ -91,6 +91,7 @@ public class ChunkGroupOutputStream extends OutputStream { chunkSize = 0; requestID = null; closed = false; +locationInfoList = null; } /** @@ -133,6 +134,7 @@ public class ChunkGroupOutputStream extends OutputStream { this.xceiverClientManager = xceiverClientManager; this.chunkSize = chunkSize; this.requestID = requestId; +this.locationInfoList = new ArrayList<>(); LOG.debug("Expecting open key with one block, but got" + info.getKeyLocationVersions().size()); } @@ -196,8 +198,19 @@ public class ChunkGroupOutputStream extends OutputStream { streamEntries.add(new ChunkOutputStreamEntry(subKeyInfo.getBlockID(), keyArgs.getKeyName(), xceiverClientManager, xceiverClient, requestID, chunkSize, subKeyInfo.getLength())); +// reset the original length to zero here. It will be updated as and when +// the data gets written. +subKeyInfo.setLength(0); +locationInfoList.add(subKeyInfo); } + private void incrementBlockLength(int index, long length) { +if (locationInfoList != null) { + OmKeyLocationInfo locationInfo = locationInfoList.get(index); + long originalLength = locationInfo.getLength(); + locationInfo.setLength(originalLength + length); +} + } @VisibleForTesting public long getByteOffset() { @@ -222,6 +235,7 @@ public class ChunkGroupOutputStream extends OutputStream { } ChunkOutputStreamEntry entry = streamEntries.get(currentStreamIndex); entry.write(b); +incrementBlockLength(currentStreamIndex, 1); if (entry.getRemaining() <= 0) { currentStreamIndex += 1; } @@ -276,6 +290,7 @@ public class ChunkGroupOutputStream extends OutputStream { ChunkOutputStreamEntry current = streamEntries.get(currentStreamIndex); int writeLen = Math.min(len, (int)current.getRemaining()); current.write(b, off, writeLen); + incrementBlockLength(currentStreamIndex, writeLen); if (current.getRemaining() <= 0) { currentStreamIndex += 1; } @@ -328,8 +343,13 @@ public class ChunkGroupOutputStream extends OutputStream { } if (keyArgs 
!= null) { // in test, this could be null + long length = + locationInfoList.parallelStream().mapToLong(e -> e.getLength()).sum(); + Preconditions.checkState(byteOffset == length); keyArgs.setDataSize(byteOffset); + keyArgs.setLocationInfoList(locationInfoList); omClient.commitKey(keyArgs, openID); + locationInfoList = null; } else { LOG.warn("Closing
[17/50] [abbrv] hadoop git commit: HDDS-293. Reduce memory usage and object creation in KeyData.
HDDS-293. Reduce memory usage and object creation in KeyData. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ee53602a Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ee53602a Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ee53602a Branch: refs/heads/YARN-7402 Commit: ee53602a8179e76f4102d9062d0bebe8bb09d875 Parents: 2b39ad2 Author: Tsz Wo Nicholas Sze Authored: Mon Jul 30 15:00:29 2018 -0700 Committer: Tsz Wo Nicholas Sze Committed: Mon Jul 30 15:00:29 2018 -0700 -- .../ozone/container/common/helpers/KeyData.java | 84 + .../common/impl/OpenContainerBlockMap.java | 2 +- .../container/keyvalue/KeyValueHandler.java | 3 - .../container/common/helpers/TestKeyData.java | 119 +++ 4 files changed, 179 insertions(+), 29 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee53602a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java -- diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java index 1919ed9..84a6f71 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.container.common.helpers; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.client.BlockID; +import com.google.common.base.Preconditions; import java.io.IOException; import java.util.Collections; @@ -35,11 +36,17 @@ public class KeyData { private final Map metadata; /** + * Represent a list of chunks. + * In order to reduce memory usage, chunkList is declared as an {@link Object}. + * When #elements == 0, chunkList is null. 
+ * When #elements == 1, chunkList refers to the only element. + * When #elements > 1, chunkList refers to the list. + * * Please note : when we are working with keys, we don't care what they point * to. So we We don't read chunkinfo nor validate them. It is responsibility * of higher layer like ozone. We just read and write data from network. */ - private List chunks; + private Object chunkList; /** * total size of the key. @@ -73,7 +80,7 @@ public class KeyData { } keyData.setChunks(data.getChunksList()); if (data.hasSize()) { - keyData.setSize(data.getSize()); + Preconditions.checkArgument(data.getSize() == keyData.getSize()); } return keyData; } @@ -86,13 +93,13 @@ public class KeyData { ContainerProtos.KeyData.Builder builder = ContainerProtos.KeyData.newBuilder(); builder.setBlockID(this.blockID.getDatanodeBlockIDProtobuf()); -builder.addAllChunks(this.chunks); for (Map.Entry entry : metadata.entrySet()) { ContainerProtos.KeyValue.Builder keyValBuilder = ContainerProtos.KeyValue.newBuilder(); builder.addMetadata(keyValBuilder.setKey(entry.getKey()) .setValue(entry.getValue()).build()); } +builder.addAllChunks(getChunks()); builder.setSize(size); return builder.build(); } @@ -132,30 +139,65 @@ public class KeyData { metadata.remove(key); } + @SuppressWarnings("unchecked") + private List castChunkList() { +return (List)chunkList; + } + /** * Returns chunks list. * * @return list of chunkinfo. */ public List getChunks() { -return chunks; +return chunkList == null? Collections.emptyList() +: chunkList instanceof ContainerProtos.ChunkInfo? 
+Collections.singletonList((ContainerProtos.ChunkInfo)chunkList) +: Collections.unmodifiableList(castChunkList()); } /** * Adds chunkInfo to the list */ public void addChunk(ContainerProtos.ChunkInfo chunkInfo) { -if (chunkList == null) { + chunkList = chunkInfo; +} else { + final List list; + if (chunkList instanceof ContainerProtos.ChunkInfo) { +list = new ArrayList<>(2); +list.add((ContainerProtos.ChunkInfo)chunkList); +chunkList = list; + } else { +list = castChunkList(); + } + list.add(chunkInfo); } -chunks.add(chunkInfo); +size += chunkInfo.getLen(); } /** * removes the chunk. */ - public void removeChunk(ContainerProtos.ChunkInfo chunkInfo) { -chunks.remove(chunkInfo); + public boolean removeChunk(ContainerProtos.ChunkInfo chunkInfo) { +final
[35/50] [abbrv] hadoop git commit: YARN-8610. Fixed initiate upgrade error message. Contributed by Chandni Singh
YARN-8610. Fixed initiate upgrade error message. Contributed by Chandni Singh Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/23f39424 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/23f39424 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/23f39424 Branch: refs/heads/YARN-7402 Commit: 23f394240e1568a38025e63e9dc0842e8c5235f7 Parents: f2e29ac Author: Eric Yang Authored: Wed Aug 1 20:41:43 2018 -0400 Committer: Eric Yang Committed: Wed Aug 1 20:41:43 2018 -0400 -- .../java/org/apache/hadoop/yarn/service/client/ServiceClient.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/23f39424/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java index 4b67998..5668d9f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java @@ -257,7 +257,7 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes, if (!liveService.getState().equals(ServiceState.STABLE)) { String message = service.getName() + " is at " + liveService.getState() - + " state, upgrade can not be invoked when service is STABLE."; + + " state and upgrade can 
only be initiated when service is STABLE."; LOG.error(message); throw new YarnException(message); } - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[29/50] [abbrv] hadoop git commit: YARN-8522. Application fails with InvalidResourceRequestException. (Zian Chen via wangda)
YARN-8522. Application fails with InvalidResourceRequestException. (Zian Chen via wangda) Change-Id: I34dd7fa49bd4d10580c4a78051033b1068d28f1e Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5cc8e991 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5cc8e991 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5cc8e991 Branch: refs/heads/YARN-7402 Commit: 5cc8e99147276a059979813f7fd323dd7d77b248 Parents: f4db753 Author: Wangda Tan Authored: Tue Jul 31 17:48:44 2018 -0700 Committer: Wangda Tan Committed: Tue Jul 31 22:34:53 2018 -0700 -- .../pb/ApplicationSubmissionContextPBImpl.java | 87 +++- 1 file changed, 46 insertions(+), 41 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/5cc8e991/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java index 0c91e18..b30224e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java @@ -84,7 +84,7 @@ extends ApplicationSubmissionContext { viaProto = true; } - public ApplicationSubmissionContextProto getProto() { + public synchronized ApplicationSubmissionContextProto getProto() { mergeLocalToProto(); proto = viaProto ? 
proto : builder.build(); viaProto = true; @@ -164,7 +164,7 @@ extends ApplicationSubmissionContext { } @Override - public Priority getPriority() { + public synchronized Priority getPriority() { ApplicationSubmissionContextProtoOrBuilder p = viaProto ? proto : builder; if (this.priority != null) { return this.priority; @@ -177,7 +177,7 @@ extends ApplicationSubmissionContext { } @Override - public void setPriority(Priority priority) { + public synchronized void setPriority(Priority priority) { maybeInitBuilder(); if (priority == null) builder.clearPriority(); @@ -185,7 +185,7 @@ extends ApplicationSubmissionContext { } @Override - public ApplicationId getApplicationId() { + public synchronized ApplicationId getApplicationId() { ApplicationSubmissionContextProtoOrBuilder p = viaProto ? proto : builder; if (this.applicationId != null) { return applicationId; @@ -198,7 +198,7 @@ extends ApplicationSubmissionContext { } @Override - public void setApplicationId(ApplicationId applicationId) { + public synchronized void setApplicationId(ApplicationId applicationId) { maybeInitBuilder(); if (applicationId == null) builder.clearApplicationId(); @@ -206,7 +206,7 @@ extends ApplicationSubmissionContext { } @Override - public String getApplicationName() { + public synchronized String getApplicationName() { ApplicationSubmissionContextProtoOrBuilder p = viaProto ? proto : builder; if (!p.hasApplicationName()) { return null; @@ -215,7 +215,7 @@ extends ApplicationSubmissionContext { } @Override - public void setApplicationName(String applicationName) { + public synchronized void setApplicationName(String applicationName) { maybeInitBuilder(); if (applicationName == null) { builder.clearApplicationName(); @@ -225,7 +225,7 @@ extends ApplicationSubmissionContext { } @Override - public String getQueue() { + public synchronized String getQueue() { ApplicationSubmissionContextProtoOrBuilder p = viaProto ? 
proto : builder; if (!p.hasQueue()) { return null; @@ -234,7 +234,7 @@ extends ApplicationSubmissionContext { } @Override - public String getApplicationType() { + public synchronized String getApplicationType() { ApplicationSubmissionContextProtoOrBuilder p = viaProto ? proto : builder; if (!p.hasApplicationType()) { return null; @@ -252,13 +252,13 @@ extends ApplicationSubmissionContext { } @Override - public Set getApplicationTags() { + public synchronized Set getApplicationTags() { initApplicationTags(); return this.applicationTags; } @Override - public void setQueue(String queue) { + public synchronized void setQueue(String queue) { maybeInitBuilder(); if (queue == null) {
[12/50] [abbrv] hadoop git commit: HDFS-12716. 'dfs.datanode.failed.volumes.tolerated' to support minimum number of volumes to be available. Contributed by Ranith Sardar and usharani
HDFS-12716. 'dfs.datanode.failed.volumes.tolerated' to support minimum number of volumes to be available. Contributed by Ranith Sardar and usharani Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3108d27e Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3108d27e Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3108d27e Branch: refs/heads/YARN-7402 Commit: 3108d27edde941d153a58f71fb1096cce2995531 Parents: 63e08ec Author: Brahma Reddy Battula Authored: Mon Jul 30 15:50:04 2018 +0530 Committer: Brahma Reddy Battula Committed: Mon Jul 30 15:50:04 2018 +0530 -- .../hadoop/hdfs/server/datanode/DataNode.java | 7 +++- .../datanode/checker/DatasetVolumeChecker.java | 6 ++- .../checker/StorageLocationChecker.java | 28 ++ .../datanode/fsdataset/impl/FsDatasetImpl.java | 40 .../src/main/resources/hdfs-default.xml | 2 + .../TestDataNodeVolumeFailureToleration.java| 6 ++- 6 files changed, 68 insertions(+), 21 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/3108d27e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index 7df92f6..1e9c57a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -293,6 +293,8 @@ public class DataNode extends ReconfigurableBase " and rolling upgrades."; static final int CURRENT_BLOCK_FORMAT_VERSION = 1; + public static final int MAX_VOLUME_FAILURE_TOLERATED_LIMIT = -1; + public static final String MAX_VOLUME_FAILURES_TOLERATED_MSG = "should be greater than -1"; /** A list of property that are reconfigurable at runtime. 
*/ private static final List RECONFIGURABLE_PROPERTIES = @@ -1389,10 +1391,11 @@ public class DataNode extends ReconfigurableBase int volFailuresTolerated = dnConf.getVolFailuresTolerated(); int volsConfigured = dnConf.getVolsConfigured(); -if (volFailuresTolerated < 0 || volFailuresTolerated >= volsConfigured) { +if (volFailuresTolerated < MAX_VOLUME_FAILURE_TOLERATED_LIMIT +|| volFailuresTolerated >= volsConfigured) { throw new DiskErrorException("Invalid value configured for " + "dfs.datanode.failed.volumes.tolerated - " + volFailuresTolerated - + ". Value configured is either less than 0 or >= " + + ". Value configured is either greater than -1 or >= " + "to the number of configured volumes (" + volsConfigured + ")."); } http://git-wip-us.apache.org/repos/asf/hadoop/blob/3108d27e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java index 3889e23..30602c0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/checker/DatasetVolumeChecker.java @@ -28,6 +28,7 @@ import com.google.common.util.concurrent.ListenableFuture; import com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference; import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi; @@ -153,10 +154,11 @@ public class 
DatasetVolumeChecker { lastAllVolumesCheck = timer.monotonicNow() - minDiskCheckGapMs; -if (maxVolumeFailuresTolerated < 0) { +if (maxVolumeFailuresTolerated < DataNode.MAX_VOLUME_FAILURE_TOLERATED_LIMIT) { throw new DiskErrorException("Invalid value configured for " + DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY + " - " - + maxVolumeFailuresTolerated + " (should be non-negative)"); + + maxVolumeFailuresTolerated + " " + + DataNode.MAX_VOLUME_FAILURES_TOLERATED_MSG); }
[19/50] [abbrv] hadoop git commit: HDDS-305. Datanode StateContext#addContainerActionIfAbsent will add container action even if there already is a ContainerAction. Contributed by Nanda kumar.
HDDS-305. Datanode StateContext#addContainerActionIfAbsent will add container action even if there already is a ContainerAction. Contributed by Nanda kumar. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7631e0ad Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7631e0ad Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7631e0ad Branch: refs/heads/YARN-7402 Commit: 7631e0adaefcccdbee693089b4c391bea4107a19 Parents: 3e06a5d Author: Nanda kumar Authored: Tue Jul 31 17:27:51 2018 +0530 Committer: Nanda kumar Committed: Tue Jul 31 17:27:51 2018 +0530 -- .../ozone/container/common/impl/HddsDispatcher.java| 13 + .../main/proto/StorageContainerDatanodeProtocol.proto | 2 +- .../states/endpoint/TestHeartbeatEndpointTask.java | 5 + 3 files changed, 3 insertions(+), 17 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/7631e0ad/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java -- diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java index ee232db..d92eb17 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java @@ -168,19 +168,8 @@ public class HddsDispatcher implements ContainerDispatcher { double containerUsedPercentage = 1.0f * containerData.getBytesUsed() / StorageUnit.GB.toBytes(containerData.getMaxSizeGB()); if (containerUsedPercentage >= containerCloseThreshold) { - -ContainerInfo containerInfo = ContainerInfo.newBuilder() -.setContainerID(containerData.getContainerID()) -.setReadCount(containerData.getReadCount()) 
-.setWriteCount(containerData.getWriteCount()) -.setReadBytes(containerData.getReadBytes()) -.setWriteBytes(containerData.getWriteBytes()) -.setUsed(containerData.getBytesUsed()) -.setState(HddsProtos.LifeCycleState.OPEN) -.build(); - ContainerAction action = ContainerAction.newBuilder() -.setContainer(containerInfo) +.setContainerID(containerData.getContainerID()) .setAction(ContainerAction.Action.CLOSE) .setReason(ContainerAction.Reason.CONTAINER_FULL) .build(); http://git-wip-us.apache.org/repos/asf/hadoop/blob/7631e0ad/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto -- diff --git a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto index 0c52efb..71c41e3 100644 --- a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto +++ b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto @@ -157,7 +157,7 @@ message ContainerAction { CONTAINER_FULL = 1; } - required ContainerInfo container = 1; + required int64 containerID = 1; required Action action = 2; optional Reason reason = 3; } http://git-wip-us.apache.org/repos/asf/hadoop/blob/7631e0ad/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java -- diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java index b4d718d..13de11f 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java @@ -289,10 
+289,7 @@ public class TestHeartbeatEndpointTask { private ContainerAction getContainerAction() { ContainerAction.Builder builder = ContainerAction.newBuilder(); -ContainerInfo containerInfo = ContainerInfo.newBuilder() -.setContainerID(1L) -.build(); -builder.setContainer(containerInfo) +builder.setContainerID(1L) .setAction(ContainerAction.Action.CLOSE)
[27/50] [abbrv] hadoop git commit: YARN-8397. Potential thread leak in ActivitiesManager. Contributed by Rohith Sharma K S.
YARN-8397. Potential thread leak in ActivitiesManager. Contributed by Rohith Sharma K S. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6310c0d1 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6310c0d1 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6310c0d1 Branch: refs/heads/YARN-7402 Commit: 6310c0d17d6422a595f856a55b4f1fb82be43739 Parents: 40f9b0c Author: Sunil G Authored: Wed Aug 1 08:33:01 2018 +0530 Committer: Sunil G Committed: Wed Aug 1 08:33:30 2018 +0530 -- .../scheduler/activities/ActivitiesManager.java | 20 +++- .../scheduler/capacity/CapacityScheduler.java | 1 + 2 files changed, 16 insertions(+), 5 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/6310c0d1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java index af73ae3..8498c40 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesManager.java @@ -57,6 +57,7 @@ public class ActivitiesManager extends AbstractService { private Thread cleanUpThread; private int timeThreshold = 600 * 1000; private final RMContext rmContext; + private volatile 
boolean stopped; public ActivitiesManager(RMContext rmContext) { super(ActivitiesManager.class.getName()); @@ -113,7 +114,7 @@ public class ActivitiesManager extends AbstractService { cleanUpThread = new Thread(new Runnable() { @Override public void run() { -while (true) { +while (!stopped && !Thread.currentThread().isInterrupted()) { Iterator>> ite = completedNodeAllocations.entrySet().iterator(); while (ite.hasNext()) { @@ -140,20 +141,29 @@ public class ActivitiesManager extends AbstractService { try { Thread.sleep(5000); - } catch (Exception e) { -// ignore + } catch (InterruptedException e) { +LOG.info(getName() + " thread interrupted"); +break; } } } }); - +cleanUpThread.setName("ActivitiesManager thread."); cleanUpThread.start(); super.serviceStart(); } @Override protected void serviceStop() throws Exception { -cleanUpThread.interrupt(); +stopped = true; +if (cleanUpThread != null) { + cleanUpThread.interrupt(); + try { +cleanUpThread.join(); + } catch (InterruptedException ie) { +LOG.warn("Interrupted Exception while stopping", ie); + } +} super.serviceStop(); } http://git-wip-us.apache.org/repos/asf/hadoop/blob/6310c0d1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java index 37f56de..0b7fe92 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java @@ -439,6 +439,7 @@ public class CapacityScheduler extends public void serviceStop() throws Exception { try { writeLock.lock(); + this.activitiesManager.stop(); if (scheduleAsynchronously && asyncSchedulerThreads != null)
[42/50] [abbrv] hadoop git commit: YARN-8318. [UI2] IP address in component page shows N/A. Contributed by Yesha Vora.
YARN-8318. [UI2] IP address in component page shows N/A. Contributed by Yesha Vora. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5033d7da Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5033d7da Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5033d7da Branch: refs/heads/YARN-7402 Commit: 5033d7da8f6f703d8774492c42e31e9b9cb692a5 Parents: e83719c Author: Sunil G Authored: Thu Aug 2 20:09:24 2018 +0530 Committer: Sunil G Committed: Thu Aug 2 20:09:24 2018 +0530 -- .../src/main/webapp/app/templates/yarn-component-instance/info.hbs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/5033d7da/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instance/info.hbs -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instance/info.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instance/info.hbs index ef517d0..553f4e8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instance/info.hbs +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-component-instance/info.hbs @@ -59,7 +59,7 @@ IP Address -{{check-availability model.container.ip}} +{{check-availability model.container.ipAddr}} Exit Status Code - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[15/50] [abbrv] hadoop git commit: HADOOP-15637. LocalFs#listLocatedStatus does not filter out hidden .crc files. Contributed by Erik Krogen.
HADOOP-15637. LocalFs#listLocatedStatus does not filter out hidden .crc files. Contributed by Erik Krogen. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e8f952ef Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e8f952ef Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e8f952ef Branch: refs/heads/YARN-7402 Commit: e8f952ef06ae05d2b504300d6f19beb8a052b6f1 Parents: 3517a47 Author: Chen Liang Authored: Mon Jul 30 10:25:07 2018 -0700 Committer: Chen Liang Committed: Mon Jul 30 10:25:07 2018 -0700 -- .../java/org/apache/hadoop/fs/ChecksumFs.java | 37 +++ .../fs/FileContextMainOperationsBaseTest.java | 38 2 files changed, 75 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8f952ef/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java -- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java index 75622ad..c56f6e0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java @@ -27,10 +27,12 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.EnumSet; +import java.util.NoSuchElementException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.fs.Options.ChecksumOpt; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.util.DataChecksum; import org.apache.hadoop.util.Progressable; import org.slf4j.Logger; @@ -527,4 +529,39 @@ public abstract class ChecksumFs extends FilterFs { } return results.toArray(new FileStatus[results.size()]); } + + @Override + public RemoteIterator 
listLocatedStatus(final Path f) + throws AccessControlException, FileNotFoundException, + UnresolvedLinkException, IOException { +final RemoteIterator iter = +getMyFs().listLocatedStatus(f); +return new RemoteIterator() { + + private LocatedFileStatus next = null; + + @Override + public boolean hasNext() throws IOException { +while (next == null && iter.hasNext()) { + LocatedFileStatus unfilteredNext = iter.next(); + if (!isChecksumFile(unfilteredNext.getPath())) { +next = unfilteredNext; + } +} +return next != null; + } + + @Override + public LocatedFileStatus next() throws IOException { +if (!hasNext()) { + throw new NoSuchElementException(); +} +LocatedFileStatus tmp = next; +next = null; +return tmp; + } + +}; + } + } http://git-wip-us.apache.org/repos/asf/hadoop/blob/e8f952ef/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java -- diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java index 62ecd9f..c07a6ff 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java @@ -369,6 +369,44 @@ public abstract class FileContextMainOperationsBaseTest { pathsIterator = fc.listStatus(getTestRootPath(fc, "test/hadoop/a")); Assert.assertFalse(pathsIterator.hasNext()); } + + @Test + public void testListFiles() throws Exception { +Path[] testDirs = { +getTestRootPath(fc, "test/dir1"), +getTestRootPath(fc, "test/dir1/dir1"), +getTestRootPath(fc, "test/dir2") +}; +Path[] testFiles = { +new Path(testDirs[0], "file1"), +new Path(testDirs[0], "file2"), +new Path(testDirs[1], "file2"), +new Path(testDirs[2], "file1") +}; + +for (Path path : testDirs) { + fc.mkdir(path, 
FsPermission.getDefault(), true); +} +for (Path p : testFiles) { + FSDataOutputStream out = fc.create(p).build(); + out.writeByte(0); + out.close(); +} + +RemoteIterator filesIterator = +fc.util().listFiles(getTestRootPath(fc, "test"), true); +LocatedFileStatus[] fileStats = +new
[34/50] [abbrv] hadoop git commit: HADOOP-15476. fix logging for split-dns multihome . Contributed by Ajay Kumar.
HADOOP-15476. fix logging for split-dns multihome . Contributed by Ajay Kumar. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f2e29acb Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f2e29acb Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f2e29acb Branch: refs/heads/YARN-7402 Commit: f2e29acbfa0b7e1fcecbdcf3e791c96114b456a5 Parents: 603a574 Author: Arpit Agarwal Authored: Wed Aug 1 12:32:01 2018 -0700 Committer: Arpit Agarwal Committed: Wed Aug 1 12:32:01 2018 -0700 -- .../src/main/java/org/apache/hadoop/ipc/Client.java| 6 ++ 1 file changed, 2 insertions(+), 4 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2e29acb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java -- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java index 163e80d..e147048 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java @@ -677,7 +677,8 @@ public class Client implements AutoCloseable { this.socket.setReuseAddress(true); localAddr = NetUtils.bindToLocalAddress(localAddr, bindToWildCardAddress); -LOG.debug("Binding {} to {}", principal, localAddr); +LOG.debug("Binding {} to {}", principal, +(bindToWildCardAddress) ? "0.0.0.0" : localAddr); this.socket.bind(new InetSocketAddress(localAddr, 0)); } } @@ -1281,9 +1282,6 @@ public class Client implements AutoCloseable { this.bindToWildCardAddress = conf .getBoolean(CommonConfigurationKeys.IPC_CLIENT_BIND_WILDCARD_ADDR_KEY, CommonConfigurationKeys.IPC_CLIENT_BIND_WILDCARD_ADDR_DEFAULT); -LOG.debug("{} set to true. 
Will bind client sockets to wildcard " -+ "address.", -CommonConfigurationKeys.IPC_CLIENT_BIND_WILDCARD_ADDR_KEY); this.clientId = ClientId.getClientId(); this.sendParamsExecutor = clientExcecutorFactory.refAndGetInstance(); - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[31/50] [abbrv] hadoop git commit: YARN-8595. [UI2] Container diagnostic information is missing from container page. Contributed by Akhil PB.
YARN-8595. [UI2] Container diagnostic information is missing from container page. Contributed by Akhil PB. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d920b9db Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d920b9db Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d920b9db Branch: refs/heads/YARN-7402 Commit: d920b9db77be44adc4f8a2a0c2df889af82be04f Parents: a48a0cc Author: Sunil G Authored: Wed Aug 1 14:27:54 2018 +0530 Committer: Sunil G Committed: Wed Aug 1 14:27:54 2018 +0530 -- .../main/webapp/app/models/yarn-app-attempt.js | 1 + .../app/models/yarn-timeline-container.js | 1 + .../webapp/app/serializers/yarn-app-attempt.js | 3 +- .../app/serializers/yarn-timeline-container.js | 6 +-- .../src/main/webapp/app/styles/app.scss | 9 .../templates/components/app-attempt-table.hbs | 6 +++ .../app/templates/components/timeline-view.hbs | 44 ++-- 7 files changed, 51 insertions(+), 19 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/d920b9db/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-attempt.js -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-attempt.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-attempt.js index cffe198..f483695 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-attempt.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app-attempt.js @@ -32,6 +32,7 @@ export default DS.Model.extend({ logsLink: DS.attr('string'), state: DS.attr('string'), appAttemptId: DS.attr('string'), + diagnosticsInfo: DS.attr('string'), appId: Ember.computed("id",function () { var id = this.get("id"); http://git-wip-us.apache.org/repos/asf/hadoop/blob/d920b9db/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-timeline-container.js -- 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-timeline-container.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-timeline-container.js index 7482a2f..9384418 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-timeline-container.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-timeline-container.js @@ -31,6 +31,7 @@ export default DS.Model.extend({ containerState: DS.attr('string'), nodeHttpAddress: DS.attr('string'), nodeId: DS.attr('string'), + diagnosticsInfo: DS.attr('string'), startTs: function() { return Converter.dateToTimeStamp(this.get("startedTime")); http://git-wip-us.apache.org/repos/asf/hadoop/blob/d920b9db/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-attempt.js -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-attempt.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-attempt.js index f8f598b..55f484b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-attempt.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app-attempt.js @@ -40,7 +40,8 @@ export default DS.JSONAPISerializer.extend({ hosts: payload.host, state: payload.appAttemptState, logsLink: payload.logsLink, - appAttemptId: payload.appAttemptId + appAttemptId: payload.appAttemptId, + diagnosticsInfo: payload.diagnosticsInfo } }; http://git-wip-us.apache.org/repos/asf/hadoop/blob/d920b9db/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-timeline-container.js -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-timeline-container.js 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-timeline-container.js index 1322972..99ab6c4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-timeline-container.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-timeline-container.js @@ -22,11 +22,6 @@ import Converter from 'yarn-ui/utils/converter'; export default DS.JSONAPISerializer.extend({
[20/50] [abbrv] hadoop git commit: HDDS-279. DeleteBlocks command should not be sent for open containers. Contributed by Lokesh Jain.
HDDS-279. DeleteBlocks command should not be sent for open containers. Contributed by Lokesh Jain. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b28bdc7e Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b28bdc7e Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b28bdc7e Branch: refs/heads/YARN-7402 Commit: b28bdc7e8b488ef0df62a92bcfe7eb74bbe177c1 Parents: 7631e0a Author: Mukul Kumar Singh Authored: Tue Jul 31 19:50:40 2018 +0530 Committer: Mukul Kumar Singh Committed: Tue Jul 31 19:50:40 2018 +0530 -- .../block/DatanodeDeletedBlockTransactions.java | 18 ++-- .../hdds/scm/block/DeletedBlockLogImpl.java | 8 +- .../org/apache/hadoop/ozone/OzoneTestUtils.java | 92 .../ozone/TestStorageContainerManager.java | 8 ++ .../common/TestBlockDeletingService.java| 17 +++- .../commandhandler/TestBlockDeletion.java | 47 -- .../hadoop/ozone/web/client/TestKeys.java | 3 + 7 files changed, 152 insertions(+), 41 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/b28bdc7e/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java -- diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java index e33a700..25420fe 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java @@ -28,6 +28,8 @@ import java.util.List; import java.util.Set; import java.util.UUID; import java.util.stream.Collectors; + +import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; /** @@ -53,21 +55,26 @@ public class 
DatanodeDeletedBlockTransactions { this.nodeNum = nodeNum; } - public void addTransaction(DeletedBlocksTransaction tx, - Set dnsWithTransactionCommitted) throws IOException { + public boolean addTransaction(DeletedBlocksTransaction tx, + Set dnsWithTransactionCommitted) { Pipeline pipeline = null; try { - pipeline = mappingService.getContainerWithPipeline(tx.getContainerID()) - .getPipeline(); + ContainerWithPipeline containerWithPipeline = + mappingService.getContainerWithPipeline(tx.getContainerID()); + if (containerWithPipeline.getContainerInfo().isContainerOpen()) { +return false; + } + pipeline = containerWithPipeline.getPipeline(); } catch (IOException e) { SCMBlockDeletingService.LOG.warn("Got container info error.", e); + return false; } if (pipeline == null) { SCMBlockDeletingService.LOG.warn( "Container {} not found, continue to process next", tx.getContainerID()); - return; + return false; } for (DatanodeDetails dd : pipeline.getMachines()) { @@ -78,6 +85,7 @@ public class DatanodeDeletedBlockTransactions { addTransactionToDN(dnID, tx); } } +return true; } private void addTransactionToDN(UUID dnID, DeletedBlocksTransaction tx) { http://git-wip-us.apache.org/repos/asf/hadoop/blob/b28bdc7e/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java -- diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java index 752c9c7..ca4e1d0 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java @@ -386,9 +386,11 @@ public class DeletedBlockLogImpl implements DeletedBlockLog { .parseFrom(value); if (block.getCount() > -1 && block.getCount() <= maxRetry) { -Set dnsWithTransactionCommitted = transactionToDNsCommitMap 
-.putIfAbsent(block.getTxID(), new ConcurrentHashSet<>()); -transactions.addTransaction(block, dnsWithTransactionCommitted); +if (transactions.addTransaction(block, +transactionToDNsCommitMap.get(block.getTxID( { + transactionToDNsCommitMap + .putIfAbsent(block.getTxID(), new ConcurrentHashSet<>()); +} }
[48/50] [abbrv] hadoop git commit: YARN-6648. [GPG] Add SubClusterCleaner in Global Policy Generator. (botong)
YARN-6648. [GPG] Add SubClusterCleaner in Global Policy Generator. (botong) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f833e1b3 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f833e1b3 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f833e1b3 Branch: refs/heads/YARN-7402 Commit: f833e1b300758e7c7622e2ca93c2dd164ec6d73d Parents: 48a8379 Author: Botong Huang Authored: Thu Feb 1 14:43:48 2018 -0800 Committer: Botong Huang Committed: Thu Aug 2 09:59:48 2018 -0700 -- .../dev-support/findbugs-exclude.xml| 5 + .../hadoop/yarn/conf/YarnConfiguration.java | 18 +++ .../src/main/resources/yarn-default.xml | 24 .../store/impl/MemoryFederationStateStore.java | 13 ++ .../utils/FederationStateStoreFacade.java | 41 ++- .../GlobalPolicyGenerator.java | 92 ++- .../subclustercleaner/SubClusterCleaner.java| 109 + .../subclustercleaner/package-info.java | 19 +++ .../TestSubClusterCleaner.java | 118 +++ 9 files changed, 409 insertions(+), 30 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/f833e1b3/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml -- diff --git a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml index 216c3bd..9fcafad 100644 --- a/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml +++ b/hadoop-yarn-project/hadoop-yarn/dev-support/findbugs-exclude.xml @@ -387,6 +387,11 @@ + + + + + http://git-wip-us.apache.org/repos/asf/hadoop/blob/f833e1b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index bbf877f..ec88411 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -3342,6 +3342,24 @@ public class YarnConfiguration extends Configuration { public static final boolean DEFAULT_ROUTER_WEBAPP_PARTIAL_RESULTS_ENABLED = false; + private static final String FEDERATION_GPG_PREFIX = + FEDERATION_PREFIX + "gpg."; + + // The number of threads to use for the GPG scheduled executor service + public static final String GPG_SCHEDULED_EXECUTOR_THREADS = + FEDERATION_GPG_PREFIX + "scheduled.executor.threads"; + public static final int DEFAULT_GPG_SCHEDULED_EXECUTOR_THREADS = 10; + + // The interval at which the subcluster cleaner runs, -1 means disabled + public static final String GPG_SUBCLUSTER_CLEANER_INTERVAL_MS = + FEDERATION_GPG_PREFIX + "subcluster.cleaner.interval-ms"; + public static final long DEFAULT_GPG_SUBCLUSTER_CLEANER_INTERVAL_MS = -1; + + // The expiration time for a subcluster heartbeat, default is 30 minutes + public static final String GPG_SUBCLUSTER_EXPIRATION_MS = + FEDERATION_GPG_PREFIX + "subcluster.heartbeat.expiration-ms"; + public static final long DEFAULT_GPG_SUBCLUSTER_EXPIRATION_MS = 180; + // Other Configs http://git-wip-us.apache.org/repos/asf/hadoop/blob/f833e1b3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml index 2cc842f..66493f3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml @@ -3533,6 +3533,30 @@ + The number of threads to use for the GPG scheduled executor service. 
+ +yarn.federation.gpg.scheduled.executor.threads +10 + + + + + The interval at which the subcluster cleaner runs, -1 means disabled. + +yarn.federation.gpg.subcluster.cleaner.interval-ms +-1 + + + + + The expiration time for a sub
[04/50] [abbrv] hadoop git commit: YARN-8508. Release GPU resource for killed container. Contributed by Chandni Singh
YARN-8508. Release GPU resource for killed container. Contributed by Chandni Singh Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ed9d60e8 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ed9d60e8 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ed9d60e8 Branch: refs/heads/YARN-7402 Commit: ed9d60e888d0acfd748fda7f66249f5b79a3ed6d Parents: 79091cf Author: Eric Yang Authored: Fri Jul 27 19:33:58 2018 -0400 Committer: Eric Yang Committed: Fri Jul 27 19:33:58 2018 -0400 -- .../nodemanager/LinuxContainerExecutor.java | 34 ++-- .../nodemanager/TestLinuxContainerExecutor.java | 9 +- 2 files changed, 25 insertions(+), 18 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed9d60e8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java index 03b88a4..4253f2f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java @@ -573,15 +573,7 @@ public class LinuxContainerExecutor extends ContainerExecutor { return handleExitCode(e, container, containerId); } finally { resourcesHandler.postExecute(containerId); - - try { -if (resourceHandlerChain != null) { - resourceHandlerChain.postComplete(containerId); -} - } catch 
(ResourceHandlerException e) { -LOG.warn("ResourceHandlerChain.postComplete failed for " + -"containerId: " + containerId + ". Exception: " + e); - } + postComplete(containerId); } return 0; @@ -721,14 +713,7 @@ public class LinuxContainerExecutor extends ContainerExecutor { return super.reacquireContainer(ctx); } finally { resourcesHandler.postExecute(containerId); - if (resourceHandlerChain != null) { -try { - resourceHandlerChain.postComplete(containerId); -} catch (ResourceHandlerException e) { - LOG.warn("ResourceHandlerChain.postComplete failed for " + - "containerId: " + containerId + " Exception: " + e); -} - } + postComplete(containerId); } } @@ -798,6 +783,8 @@ public class LinuxContainerExecutor extends ContainerExecutor { logOutput(e.getOutput()); throw new IOException("Error in reaping container " + container.getContainerId().toString() + " exit = " + retCode, e); +} finally { + postComplete(container.getContainerId()); } return true; } @@ -968,4 +955,17 @@ public class LinuxContainerExecutor extends ContainerExecutor { LOG.warn("Unable to remove docker container: " + containerId); } } + + @VisibleForTesting + void postComplete(final ContainerId containerId) { +try { + if (resourceHandlerChain != null) { +LOG.debug("{} post complete", containerId); +resourceHandlerChain.postComplete(containerId); + } +} catch (ResourceHandlerException e) { + LOG.warn("ResourceHandlerChain.postComplete failed for " + + "containerId: {}. 
Exception: ", containerId, e); +} + } } http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed9d60e8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java index ddbf3b9..6d77fc4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestLinuxContainerExecutor.java +++
[10/50] [abbrv] hadoop git commit: HADOOP-15607. AliyunOSS: fix duplicated partNumber issue in AliyunOSSBlockOutputStream. Contributed by Jinhu Wu.
HADOOP-15607. AliyunOSS: fix duplicated partNumber issue in AliyunOSSBlockOutputStream. Contributed by Jinhu Wu. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0857f116 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0857f116 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0857f116 Branch: refs/heads/YARN-7402 Commit: 0857f116b754d83d3c540cd6f989087af24fef27 Parents: 007e6f5 Author: Sammi Chen Authored: Mon Jul 30 10:53:44 2018 +0800 Committer: Sammi Chen Committed: Mon Jul 30 10:53:44 2018 +0800 -- .../aliyun/oss/AliyunOSSBlockOutputStream.java | 59 .../fs/aliyun/oss/AliyunOSSFileSystemStore.java | 2 + .../oss/TestAliyunOSSBlockOutputStream.java | 12 +++- 3 files changed, 49 insertions(+), 24 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/0857f116/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSBlockOutputStream.java -- diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSBlockOutputStream.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSBlockOutputStream.java index 12d551b..0a833b2 100644 --- a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSBlockOutputStream.java +++ b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSBlockOutputStream.java @@ -33,7 +33,9 @@ import java.io.FileOutputStream; import java.io.IOException; import java.io.OutputStream; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; @@ -50,7 +52,7 @@ public class AliyunOSSBlockOutputStream extends OutputStream { private boolean closed; private String key; private File blockFile; - private List blockFiles = new ArrayList<>(); + private Map blockFiles = new HashMap<>(); 
private long blockSize; private int blockId = 0; private long blockWritten = 0L; @@ -94,8 +96,9 @@ public class AliyunOSSBlockOutputStream extends OutputStream { blockStream.flush(); blockStream.close(); -if (!blockFiles.contains(blockFile)) { - blockFiles.add(blockFile); +if (!blockFiles.values().contains(blockFile)) { + blockId++; + blockFiles.put(blockId, blockFile); } try { @@ -107,7 +110,7 @@ public class AliyunOSSBlockOutputStream extends OutputStream { ListenableFuture partETagFuture = executorService.submit(() -> { PartETag partETag = store.uploadPart(blockFile, key, uploadId, -blockId + 1); +blockId); return partETag; }); partETagsFutures.add(partETagFuture); @@ -120,11 +123,7 @@ public class AliyunOSSBlockOutputStream extends OutputStream { store.completeMultipartUpload(key, uploadId, partETags); } } finally { - for (File tFile: blockFiles) { -if (tFile.exists() && !tFile.delete()) { - LOG.warn("Failed to delete temporary file {}", tFile); -} - } + removePartFiles(); closed = true; } } @@ -141,38 +140,52 @@ public class AliyunOSSBlockOutputStream extends OutputStream { if (closed) { throw new IOException("Stream closed."); } -try { - blockStream.write(b, off, len); - blockWritten += len; - if (blockWritten >= blockSize) { -uploadCurrentPart(); -blockWritten = 0L; +blockStream.write(b, off, len); +blockWritten += len; +if (blockWritten >= blockSize) { + uploadCurrentPart(); + blockWritten = 0L; +} + } + + private void removePartFiles() throws IOException { +for (ListenableFuture partETagFuture : partETagsFutures) { + if (!partETagFuture.isDone()) { +continue; } -} finally { - for (File tFile: blockFiles) { -if (tFile.exists() && !tFile.delete()) { - LOG.warn("Failed to delete temporary file {}", tFile); + + try { +File blockFile = blockFiles.get(partETagFuture.get().getPartNumber()); +if (blockFile != null && blockFile.exists() && !blockFile.delete()) { + LOG.warn("Failed to delete temporary file {}", blockFile); } + } catch (InterruptedException | 
ExecutionException e) { +throw new IOException(e); } } } private void uploadCurrentPart() throws IOException { -blockFiles.add(blockFile); blockStream.flush(); blockStream.close(); if (blockId == 0) { uploadId = store.getUploadId(key); } + +
[09/50] [abbrv] hadoop git commit: HDDS-248. Refactor DatanodeContainerProtocol.proto Contributed by Hanisha Koneru.
HDDS-248. Refactor DatanodeContainerProtocol.proto Contributed by Hanisha Koneru. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/007e6f51 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/007e6f51 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/007e6f51 Branch: refs/heads/YARN-7402 Commit: 007e6f51135adb5864f6bfc258010fd09576387b Parents: feb795b Author: Bharat Viswanadham Authored: Sat Jul 28 14:50:43 2018 -0700 Committer: Bharat Viswanadham Committed: Sat Jul 28 14:57:11 2018 -0700 -- .../scm/storage/ContainerProtocolCalls.java | 37 +--- .../main/proto/DatanodeContainerProtocol.proto | 96 +--- .../container/common/impl/HddsDispatcher.java | 51 +-- .../CloseContainerCommandHandler.java | 8 +- .../server/ratis/ContainerStateMachine.java | 6 +- .../keyvalue/KeyValueContainerData.java | 9 -- .../container/keyvalue/KeyValueHandler.java | 16 +--- .../container/ozoneimpl/OzoneContainer.java | 2 +- .../container/keyvalue/TestKeyValueHandler.java | 12 ++- .../scm/cli/container/InfoContainerHandler.java | 1 - .../ozone/container/ContainerTestHelper.java| 59 ++-- .../common/impl/TestCloseContainerHandler.java | 18 ++-- .../genesis/BenchMarkDatanodeDispatcher.java| 19 ++-- 13 files changed, 148 insertions(+), 186 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/007e6f51/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java -- diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java index 36cdfc9..abad9e3 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java @@ -29,6 +29,8 @@ import 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos .ContainerCommandResponseProto; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos +.CloseContainerRequestProto; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos .DatanodeBlockID; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos .GetKeyRequestProto; @@ -86,15 +88,18 @@ public final class ContainerProtocolCalls { .newBuilder() .setBlockID(datanodeBlockID); String id = xceiverClient.getPipeline().getLeader().getUuidString(); + ContainerCommandRequestProto request = ContainerCommandRequestProto .newBuilder() .setCmdType(Type.GetKey) +.setContainerID(datanodeBlockID.getContainerID()) .setTraceID(traceID) .setDatanodeUuid(id) .setGetKey(readKeyRequest) .build(); ContainerCommandResponseProto response = xceiverClient.sendCommand(request); validateContainerResponse(response); + return response.getGetKey(); } @@ -118,7 +123,9 @@ public final class ContainerProtocolCalls { String id = xceiverClient.getPipeline().getLeader().getUuidString(); ContainerCommandRequestProto request = ContainerCommandRequestProto.newBuilder() -.setCmdType(Type.GetCommittedBlockLength).setTraceID(traceID) +.setCmdType(Type.GetCommittedBlockLength) +.setContainerID(blockID.getContainerID()) +.setTraceID(traceID) .setDatanodeUuid(id) .setGetCommittedBlockLength(getBlockLengthRequestBuilder).build(); ContainerCommandResponseProto response = xceiverClient.sendCommand(request); @@ -143,6 +150,7 @@ public final class ContainerProtocolCalls { ContainerCommandRequestProto request = ContainerCommandRequestProto .newBuilder() .setCmdType(Type.PutKey) +.setContainerID(containerKeyData.getBlockID().getContainerID()) .setTraceID(traceID) .setDatanodeUuid(id) .setPutKey(createKeyRequest) @@ -171,6 +179,7 @@ public final class ContainerProtocolCalls { ContainerCommandRequestProto request = ContainerCommandRequestProto 
.newBuilder() .setCmdType(Type.ReadChunk) +.setContainerID(blockID.getContainerID()) .setTraceID(traceID) .setDatanodeUuid(id) .setReadChunk(readChunkRequest) @@ -202,6 +211,7 @@ public final class ContainerProtocolCalls { ContainerCommandRequestProto request = ContainerCommandRequestProto .newBuilder() .setCmdType(Type.WriteChunk) +
[24/50] [abbrv] hadoop git commit: YARN-8579. Recover NMToken of previous attempted component data. Contributed by Gour Saha
YARN-8579. Recover NMToken of previous attempted component data. Contributed by Gour Saha Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c7ebcd76 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c7ebcd76 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c7ebcd76 Branch: refs/heads/YARN-7402 Commit: c7ebcd76bf3dd14127336951f2be3de772e7826a Parents: 4b540bb Author: Eric Yang Authored: Tue Jul 31 18:01:02 2018 -0400 Committer: Eric Yang Committed: Tue Jul 31 18:01:02 2018 -0400 -- .../hadoop/yarn/service/ServiceScheduler.java | 1 + .../scheduler/SchedulerApplicationAttempt.java| 3 ++- .../scheduler/fair/FairScheduler.java | 8 ++-- .../applicationsmanager/TestAMRestart.java| 18 ++ 4 files changed, 23 insertions(+), 7 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7ebcd76/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java index cfaf356..0801ad0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java @@ -649,6 +649,7 @@ public class ServiceScheduler extends CompositeService { @Override public void onContainersReceivedFromPreviousAttempts( List containers) { + LOG.info("Containers recovered 
after AM registered: {}", containers); if (containers == null || containers.isEmpty()) { return; } http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7ebcd76/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java index dd6d38f..f9df2b8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java @@ -785,6 +785,7 @@ public class SchedulerApplicationAttempt implements SchedulableEntity { List returnContainerList = new ArrayList<> (recoveredPreviousAttemptContainers); recoveredPreviousAttemptContainers.clear(); + updateNMTokens(returnContainerList); return returnContainerList; } finally { writeLock.unlock(); @@ -1466,4 +1467,4 @@ public class SchedulerApplicationAttempt implements SchedulableEntity { public Map getApplicationSchedulingEnvs() { return this.applicationSchedulingEnvs; } -} \ No newline at end of file +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7ebcd76/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java -- diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java index 20d1afe..037cebf 100644 ---
[16/50] [abbrv] hadoop git commit: YARN-8584. Several typos in Log Aggregation related classes. Contributed by Szilard Nemeth.
YARN-8584. Several typos in Log Aggregation related classes. Contributed by Szilard Nemeth. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2b39ad26 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2b39ad26 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2b39ad26 Branch: refs/heads/YARN-7402 Commit: 2b39ad26984d641bad57db2cfcc0b7515ef95f46 Parents: e8f952e Author: bibinchundatt Authored: Mon Jul 30 23:25:19 2018 +0530 Committer: bibinchundatt Committed: Mon Jul 30 23:25:19 2018 +0530 -- .../AggregatedLogDeletionService.java | 4 +-- .../logaggregation/AggregatedLogFormat.java | 8 +++--- .../LogAggregationFileController.java | 6 ++--- .../ifile/IndexedFileAggregatedLogsBlock.java | 6 ++--- .../LogAggregationIndexedFileController.java| 26 ++-- .../tfile/LogAggregationTFileController.java| 2 +- .../TestAggregatedLogDeletionService.java | 6 ++--- .../logaggregation/AppLogAggregatorImpl.java| 2 +- .../logaggregation/LogAggregationService.java | 6 ++--- .../tracker/NMLogAggregationStatusTracker.java | 4 +-- 10 files changed, 35 insertions(+), 35 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b39ad26/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogDeletionService.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogDeletionService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogDeletionService.java index 562bd2c..841b870 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogDeletionService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogDeletionService.java @@ -258,7 +258,7 @@ public class 
AggregatedLogDeletionService extends AbstractService { return; } setLogAggCheckIntervalMsecs(retentionSecs); -task = new LogDeletionTask(conf, retentionSecs, creatRMClient()); +task = new LogDeletionTask(conf, retentionSecs, createRMClient()); timer = new Timer(); timer.scheduleAtFixedRate(task, 0, checkIntervalMsecs); } @@ -281,7 +281,7 @@ public class AggregatedLogDeletionService extends AbstractService { // We have already marked ApplicationClientProtocol.getApplicationReport // as @Idempotent, it will automatically take care of RM restart/failover. @VisibleForTesting - protected ApplicationClientProtocol creatRMClient() throws IOException { + protected ApplicationClientProtocol createRMClient() throws IOException { return ClientRMProxy.createRMProxy(getConfig(), ApplicationClientProtocol.class); } http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b39ad26/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java index 4ee5c8a..d9b4c1e4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/AggregatedLogFormat.java @@ -178,7 +178,7 @@ public class AggregatedLogFormat { * The set of log files that are older than retention policy that will * not be uploaded but ready for deletion. */ -private final Set obseleteRetentionLogFiles = new HashSet(); +private final Set obsoleteRetentionLogFiles = new HashSet(); // TODO Maybe add a version string here. 
Instead of changing the version of // the entire k-v format @@ -324,7 +324,7 @@ public class AggregatedLogFormat { // if log files are older than retention policy, do not upload them. // but schedule them for deletion. if(logRetentionContext != null && !logRetentionContext.shouldRetainLog()){ -obseleteRetentionLogFiles.addAll(candidates); +obsoleteRetentionLogFiles.addAll(candidates); candidates.clear(); return candidates; } @@ -396,9 +396,9 @@ public class AggregatedLogFormat { return info; } -public Set
hadoop git commit: HADOOP-15629. Missing trimming in readlink in case of protocol. Contributed by Giovanni Matteo Fumarola
Repository: hadoop Updated Branches: refs/heads/HADOOP-15461 bac459b3f -> d71df5aac HADOOP-15629. Missing trimming in readlink in case of protocol. Contrbuted by Giovanni Matteo Fumarola Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d71df5aa Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d71df5aa Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d71df5aa Branch: refs/heads/HADOOP-15461 Commit: d71df5aac9c7df9303cef7a5ba740bfe20958374 Parents: bac459b Author: Botong Huang Authored: Thu Jul 26 21:35:13 2018 -0700 Committer: Botong Huang Committed: Thu Jul 26 21:35:13 2018 -0700 -- .../java/org/apache/hadoop/fs/FileUtil.java | 21 +++- 1 file changed, 16 insertions(+), 5 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/d71df5aa/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java -- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java index bf3feb5..ab3d913 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java @@ -30,7 +30,10 @@ import java.io.InputStream; import java.io.InputStreamReader; import java.io.OutputStream; import java.net.InetAddress; +import java.net.MalformedURLException; import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; import java.net.UnknownHostException; import java.nio.charset.Charset; import java.nio.file.AccessDeniedException; @@ -203,18 +206,26 @@ public class FileUtil { return ""; } -if (Files.isSymbolicLink(f.toPath())) { +// This will make sure we remove the protocol as file:// +java.nio.file.Path pathFile; +try { + pathFile = Paths.get(new URL(f.toString()).toURI()); +} catch (MalformedURLException | 
URISyntaxException e) { + pathFile = f.toPath(); +} + +if (Files.isSymbolicLink(pathFile)) { java.nio.file.Path p = null; try { -p = Files.readSymbolicLink(f.toPath()); +p = Files.readSymbolicLink(pathFile); } catch (Exception e) { -LOG.warn("Exception while reading the symbolic link " -+ f.getAbsolutePath() + ". Exception= " + e.getMessage()); +LOG.warn("Exception while reading the symbolic link {}. Exception= {}", +f, e.getMessage()); return ""; } return p.toAbsolutePath().toString(); } -LOG.warn("The file " + f.getAbsolutePath() + " is not a symbolic link."); +LOG.warn("The file {} is not a symbolic link.", f); return ""; } - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org