[1/2] hadoop git commit: YARN-6128. Add support for AMRMProxy HA. (Botong Huang via Subru).
Repository: hadoop Updated Branches: refs/heads/trunk 0940e4f69 -> d5f66888b http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5f66888/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java index d63b2cf..ebd85bf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/amrmproxy/AMRMProxyService.java @@ -34,12 +34,13 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.io.Text; import org.apache.hadoop.ipc.Server; +import org.apache.hadoop.registry.client.api.RegistryOperations; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.SaslRpcServer; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; -import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.service.CompositeService; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; @@ -60,15 +61,19 @@ import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.ipc.YarnRPC; import 
org.apache.hadoop.yarn.security.AMRMTokenIdentifier; import org.apache.hadoop.yarn.security.ContainerTokenIdentifier; +import org.apache.hadoop.yarn.server.api.ContainerType; +import org.apache.hadoop.yarn.server.federation.utils.FederationStateStoreFacade; import org.apache.hadoop.yarn.server.nodemanager.Context; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEventType; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.recovery.NMStateStoreService.RecoveredAMRMProxyState; import org.apache.hadoop.yarn.server.nodemanager.scheduler.DistributedScheduler; import org.apache.hadoop.yarn.server.security.MasterKeyData; import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.apache.hadoop.yarn.server.utils.YarnServerSecurityUtils; +import org.apache.hadoop.yarn.util.ConverterUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -82,7 +87,7 @@ import com.google.common.base.Preconditions; * pipeline is a chain of interceptor instances that can inspect and modify the * request/response as needed. */ -public class AMRMProxyService extends AbstractService implements +public class AMRMProxyService extends CompositeService implements ApplicationMasterProtocol { private static final Logger LOG = LoggerFactory .getLogger(AMRMProxyService.class); @@ -96,6 +101,7 @@ public class AMRMProxyService extends AbstractService implements private InetSocketAddress listenerEndpoint; private AMRMProxyTokenSecretManager secretManager; private MapapplPipelineMap; + private RegistryOperations registry; /** * Creates an instance of the service. 
@@ -118,10 +124,23 @@ public class AMRMProxyService extends AbstractService implements @Override protected void serviceInit(Configuration conf) throws Exception { -super.serviceInit(conf); this.secretManager = new AMRMProxyTokenSecretManager(this.nmContext.getNMStateStore()); this.secretManager.init(conf); + +// Both second app attempt and NM restart within Federation need registry +if (conf.getBoolean(YarnConfiguration.AMRM_PROXY_HA_ENABLED, +YarnConfiguration.DEFAULT_AMRM_PROXY_HA_ENABLED) +|| conf.getBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, +YarnConfiguration.DEFAULT_NM_RECOVERY_ENABLED)) { + this.registry = FederationStateStoreFacade.createInstance(conf, + YarnConfiguration.YARN_REGISTRY_CLASS, + YarnConfiguration.DEFAULT_YARN_REGISTRY_CLASS, + RegistryOperations.class); + addService(this.registry); +} + +super.serviceInit(conf);
[2/2] hadoop git commit: YARN-6128. Add support for AMRMProxy HA. (Botong Huang via Subru).
YARN-6128. Add support for AMRMProxy HA. (Botong Huang via Subru). Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d5f66888 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d5f66888 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d5f66888 Branch: refs/heads/trunk Commit: d5f66888b8d767ee6706fab9950c194a1bf26d32 Parents: 0940e4f Author: Subru KrishnanAuthored: Fri Nov 17 17:39:06 2017 -0800 Committer: Subru Krishnan Committed: Fri Nov 17 17:39:06 2017 -0800 -- .../hadoop/yarn/conf/YarnConfiguration.java | 13 + .../src/main/resources/yarn-default.xml | 21 ++ .../hadoop-yarn-server-common/pom.xml | 5 + .../utils/FederationRegistryClient.java | 338 +++ .../yarn/server/uam/UnmanagedAMPoolManager.java | 141 ++-- .../server/uam/UnmanagedApplicationManager.java | 212 +++- .../yarn/server/utils/AMRMClientUtils.java | 30 +- .../yarn/server/MockResourceManagerFacade.java | 103 +++--- .../utils/TestFederationRegistryClient.java | 90 + .../uam/TestUnmanagedApplicationManager.java| 100 +- .../amrmproxy/AMRMProxyApplicationContext.java | 16 + .../AMRMProxyApplicationContextImpl.java| 35 +- .../nodemanager/amrmproxy/AMRMProxyService.java | 83 - .../amrmproxy/FederationInterceptor.java| 221 +++- .../containermanager/ContainerManagerImpl.java | 9 +- .../amrmproxy/BaseAMRMProxyTest.java| 14 +- .../amrmproxy/TestAMRMProxyService.java | 21 +- .../amrmproxy/TestFederationInterceptor.java| 126 ++- .../TestableFederationInterceptor.java | 29 +- .../hadoop/yarn/server/MiniYARNCluster.java | 6 +- .../src/site/markdown/Federation.md | 11 +- 21 files changed, 1345 insertions(+), 279 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5f66888/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index 34257ed..ead9977 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -2096,6 +2096,9 @@ public class YarnConfiguration extends Configuration { public static final String DEFAULT_AMRM_PROXY_INTERCEPTOR_CLASS_PIPELINE = "org.apache.hadoop.yarn.server.nodemanager.amrmproxy." + "DefaultRequestInterceptor"; + public static final String AMRM_PROXY_HA_ENABLED = NM_PREFIX + + "amrmproxy.ha.enable"; + public static final boolean DEFAULT_AMRM_PROXY_HA_ENABLED = false; /** * Default platform-agnostic CLASSPATH for YARN applications. A @@ -2930,6 +2933,11 @@ public class YarnConfiguration extends Configuration { public static final String FEDERATION_CACHE_TIME_TO_LIVE_SECS = FEDERATION_PREFIX + "cache-ttl.secs"; + public static final String FEDERATION_REGISTRY_BASE_KEY = + FEDERATION_PREFIX + "registry.base-dir"; + public static final String DEFAULT_FEDERATION_REGISTRY_BASE_KEY = + "yarnfederation/"; + // 5 minutes public static final int DEFAULT_FEDERATION_CACHE_TIME_TO_LIVE_SECS = 5 * 60; @@ -3087,6 +3095,11 @@ public class YarnConfiguration extends Configuration { // Other Configs + public static final String YARN_REGISTRY_CLASS = + YARN_PREFIX + "registry.class"; + public static final String DEFAULT_YARN_REGISTRY_CLASS = + "org.apache.hadoop.registry.client.impl.FSRegistryOperationsService"; + /** * Use YARN_CLIENT_APPLICATION_CLIENT_PROTOCOL_POLL_INTERVAL_MS instead. 
* The interval of the yarn client's querying application state after http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5f66888/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml index e90d0f2..12cb902 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml @@ -2826,7 +2826,20
hadoop git commit: HDFS-12823. Backport HDFS-9259 "Make SO_SNDBUF size configurable at DFSClient" to branch-2.7. (Erik Krogen via zhz)
Repository: hadoop Updated Branches: refs/heads/branch-2.7 6f876f419 -> 0da13b90f HDFS-12823. Backport HDFS-9259 "Make SO_SNDBUF size configurable at DFSClient" to branch-2.7. (Erik Krogen via zhz) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0da13b90 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0da13b90 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0da13b90 Branch: refs/heads/branch-2.7 Commit: 0da13b90f713aae3ee2e84efe5ae17e1c0a02b9d Parents: 6f876f4 Author: Zhe ZhangAuthored: Fri Nov 17 16:44:21 2017 -0800 Committer: Zhe Zhang Committed: Fri Nov 17 16:48:32 2017 -0800 -- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 4 + .../java/org/apache/hadoop/hdfs/DFSClient.java | 9 ++ .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 4 + .../org/apache/hadoop/hdfs/DFSOutputStream.java | 4 +- .../src/main/resources/hdfs-default.xml | 12 +++ .../hadoop/hdfs/TestDFSClientSocketSize.java| 96 6 files changed, 128 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/0da13b90/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index caf8c0e..207611d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -20,6 +20,10 @@ Release 2.7.5 - UNRELEASED HDFS-10984. Expose nntop output as metrics. (Siddharth Wagle via xyao, zhz) +HDFS-9259. Make SO_SNDBUF size configurable at DFSClient side for hdfs write +scenario. (original patch Mingliang Liu via Ming Ma, branch-2.7 backport done +under HDFS-12823, Erik Krogen via zhz). + OPTIMIZATIONS HDFS-10711. Optimize FSPermissionChecker group membership check. 
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0da13b90/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index 1a6a96b..2cdfc20 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -51,6 +51,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPAC import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_SEND_BUFFER_SIZE_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_SEND_BUFFER_SIZE_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT; @@ -299,6 +301,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory, final int writeMaxPackets; final ByteArrayManager.Conf writeByteArrayManagerConf; final int socketTimeout; +private final int socketSendBufferSize; final int socketCacheCapacity; final long socketCacheExpiry; final long excludedNodesCacheExpiry; @@ -369,6 +372,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory, defaultChecksumOpt = getChecksumOptFromConf(conf); socketTimeout = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, HdfsServerConstants.READ_TIMEOUT); + socketSendBufferSize = 
conf.getInt(DFS_CLIENT_SOCKET_SEND_BUFFER_SIZE_KEY, + DFS_CLIENT_SOCKET_SEND_BUFFER_SIZE_DEFAULT); /** dfs.write.packet.size is an internal config variable */ writePacketSize = conf.getInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY, DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT); @@ -511,6 +516,10 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory, DFSConfigKeys.DFS_CLIENT_KEY_PROVIDER_CACHE_EXPIRY_DEFAULT); } +public int getSocketSendBufferSize() { + return socketSendBufferSize; +} + public boolean isUseLegacyBlockReaderLocal() { return useLegacyBlockReaderLocal; }
hadoop git commit: HDFS-12759. Ozone: web: integrate configuration reader page to the SCM/KSM web ui. Contributed by Elek, Marton.
Repository: hadoop Updated Branches: refs/heads/HDFS-7240 2ed98acac -> 6d79f90bb HDFS-12759. Ozone: web: integrate configuration reader page to the SCM/KSM web ui. Contributed by Elek, Marton. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6d79f90b Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6d79f90b Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6d79f90b Branch: refs/heads/HDFS-7240 Commit: 6d79f90bbaea40a24a0ce6e7049bb565d676e7f3 Parents: 2ed98ac Author: Anu EngineerAuthored: Fri Nov 17 15:06:05 2017 -0800 Committer: Anu Engineer Committed: Fri Nov 17 15:06:05 2017 -0800 -- .../hadoop-hdfs/src/main/webapps/ksm/index.html | 16 +-- .../hadoop-hdfs/src/main/webapps/ksm/ksm.js | 7 +- .../hadoop-hdfs/src/main/webapps/ksm/main.html | 18 +++ .../hadoop-hdfs/src/main/webapps/scm/index.html | 20 +-- .../hadoop-hdfs/src/main/webapps/scm/main.css | 23 .../hadoop-hdfs/src/main/webapps/scm/main.html | 20 +++ .../src/main/webapps/static/css/ozone-conf.css | 62 -- .../src/main/webapps/static/js/ozone-conf.js| 102 .../src/main/webapps/static/ozone.css | 60 + .../src/main/webapps/static/ozone.js| 122 ++- .../main/webapps/static/templates/config.html | 92 ++ .../src/main/webapps/static/templates/menu.html | 60 + .../webapps/static/templates/ozone-config.html | 109 - .../webapps/static/templates/rpc-metrics.html | 5 +- .../main/webapps/static/templates/tools.html| 39 -- 15 files changed, 391 insertions(+), 364 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d79f90b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/ksm/index.html -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/ksm/index.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/ksm/index.html index ec2fcc0..7f4e57b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/ksm/index.html +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/ksm/index.html @@ -30,14 +30,14 @@ - + - + 
@@ -48,17 +48,13 @@ HDFS KSM - + - - - - - - - + + http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d79f90b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/ksm/ksm.js -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/ksm/ksm.js b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/ksm/ksm.js index 36ee15b..98240a4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/ksm/ksm.js +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/ksm/ksm.js @@ -23,7 +23,12 @@ }; angular.module('ksm', ['ozone', 'nvd3']); - +angular.module('ksm').config(function ($routeProvider) { +$routeProvider +.when("/metrics/ksm", { +template: "" +}); +}); angular.module('ksm').component('ksmMetrics', { templateUrl: 'ksm-metrics.html', controller: function ($http) { http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d79f90b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/ksm/main.html -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/ksm/main.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/ksm/main.html new file mode 100644 index 000..0821899 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/ksm/main.html @@ -0,0 +1,18 @@ + + + \ No newline at end of file http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d79f90b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/scm/index.html -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/scm/index.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/scm/index.html index a0b45be..6d044d9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/scm/index.html +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/scm/index.html @@ -30,14 +30,14 @@ - + - + @@ -48,18 +48,18 @@ HDFS SCM - + + + + + - - - - - - - + + http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d79f90b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/scm/main.css -- diff --git
[hadoop] Git Push Summary
Repository: hadoop Updated Tags: refs/tags/rel/release-2.9.0 [created] 22f6259d9 - To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org For additional commands, e-mail: common-commits-help@hadoop.apache.org
hadoop git commit: HDFS-12830. Ozone: TestOzoneRpcClient#testPutKeyRatisThreeNodes fails. Contributed by Yiqun Lin.
Repository: hadoop Updated Branches: refs/heads/HDFS-7240 87a195b67 -> 2ed98acac HDFS-12830. Ozone: TestOzoneRpcClient#testPutKeyRatisThreeNodes fails. Contributed by Yiqun Lin. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2ed98aca Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2ed98aca Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2ed98aca Branch: refs/heads/HDFS-7240 Commit: 2ed98acacd59a170150c84c1c3cc276e2cd0814a Parents: 87a195b Author: Xiaoyu YaoAuthored: Fri Nov 17 11:44:12 2017 -0800 Committer: Xiaoyu Yao Committed: Fri Nov 17 11:44:12 2017 -0800 -- .../org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java| 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ed98aca/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java index a78bc3f..383f32c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java @@ -21,7 +21,6 @@ package org.apache.hadoop.ozone.client.rpc; import org.apache.commons.lang.RandomStringUtils; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.ozone.MiniOzoneClassicCluster; -import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.conf.OzoneConfiguration; @@ -90,7 +89,7 @@ public class TestOzoneRpcClient { OzoneConfiguration conf = new OzoneConfiguration(); conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY, 
OzoneConsts.OZONE_HANDLER_DISTRIBUTED); -cluster = new MiniOzoneClassicCluster.Builder(conf) +cluster = new MiniOzoneClassicCluster.Builder(conf).numDataNodes(5) .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build(); conf.set("ozone.client.protocol", "org.apache.hadoop.ozone.client.rpc.RpcClient"); - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[07/19] hadoop git commit: YARN-7469. Capacity Scheduler Intra-queue preemption: User can starve if newest app is exactly at user limit. Contributed by Eric Payne.
YARN-7469. Capacity Scheduler Intra-queue preemption: User can starve if newest app is exactly at user limit. Contributed by Eric Payne. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/61ace174 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/61ace174 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/61ace174 Branch: refs/heads/YARN-6592 Commit: 61ace174cdcbca9d22abce7aa0aa71148f37ad55 Parents: f4d5d20 Author: Sunil GAuthored: Thu Nov 16 22:34:23 2017 +0530 Committer: Sunil G Committed: Thu Nov 16 22:34:23 2017 +0530 -- .../FifoIntraQueuePreemptionPlugin.java | 6 ...alCapacityPreemptionPolicyMockFramework.java | 3 ++ ...cityPreemptionPolicyIntraQueueUserLimit.java | 35 3 files changed, 44 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/61ace174/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java index 00ae3da..3332f2a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java @@ -203,6 +203,12 @@ public class FifoIntraQueuePreemptionPlugin 
Resources.subtractFromNonNegative(preemtableFromApp, tmpApp.selected); Resources.subtractFromNonNegative(preemtableFromApp, tmpApp.getAMUsed()); + if (context.getIntraQueuePreemptionOrderPolicy() +.equals(IntraQueuePreemptionOrderPolicy.USERLIMIT_FIRST)) { +Resources.subtractFromNonNegative(preemtableFromApp, + tmpApp.getFiCaSchedulerApp().getCSLeafQueue().getMinimumAllocation()); + } + // Calculate toBePreempted from apps as follows: // app.preemptable = min(max(app.used - app.selected - app.ideal, 0), // intra_q_preemptable) http://git-wip-us.apache.org/repos/asf/hadoop/blob/61ace174/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java index 4fc0ea4..0bc5cb5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java @@ -358,6 +358,9 @@ public class ProportionalCapacityPreemptionPolicyMockFramework { queue = (LeafQueue) nameToCSQueues.get(queueName); queue.getApplications().add(app); queue.getAllApplications().add(app); + when(queue.getMinimumAllocation()) + 
.thenReturn(Resource.newInstance(1,1)); + when(app.getCSLeafQueue()).thenReturn(queue); HashSet users = userMap.get(queueName); if (null == users) { http://git-wip-us.apache.org/repos/asf/hadoop/blob/61ace174/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueueUserLimit.java -- diff --git
[14/19] hadoop git commit: YARN-7430. Enable user re-mapping for Docker containers by default. Contributed by Eric Yang.
YARN-7430. Enable user re-mapping for Docker containers by default. Contributed by Eric Yang. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5f0b238a Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5f0b238a Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5f0b238a Branch: refs/heads/YARN-6592 Commit: 5f0b238a118f3992bd149d8c02e6a1376dee96d7 Parents: e182e77 Author: Varun VasudevAuthored: Fri Nov 17 12:04:47 2017 +0530 Committer: Varun Vasudev Committed: Fri Nov 17 12:04:47 2017 +0530 -- .../main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f0b238a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index 4799137..34257ed 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -1735,7 +1735,7 @@ public class YarnConfiguration extends Configuration { DOCKER_CONTAINER_RUNTIME_PREFIX + "enable-userremapping.allowed"; /** Set enable user remapping as false by default. */ - public static final boolean DEFAULT_NM_DOCKER_ENABLE_USER_REMAPPING = false; + public static final boolean DEFAULT_NM_DOCKER_ENABLE_USER_REMAPPING = true; /** lower limit for acceptable uids of user remapped user. 
*/ public static final String NM_DOCKER_USER_REMAPPING_UID_THRESHOLD = - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[16/19] hadoop git commit: YARN-6594. [API] Introduce SchedulingRequest object. (Konstantinos Karanasos via wangda)
YARN-6594. [API] Introduce SchedulingRequest object. (Konstantinos Karanasos via wangda) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ff5a3204 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ff5a3204 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ff5a3204 Branch: refs/heads/YARN-6592 Commit: ff5a320465d499d42524c0f842f51192e7c5f0f5 Parents: 90ac4f8 Author: Wangda TanAuthored: Mon Oct 30 16:54:02 2017 -0700 Committer: Arun Suresh Committed: Fri Nov 17 10:43:44 2017 -0800 -- .../hadoop/yarn/api/records/ResourceSizing.java | 64 + .../yarn/api/records/SchedulingRequest.java | 205 ++ .../src/main/proto/yarn_protos.proto| 14 + .../records/impl/pb/ResourceSizingPBImpl.java | 117 .../impl/pb/SchedulingRequestPBImpl.java| 266 +++ 5 files changed, 666 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff5a3204/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceSizing.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceSizing.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceSizing.java new file mode 100644 index 000..d82be11 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceSizing.java @@ -0,0 +1,64 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.api.records; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.util.Records; + +/** + * {@code ResourceSizing} contains information for the size of a + * {@link SchedulingRequest}, such as the number of requested allocations and + * the resources for each allocation. + */ +@Public +@Unstable +public abstract class ResourceSizing { + + @Public + @Unstable + public static ResourceSizing newInstance(Resource resources) { +return ResourceSizing.newInstance(1, resources); + } + + @Public + @Unstable + public static ResourceSizing newInstance(int numAllocations, Resource resources) { +ResourceSizing resourceSizing = Records.newRecord(ResourceSizing.class); +resourceSizing.setNumAllocations(numAllocations); +resourceSizing.setResources(resources); +return resourceSizing; + } + + @Public + @Unstable + public abstract int getNumAllocations(); + + @Public + @Unstable + public abstract void setNumAllocations(int numAllocations); + + @Public + @Unstable + public abstract Resource getResources(); + + @Public + @Unstable + public abstract void setResources(Resource resources); +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff5a3204/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/SchedulingRequest.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/SchedulingRequest.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/SchedulingRequest.java new file mode 100644 index 000..47a0697 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/SchedulingRequest.java @@ -0,0 +1,205 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in
[15/19] hadoop git commit: YARN-7218. Decouple YARN Services REST API namespace from RM. (Contributed by Eric Yang)
YARN-7218. Decouple YARN Services REST API namespace from RM. (Contributed by Eric Yang) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0940e4f6 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0940e4f6 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0940e4f6 Branch: refs/heads/YARN-6592 Commit: 0940e4f692441f16e742666ac925f71a083eab27 Parents: 5f0b238 Author: Eric YangAuthored: Fri Nov 17 12:28:12 2017 -0500 Committer: Eric Yang Committed: Fri Nov 17 12:28:12 2017 -0500 -- .../hadoop/yarn/service/TestApiServer.java | 4 +- .../yarn/service/api/records/Component.java | 51 +--- .../service/api/records/ReadinessCheck.java | 14 +- .../yarn/service/api/records/Service.java | 15 +++--- .../yarn/service/conf/RestApiConstants.java | 2 +- .../org/apache/hadoop/yarn/webapp/WebApps.java | 10 +++- .../server/resourcemanager/ResourceManager.java | 14 +- .../server/resourcemanager/webapp/RMWebApp.java | 15 -- 8 files changed, 77 insertions(+), 48 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/0940e4f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java index 2b22474..896b2f6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java @@ -62,8 +62,8 @@ public class TestApiServer { 
this.apiServer.getClass().isAnnotationPresent(Path.class)); final Path path = this.apiServer.getClass() .getAnnotation(Path.class); -assertEquals("The path has /ws/v1 annotation", path.value(), -"/ws/v1"); +assertEquals("The path has /v1 annotation", path.value(), +"/v1"); } @Test http://git-wip-us.apache.org/repos/asf/hadoop/blob/0940e4f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Component.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Component.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Component.java index fe9c043..ce0e0cf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Component.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Component.java @@ -26,6 +26,8 @@ import java.util.Collections; import java.util.List; import java.util.Objects; +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; @@ -46,22 +48,53 @@ import org.apache.hadoop.classification.InterfaceStability; @ApiModel(description = "One or more components of the service. If the service is HBase say, then the component can be a simple role like master or regionserver. If the service is a complex business webapp then a component can be other services say Kafka or Storm. 
Thereby it opens up the support for complex and nested services.") @javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00") @XmlRootElement +@XmlAccessorType(XmlAccessType.FIELD) @JsonInclude(JsonInclude.Include.NON_NULL) public class Component implements Serializable { private static final long serialVersionUID = -8430058381509087805L; + @JsonProperty("name") private String name = null; + + @JsonProperty("dependencies") private List dependencies = new ArrayList(); + + @JsonProperty("readiness_check") + @XmlElement(name = "readiness_check") private
[06/19] hadoop git commit: YARN-7486. Race condition in service AM that can cause NPE. Contributed by Jian He
YARN-7486. Race condition in service AM that can cause NPE. Contributed by Jian He Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f4d5d202 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f4d5d202 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f4d5d202 Branch: refs/heads/YARN-6592 Commit: f4d5d20286eb05449f6fd7cd6ff0554228205fe2 Parents: 462e25a Author: Billie RinaldiAuthored: Wed Nov 15 10:20:46 2017 -0800 Committer: Billie Rinaldi Committed: Thu Nov 16 07:58:06 2017 -0800 -- .../hadoop/yarn/service/ServiceScheduler.java | 50 - .../yarn/service/component/Component.java | 58 -- .../yarn/service/component/ComponentEvent.java | 11 ++ .../component/instance/ComponentInstance.java | 83 +++--- .../containerlaunch/ContainerLaunchService.java | 2 +- .../provider/AbstractProviderService.java | 5 +- .../yarn/service/provider/ProviderService.java | 5 +- .../yarn/service/provider/ProviderUtils.java| 5 +- .../ServiceTimelinePublisher.java | 5 +- .../hadoop/yarn/service/MockServiceAM.java | 66 --- .../hadoop/yarn/service/ServiceTestUtils.java | 5 +- .../hadoop/yarn/service/TestServiceAM.java | 109 +++ .../service/monitor/TestServiceMonitor.java | 12 ++ 13 files changed, 290 insertions(+), 126 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4d5d202/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java index a7b7e22..6bc5673 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java @@ -132,7 +132,6 @@ public class ServiceScheduler extends CompositeService { private AMRMClientAsync amRMClient; private NMClientAsync nmClient; private AsyncDispatcher dispatcher; - AsyncDispatcher compInstanceDispatcher; private YarnRegistryViewForProviders yarnRegistryOperations; private ServiceContext context; private ContainerLaunchService containerLaunchService; @@ -152,7 +151,7 @@ public class ServiceScheduler extends CompositeService { yarnRegistryOperations = createYarnRegistryOperations(context, registryClient); -// register metrics +// register metrics, serviceMetrics = ServiceMetrics .register(app.getName(), "Metrics for service"); serviceMetrics.tag("type", "Metrics type [component or service]", "service"); @@ -167,14 +166,11 @@ public class ServiceScheduler extends CompositeService { dispatcher = new AsyncDispatcher("Component dispatcher"); dispatcher.register(ComponentEventType.class, new ComponentEventHandler()); +dispatcher.register(ComponentInstanceEventType.class, +new ComponentInstanceEventHandler()); dispatcher.setDrainEventsOnStop(); addIfService(dispatcher); -compInstanceDispatcher = -new AsyncDispatcher("CompInstance dispatcher"); -compInstanceDispatcher.register(ComponentInstanceEventType.class, -new ComponentInstanceEventHandler()); -addIfService(compInstanceDispatcher); containerLaunchService = new ContainerLaunchService(context.fs); addService(containerLaunchService); @@ -277,10 +273,10 @@ public class ServiceScheduler extends CompositeService { } private void recoverComponents(RegisterApplicationMasterResponse response) { -List recoveredContainers = response +List containersFromPrevAttempt = 
response .getContainersFromPreviousAttempts(); LOG.info("Received {} containers from previous attempt.", -recoveredContainers.size()); +containersFromPrevAttempt.size()); Map existingRecords = new HashMap<>(); List existingComps = null; try { @@ -302,9 +298,8 @@ public class ServiceScheduler extends CompositeService { } } } -for (Container container : recoveredContainers) { - LOG.info("Handling container {} from
[18/19] hadoop git commit: YARN-6595. [API] Add Placement Constraints at the application level. (Arun Suresh via kkaranasos)
YARN-6595. [API] Add Placement Constraints at the application level. (Arun Suresh via kkaranasos) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c24deea4 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c24deea4 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c24deea4 Branch: refs/heads/YARN-6592 Commit: c24deea45d8db458c8a7285e346a867b49cf11dc Parents: ff5a320 Author: Konstantinos KaranasosAuthored: Mon Nov 13 15:25:24 2017 -0800 Committer: Arun Suresh Committed: Fri Nov 17 10:43:44 2017 -0800 -- .../RegisterApplicationMasterRequest.java | 42 - .../yarn/api/resource/PlacementConstraint.java | 156 +++ .../src/main/proto/yarn_protos.proto| 6 + .../src/main/proto/yarn_service_protos.proto| 1 + .../RegisterApplicationMasterRequestPBImpl.java | 106 - .../hadoop/yarn/api/BasePBImplRecordsTest.java | 11 ++ 6 files changed, 313 insertions(+), 9 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/c24deea4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterRequest.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterRequest.java index 395e190..f2d537a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterRequest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterRequest.java @@ -18,11 +18,16 @@ package org.apache.hadoop.yarn.api.protocolrecords; +import java.util.HashMap; +import java.util.Map; +import java.util.Set; + import 
org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; +import org.apache.hadoop.yarn.api.resource.PlacementConstraint; import org.apache.hadoop.yarn.util.Records; - /** * The request sent by the {@code ApplicationMaster} to {@code ResourceManager} * on registration. @@ -132,4 +137,39 @@ public abstract class RegisterApplicationMasterRequest { @Public @Stable public abstract void setTrackingUrl(String trackingUrl); + + /** + * Return all Placement Constraints specified at the Application level. The + * mapping is from a set of allocation tags to a + * PlacementConstraint associated with the tags, i.e., each + * {@link org.apache.hadoop.yarn.api.records.SchedulingRequest} that has those + * tags will be placed taking into account the corresponding constraint. + * + * @return A map of Placement Constraints. + */ + @Public + @Unstable + public Map getPlacementConstraints() { +return new HashMap<>(); + } + + /** + * Set Placement Constraints applicable to the + * {@link org.apache.hadoop.yarn.api.records.SchedulingRequest}s + * of this application. + * The mapping is from a set of allocation tags to a + * PlacementConstraint associated with the tags. + * For example: + * Map + * hb_regionserver - node_anti_affinity, + * hb_regionserver, hb_master - rack_affinity, + * ... + * + * @param placementConstraints Placement Constraint Mapping. 
+ */ + @Public + @Unstable + public void setPlacementConstraints( + Map placementConstraints) { + } } http://git-wip-us.apache.org/repos/asf/hadoop/blob/c24deea4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java index f0e3982..b6e851a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java @@ -54,6 +54,26 @@ public class PlacementConstraint { return constraintExpr; } + @Override +
[01/19] hadoop git commit: HADOOP-15023. ValueQueue should also validate (int) (lowWatermark * numValues) > 0 on construction. [Forced Update!]
Repository: hadoop Updated Branches: refs/heads/YARN-6592 32620a129 -> b9f0e942f (forced update) HADOOP-15023. ValueQueue should also validate (int) (lowWatermark * numValues) > 0 on construction. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b1941b20 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b1941b20 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b1941b20 Branch: refs/heads/YARN-6592 Commit: b1941b200d6b4fa6a7891421c0a1e212cad3d6eb Parents: fac72ee Author: Xiao ChenAuthored: Wed Nov 15 16:43:25 2017 -0800 Committer: Xiao Chen Committed: Wed Nov 15 16:44:06 2017 -0800 -- .../main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java | 6 -- 1 file changed, 4 insertions(+), 2 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/b1941b20/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java -- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java index 8411ffb..1ddd8a3 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java @@ -224,6 +224,9 @@ public class ValueQueue { Preconditions.checkArgument(numValues > 0, "\"numValues\" must be > 0"); Preconditions.checkArgument(((lowWatermark > 0)&&(lowWatermark <= 1)), "\"lowWatermark\" must be > 0 and <= 1"); +final int watermarkValue = (int) (numValues * lowWatermark); +Preconditions.checkArgument(watermarkValue > 0, +"(int) (\"numValues\" * \"lowWatermark\") must be > 0"); Preconditions.checkArgument(expiry > 0, "\"expiry\" must be > 0"); Preconditions.checkArgument(numFillerThreads > 0, "\"numFillerThreads\" must be > 0"); @@ -243,8 +246,7 @@ public 
class ValueQueue { throws Exception { LinkedBlockingQueue keyQueue = new LinkedBlockingQueue(); -refiller.fillQueueForKey(keyName, keyQueue, -(int)(lowWatermark * numValues)); +refiller.fillQueueForKey(keyName, keyQueue, watermarkValue); return keyQueue; } }); - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[19/19] hadoop git commit: YARN-6593. [API] Introduce Placement Constraint object. (Konstantinos Karanasos via wangda)
YARN-6593. [API] Introduce Placement Constraint object. (Konstantinos Karanasos via wangda) Change-Id: Id00edb7185fdf01cce6e40f920cac3585f8cbe9c Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/90ac4f89 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/90ac4f89 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/90ac4f89 Branch: refs/heads/YARN-6592 Commit: 90ac4f89792ee6b6496118321fa9b566f40e53a9 Parents: 0940e4f Author: Wangda TanAuthored: Thu Aug 3 14:03:55 2017 -0700 Committer: Arun Suresh Committed: Fri Nov 17 10:43:44 2017 -0800 -- .../yarn/api/resource/PlacementConstraint.java | 567 +++ .../yarn/api/resource/PlacementConstraints.java | 286 ++ .../hadoop/yarn/api/resource/package-info.java | 23 + .../src/main/proto/yarn_protos.proto| 55 ++ .../api/resource/TestPlacementConstraints.java | 106 .../PlacementConstraintFromProtoConverter.java | 116 .../pb/PlacementConstraintToProtoConverter.java | 174 ++ .../apache/hadoop/yarn/api/pb/package-info.java | 23 + .../yarn/api/records/impl/pb/ProtoUtils.java| 27 + .../PlacementConstraintTransformations.java | 209 +++ .../hadoop/yarn/api/resource/package-info.java | 23 + .../TestPlacementConstraintPBConversion.java| 195 +++ .../TestPlacementConstraintTransformations.java | 183 ++ 13 files changed, 1987 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/90ac4f89/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java new file mode 100644 index 000..f0e3982 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java 
@@ -0,0 +1,567 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.api.resource; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; + +/** + * {@code PlacementConstraint} represents a placement constraint for a resource + * allocation. + */ +@Public +@Unstable +public class PlacementConstraint { + + /** + * The constraint expression tree. + */ + private AbstractConstraint constraintExpr; + + public PlacementConstraint(AbstractConstraint constraintExpr) { +this.constraintExpr = constraintExpr; + } + + /** + * Get the constraint expression of the placement constraint. + * + * @return the constraint expression + */ + public AbstractConstraint getConstraintExpr() { +return constraintExpr; + } + + /** + * Interface used to enable the elements of the constraint tree to be visited. + */ + @Private + public interface Visitable { +/** + * Visitor pattern. 
+ * + * @param visitor visitor to be used + * @param defines the type that the visitor will use and the return type + * of the accept. + * @return the result of visiting a given object. + */ + T accept(Visitor visitor); + + } + + /** + * Visitor API for a constraint tree. + * + * @param determines the return type of the visit methods. + */ + @Private + public interface Visitor { +T visit(SingleConstraint constraint); + +T visit(TargetExpression target); + +T visit(TargetConstraint constraint); + +T visit(CardinalityConstraint constraint); + +T visit(And
[08/19] hadoop git commit: YARN-7390. All reservation related test cases failed when TestYarnClient runs against Fair Scheduler. (Yufei Gu via Haibo Chen)
YARN-7390. All reservation related test cases failed when TestYarnClient runs against Fair Scheduler. (Yufei Gu via Haibo Chen) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/28d0fcbe Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/28d0fcbe Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/28d0fcbe Branch: refs/heads/YARN-6592 Commit: 28d0fcbef40930ca5652c0e9a5d777910f3ad3c4 Parents: 61ace17 Author: Haibo ChenAuthored: Thu Nov 16 10:48:24 2017 -0800 Committer: Haibo Chen Committed: Thu Nov 16 10:48:24 2017 -0800 -- .../yarn/client/api/impl/TestYarnClient.java| 455 ++-- .../api/impl/TestYarnClientWithReservation.java | 521 +++ 2 files changed, 551 insertions(+), 425 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/28d0fcbe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java index 4c1a9cf..f6e305f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java @@ -33,7 +33,6 @@ import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.EnumSet; import java.util.HashMap; import java.util.HashSet; @@ -42,7 +41,6 @@ import java.util.List; import java.util.Map; import java.util.Set; -import com.google.common.base.Supplier; import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; import 
org.apache.hadoop.io.DataInputByteBuffer; @@ -75,14 +73,6 @@ import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsResponse; import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest; import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse; -import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteRequest; -import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteResponse; -import org.apache.hadoop.yarn.api.protocolrecords.ReservationListRequest; -import org.apache.hadoop.yarn.api.protocolrecords.ReservationListResponse; -import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionRequest; -import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionResponse; -import org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateRequest; -import org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateResponse; import org.apache.hadoop.yarn.api.protocolrecords.SignalContainerRequest; import org.apache.hadoop.yarn.api.records.ApplicationAccessType; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; @@ -96,13 +86,7 @@ import org.apache.hadoop.yarn.api.records.ContainerReport; import org.apache.hadoop.yarn.api.records.ContainerState; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.NodeId; -import org.apache.hadoop.yarn.api.records.NodeLabel; import org.apache.hadoop.yarn.api.records.Priority; -import org.apache.hadoop.yarn.api.records.ReservationDefinition; -import org.apache.hadoop.yarn.api.records.ReservationId; -import org.apache.hadoop.yarn.api.records.ReservationRequest; -import org.apache.hadoop.yarn.api.records.ReservationRequestInterpreter; -import org.apache.hadoop.yarn.api.records.ReservationRequests; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.SignalContainerCommand; import 
org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState; @@ -119,23 +103,28 @@ import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier; import org.apache.hadoop.yarn.server.MiniYARNCluster; import org.apache.hadoop.yarn.server.resourcemanager.MockRM; +import org.apache.hadoop.yarn.server.resourcemanager.ParameterizedSchedulerTestBase; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; -import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSystemTestUtil; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import
[03/19] hadoop git commit: YARN-7492. Set up SASS for new YARN UI styling. Contributed by Vasudevan Skm.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/09a13426/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn.lock -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn.lock b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn.lock index dc45d7e..fb35ea7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn.lock +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn.lock @@ -25,6 +25,15 @@ after@0.8.1: version "0.8.1" resolved "https://registry.yarnpkg.com/after/-/after-0.8.1.tgz#ab5d4fb883f596816d3515f8f791c0af486dd627; +ajv@^5.1.0: + version "5.3.0" + resolved "https://registry.yarnpkg.com/ajv/-/ajv-5.3.0.tgz#4414ff74a50879c208ee5fdc826e32c303549eda; + dependencies: +co "^4.6.0" +fast-deep-equal "^1.0.0" +fast-json-stable-stringify "^2.0.0" +json-schema-traverse "^0.3.0" + align-text@^0.1.1, align-text@^0.1.3: version "0.1.4" resolved "https://registry.yarnpkg.com/align-text/-/align-text-0.1.4.tgz#0cd90a561093f35d0a99256c22b7069433fad117; @@ -94,6 +103,10 @@ anymatch@^1.3.0: arrify "^1.0.0" micromatch "^2.1.5" +aproba@^1.0.3: + version "1.2.0" + resolved "https://registry.yarnpkg.com/aproba/-/aproba-1.2.0.tgz#6802e6264efd18c790a1b0d517f0f2627bf2c94a; + archy@~1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/archy/-/archy-1.0.0.tgz#f9c8c13757cc1dd7bc379ac77b2c62a5c2868c40; @@ -132,6 +145,10 @@ array-equal@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/array-equal/-/array-equal-1.0.0.tgz#8c2a5ef2472fd9ea742b04c77a75093ba2757c93; +array-find-index@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/array-find-index/-/array-find-index-1.0.2.tgz#df010aa1287e164bbda6f9723b0a96a1ec4187a1; + array-flatten@1.1.1: version "1.1.1" resolved "https://registry.yarnpkg.com/array-flatten/-/array-flatten-1.1.1.tgz#9a5f699051b1e7073328f2a008968b64ea2955d2; @@ -202,6 +219,10 @@ async-disk-cache@^1.0.0: rimraf "^2.5.3" rsvp 
"^3.0.18" +async-foreach@^0.1.3: + version "0.1.3" + resolved "https://registry.yarnpkg.com/async-foreach/-/async-foreach-0.1.3.tgz#36121f845c0578172de419a97dbeb1d16ec34542; + async-some@~1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/async-some/-/async-some-1.0.2.tgz#4d8a81620d5958791b5b98f802d3207776e95509; @@ -230,11 +251,19 @@ async@~0.8.0: version "0.8.0" resolved "https://registry.yarnpkg.com/async/-/async-0.8.0.tgz#ee65ec77298c2ff1456bc4418a052d0f06435112; +asynckit@^0.4.0: + version "0.4.0" + resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79; + aws-sign2@~0.6.0: version "0.6.0" resolved "https://registry.yarnpkg.com/aws-sign2/-/aws-sign2-0.6.0.tgz#14342dd38dbcc94d0e5b87d763cd63612c0e794f; -aws4@^1.2.1: +aws-sign2@~0.7.0: + version "0.7.0" + resolved "https://registry.yarnpkg.com/aws-sign2/-/aws-sign2-0.7.0.tgz#b46e890934a9591f2d2f6f86d7e6a9f1b3fe76a8; + +aws4@^1.2.1, aws4@^1.6.0: version "1.6.0" resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.6.0.tgz#83ef5ca860b2b32e4a0deedee8c771b9db57471e; @@ -375,6 +404,10 @@ balanced-match@^0.4.1: version "0.4.2" resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-0.4.2.tgz#cb3f3e3c732dc0f01ee70b403f302e61d7709838; +balanced-match@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.0.tgz#89b4d199ab2bee49de164ea02b89ce462d71b767; + base64-arraybuffer@0.1.2: version "0.1.2" resolved "https://registry.yarnpkg.com/base64-arraybuffer/-/base64-arraybuffer-0.1.2.tgz#474df4a9f2da24e05df3158c3b1db3c3cd46a154; @@ -473,6 +506,18 @@ boom@2.x.x: dependencies: hoek "2.x.x" +boom@4.x.x: + version "4.3.1" + resolved "https://registry.yarnpkg.com/boom/-/boom-4.3.1.tgz#4f8a3005cb4a7e3889f749030fd25b96e01d2e31; + dependencies: +hoek "4.x.x" + +boom@5.x.x: + version "5.2.0" + resolved "https://registry.yarnpkg.com/boom/-/boom-5.2.0.tgz#5dd9da6ee3a5f302077436290cb717d3f4a54e02; + dependencies: 
+hoek "4.x.x" + bower-config@0.6.1: version "0.6.1" resolved "https://registry.yarnpkg.com/bower-config/-/bower-config-0.6.1.tgz#7093155688bef44079bf4cb32d189312c87ded60; @@ -507,6 +552,13 @@ brace-expansion@^1.0.0: balanced-match "^0.4.1" concat-map "0.0.1" +brace-expansion@^1.1.7: + version "1.1.8" + resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.8.tgz#c07b211c7c952ec1f8efd51a77ef0d1d3990a292; + dependencies: +balanced-match "^1.0.0" +concat-map "0.0.1" + braces@^1.8.2: version "1.8.5" resolved "https://registry.yarnpkg.com/braces/-/braces-1.8.5.tgz#ba77962e12dff969d6b76711e914b737857bf6a7; @@ -558,6 +610,17 @@ broccoli-caching-writer@^2.0.0,
[04/19] hadoop git commit: YARN-7492. Set up SASS for new YARN UI styling. Contributed by Vasudevan Skm.
YARN-7492. Set up SASS for new YARN UI styling. Contributed by Vasudevan Skm. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/09a13426 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/09a13426 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/09a13426 Branch: refs/heads/YARN-6592 Commit: 09a13426086aed9d1bc63a844858dcac560763a6 Parents: 675e9a8 Author: Sunil GAuthored: Thu Nov 16 13:44:21 2017 +0530 Committer: Sunil G Committed: Thu Nov 16 13:44:21 2017 +0530 -- .gitignore | 3 + .../main/webapp/app/components/nodes-heatmap.js | 6 +- ...er-app-memusage-by-nodes-stacked-barchart.js | 2 +- .../main/webapp/app/components/tree-selector.js | 6 +- .../src/main/webapp/app/styles/app.css | 717 -- .../src/main/webapp/app/styles/app.scss | 723 +++ .../src/main/webapp/app/styles/colors.scss | 37 + .../src/main/webapp/app/styles/variables.scss | 40 + .../src/main/webapp/app/utils/color-utils.js| 8 +- .../hadoop-yarn-ui/src/main/webapp/package.json | 3 +- .../hadoop-yarn-ui/src/main/webapp/yarn.lock| 674 - 11 files changed, 1477 insertions(+), 742 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/09a13426/.gitignore -- diff --git a/.gitignore b/.gitignore index 70c1f23..440708a 100644 --- a/.gitignore +++ b/.gitignore @@ -8,6 +8,7 @@ *.sdf *.suo *.vcxproj.user +*.patch .idea .svn .classpath @@ -45,3 +46,5 @@ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tmp yarnregistry.pdf patchprocess/ .history/ +hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/package-lock.json +hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn-error.log http://git-wip-us.apache.org/repos/asf/hadoop/blob/09a13426/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/nodes-heatmap.js -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/nodes-heatmap.js 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/nodes-heatmap.js index 1ea655b..7802d42 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/nodes-heatmap.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/nodes-heatmap.js @@ -122,8 +122,8 @@ export default BaseChartComponent.extend({ var xOffset = layout.margin; var yOffset = layout.margin * 3; -var gradientStartColor = "#2ca02c"; -var gradientEndColor = "#ffb014"; +var gradientStartColor = "#60cea5"; +var gradientEndColor = "#ffbc0b"; var colorFunc = d3.interpolateRgb(d3.rgb(gradientStartColor), d3.rgb(gradientEndColor)); @@ -138,7 +138,7 @@ export default BaseChartComponent.extend({ var rect = g.append("rect") .attr("x", sampleXOffset) .attr("y", sampleYOffset) -.attr("fill", this.selectedCategory === i ? "#2c7bb6" : colorFunc(ratio)) +.attr("fill", this.selectedCategory === i ? "#26bbf0" : colorFunc(ratio)) .attr("width", this.SAMPLE_CELL_WIDTH) .attr("height", this.SAMPLE_HEIGHT) .attr("class", "hyperlink"); http://git-wip-us.apache.org/repos/asf/hadoop/blob/09a13426/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/per-app-memusage-by-nodes-stacked-barchart.js -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/per-app-memusage-by-nodes-stacked-barchart.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/per-app-memusage-by-nodes-stacked-barchart.js index 65cbaf5..c01fe36 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/per-app-memusage-by-nodes-stacked-barchart.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/per-app-memusage-by-nodes-stacked-barchart.js @@ -74,7 +74,7 @@ export default StackedBarchart.extend({ didInsertElement: function() { this.initChart(true); -this.colors = ["Orange", "Grey", "LimeGreen"]; +this.colors = 
["lightsalmon", "Grey", "mediumaquamarine"]; var containers = this.get("rmContainers"); var nodes = this.get("nodes"); http://git-wip-us.apache.org/repos/asf/hadoop/blob/09a13426/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js -- diff --git
[02/19] hadoop git commit: Revert "HDFS-12681. Fold HdfsLocatedFileStatus into HdfsFileStatus."
Revert "HDFS-12681. Fold HdfsLocatedFileStatus into HdfsFileStatus." This reverts commit b85603e3f85e85da406241b991f3a9974384c3aa. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/675e9a8f Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/675e9a8f Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/675e9a8f Branch: refs/heads/YARN-6592 Commit: 675e9a8f57570771a0219d95940681b067d36b94 Parents: b1941b2 Author: Chris DouglasAuthored: Wed Nov 15 19:17:46 2017 -0800 Committer: Chris Douglas Committed: Wed Nov 15 19:20:37 2017 -0800 -- .../org/apache/hadoop/fs/LocatedFileStatus.java | 11 +- .../main/java/org/apache/hadoop/fs/Hdfs.java| 4 +- .../hadoop/hdfs/DistributedFileSystem.java | 4 +- .../hadoop/hdfs/protocol/HdfsFileStatus.java| 80 -- .../hdfs/protocol/HdfsLocatedFileStatus.java| 110 +++ .../hadoop/hdfs/protocolPB/PBHelperClient.java | 65 +-- .../dev-support/findbugsExcludeFile.xml | 7 +- .../apache/hadoop/hdfs/server/mover/Mover.java | 6 +- .../server/namenode/FSDirStatAndListingOp.java | 61 +- .../hadoop/hdfs/TestBlockStoragePolicy.java | 10 +- .../apache/hadoop/hdfs/TestDFSOutputStream.java | 2 +- .../hdfs/server/mover/TestStorageMover.java | 8 +- 12 files changed, 217 insertions(+), 151 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/675e9a8f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java -- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java index 9cc81d3..29e1998 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java @@ -135,16 +135,7 @@ public class LocatedFileStatus extends FileStatus { public BlockLocation[] 
getBlockLocations() { return locations; } - - /** - * Hook for subclasses to lazily set block locations. The {@link #locations} - * field should be null before this is called. - * @param locations Block locations for this instance. - */ - protected void setBlockLocations(BlockLocation[] locations) { -this.locations = locations; - } - + /** * Compare this FileStatus to another FileStatus * @param o the FileStatus to be compared. http://git-wip-us.apache.org/repos/asf/hadoop/blob/675e9a8f/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/Hdfs.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/Hdfs.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/Hdfs.java index f306d06..0138195 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/Hdfs.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/Hdfs.java @@ -48,6 +48,7 @@ import org.apache.hadoop.hdfs.client.impl.CorruptFileBlockIterator; import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; +import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.AccessControlException; @@ -187,7 +188,8 @@ public class Hdfs extends AbstractFileSystem { @Override public LocatedFileStatus next() throws IOException { -return getNext().makeQualifiedLocated(getUri(), p); +return ((HdfsLocatedFileStatus)getNext()).makeQualifiedLocated( +getUri(), p); } }; } http://git-wip-us.apache.org/repos/asf/hadoop/blob/675e9a8f/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java -- diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java index eef83d7..9db12e1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -85,6
[09/19] hadoop git commit: YARN-7503. Configurable heap size / JVM opts in service AM. Contributed by Jonathan Hung
YARN-7503. Configurable heap size / JVM opts in service AM. Contributed by Jonathan Hung Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6bf2c301 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6bf2c301 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6bf2c301 Branch: refs/heads/YARN-6592 Commit: 6bf2c301924a3acae5a7510b8473f6292a5a471b Parents: 28d0fcb Author: Jian HeAuthored: Thu Nov 16 10:53:55 2017 -0800 Committer: Jian He Committed: Thu Nov 16 10:53:55 2017 -0800 -- .../apache/hadoop/yarn/service/client/ServiceClient.java | 10 +- .../apache/hadoop/yarn/service/conf/YarnServiceConf.java | 5 + .../service/containerlaunch/JavaCommandLineBuilder.java | 11 +-- 3 files changed, 15 insertions(+), 11 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/6bf2c301/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java index af43f8a..d1b6026 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java @@ -559,7 +559,7 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes, Map env = addAMEnv(); // create AM CLI -String cmdStr = buildCommandLine(serviceName, conf, 
appRootDir, hasAMLog4j); +String cmdStr = buildCommandLine(app, conf, appRootDir, hasAMLog4j); submissionContext.setResource(Resource.newInstance(YarnServiceConf .getLong(YarnServiceConf.AM_RESOURCE_MEM, YarnServiceConf.DEFAULT_KEY_AM_RESOURCE_MEM, app.getConfiguration(), @@ -624,12 +624,12 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes, LOG.debug(builder.toString()); } - private String buildCommandLine(String serviceName, Configuration conf, + private String buildCommandLine(Service app, Configuration conf, Path appRootDir, boolean hasSliderAMLog4j) throws BadConfigException { JavaCommandLineBuilder CLI = new JavaCommandLineBuilder(); CLI.forceIPv4().headless(); -//TODO CLI.setJVMHeap -//TODO CLI.addJVMOPTS +CLI.setJVMOpts(YarnServiceConf.get(YarnServiceConf.JVM_OPTS, null, +app.getConfiguration(), conf)); if (hasSliderAMLog4j) { CLI.sysprop(SYSPROP_LOG4J_CONFIGURATION, YARN_SERVICE_LOG4J_FILENAME); CLI.sysprop(SYSPROP_LOG_DIR, ApplicationConstants.LOG_DIR_EXPANSION_VAR); @@ -637,7 +637,7 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes, CLI.add(ServiceMaster.class.getCanonicalName()); //TODO debugAM CLI.add(Arguments.ARG_DEBUG) CLI.add("-" + ServiceMaster.YARNFILE_OPTION, new Path(appRootDir, -serviceName + ".json")); +app.getName() + ".json")); // pass the registry binding CLI.addConfOptionToCLI(conf, RegistryConstants.KEY_REGISTRY_ZK_ROOT, RegistryConstants.DEFAULT_ZK_REGISTRY_ROOT); http://git-wip-us.apache.org/repos/asf/hadoop/blob/6bf2c301/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java index a7bd58d..684d980 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java +++
[13/19] hadoop git commit: HDFS-12801. RBF: Set MountTableResolver as default file resolver. Contributed by Inigo Goiri.
HDFS-12801. RBF: Set MountTableResolver as default file resolver. Contributed by Inigo Goiri. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e182e777 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e182e777 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e182e777 Branch: refs/heads/YARN-6592 Commit: e182e777947a85943504a207deb3cf3ffc047910 Parents: 0987a7b Author: Inigo GoiriAuthored: Thu Nov 16 16:58:47 2017 -0800 Committer: Inigo Goiri Committed: Thu Nov 16 16:58:47 2017 -0800 -- .../hadoop-hdfs/src/main/resources/hdfs-default.xml| 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/e182e777/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index 28621ba..7ff91f2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -4883,7 +4883,7 @@ dfs.federation.router.file.resolver.client.class -org.apache.hadoop.hdfs.server.federation.MockResolver + org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver Class to resolve files to subclusters. - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
hadoop git commit: YARN-7448. [API] Add SchedulingRequest to the AllocateRequest. (Panagiotis Garefalakis via asuresh)
Repository: hadoop Updated Branches: refs/heads/YARN-6592 24494ae5b -> 32620a129 YARN-7448. [API] Add SchedulingRequest to the AllocateRequest. (Panagiotis Garefalakis via asuresh) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/32620a12 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/32620a12 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/32620a12 Branch: refs/heads/YARN-6592 Commit: 32620a1292e7848728bf712d0bf311ebf9901b07 Parents: 24494ae Author: Arun SureshAuthored: Fri Nov 17 10:42:43 2017 -0800 Committer: Arun Suresh Committed: Fri Nov 17 10:42:43 2017 -0800 -- .../api/protocolrecords/AllocateRequest.java| 42 ++ .../hadoop/yarn/api/records/ResourceSizing.java | 27 +++ .../yarn/api/records/SchedulingRequest.java | 1 + .../src/main/proto/yarn_service_protos.proto| 1 + .../impl/pb/AllocateRequestPBImpl.java | 83 .../records/impl/pb/ResourceSizingPBImpl.java | 2 +- .../impl/pb/SchedulingRequestPBImpl.java| 16 .../hadoop/yarn/api/TestPBImplRecords.java | 19 + 8 files changed, 190 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/32620a12/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java index ae0891e..d8d2347 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java @@ -18,6 +18,7 @@ package org.apache.hadoop.yarn.api.protocolrecords; +import java.util.Collections; import java.util.List; 
import org.apache.hadoop.classification.InterfaceAudience.Public; @@ -28,6 +29,7 @@ import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest; import org.apache.hadoop.yarn.api.records.ResourceRequest; +import org.apache.hadoop.yarn.api.records.SchedulingRequest; import org.apache.hadoop.yarn.api.records.UpdateContainerRequest; import org.apache.hadoop.yarn.util.Records; @@ -212,6 +214,32 @@ public abstract class AllocateRequest { public abstract void setUpdateRequests( List updateRequests); + /** + * Get the list of Scheduling requests being sent by the + * ApplicationMaster. + * @return list of {@link SchedulingRequest} being sent by the + * ApplicationMaster. + */ + @Public + @Unstable + public List getSchedulingRequests() { +return Collections.EMPTY_LIST; + } + + /** + * Set the list of Scheduling requests to inform the + * ResourceManager about the application's resource requirements + * (potentially including allocation tags & placement constraints). + * @param schedulingRequests list of SchedulingRequest to update + * the ResourceManager about the application's resource + * requirements. + */ + @Public + @Unstable + public void setSchedulingRequests( + List schedulingRequests) { + } + @Public @Unstable public static AllocateRequestBuilder newBuilder() { @@ -314,6 +342,20 @@ public abstract class AllocateRequest { } /** + * Set the schedulingRequests of the request. + * @see AllocateRequest#setSchedulingRequests(List) + * @param schedulingRequests SchedulingRequest of the request + * @return {@link AllocateRequestBuilder} + */ +@Public +@Unstable +public AllocateRequestBuilder schedulingRequests( +List schedulingRequests) { + allocateRequest.setSchedulingRequests(schedulingRequests); + return this; +} + +/** * Return generated {@link AllocateRequest} object. 
* @return {@link AllocateRequest} */ http://git-wip-us.apache.org/repos/asf/hadoop/blob/32620a12/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceSizing.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceSizing.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceSizing.java index
[05/19] hadoop git commit: HDFS-12814. Add blockId when warning slow mirror/disk in BlockReceiver. Contributed by Jiandan Yang.
HDFS-12814. Add blockId when warning slow mirror/disk in BlockReceiver. Contributed by Jiandan Yang. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/462e25a3 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/462e25a3 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/462e25a3 Branch: refs/heads/YARN-6592 Commit: 462e25a3b264e1148d0cbca00db7f10d43a0555f Parents: 09a1342 Author: Weiwei YangAuthored: Thu Nov 16 16:19:53 2017 +0800 Committer: Weiwei Yang Committed: Thu Nov 16 16:19:53 2017 +0800 -- .../hadoop/hdfs/server/datanode/BlockReceiver.java | 16 +++- 1 file changed, 11 insertions(+), 5 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/462e25a3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java index 8d91f04..c052d52 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java @@ -434,7 +434,8 @@ class BlockReceiver implements Closeable { if (duration > datanodeSlowLogThresholdMs && LOG.isWarnEnabled()) { LOG.warn("Slow flushOrSync took " + duration + "ms (threshold=" + datanodeSlowLogThresholdMs + "ms), isSync:" + isSync + ", flushTotalNanos=" - + flushTotalNanos + "ns, volume=" + getVolumeBaseUri()); + + flushTotalNanos + "ns, volume=" + getVolumeBaseUri() + + ", blockId=" + replicaInfo.getBlockId()); } } @@ -591,7 +592,8 @@ class BlockReceiver implements Closeable { if (duration > datanodeSlowLogThresholdMs && LOG.isWarnEnabled()) { LOG.warn("Slow BlockReceiver write packet to mirror took " + duration + "ms (threshold=" + 
datanodeSlowLogThresholdMs + "ms), " - + "downstream DNs=" + Arrays.toString(downstreamDNs)); + + "downstream DNs=" + Arrays.toString(downstreamDNs) + + ", blockId=" + replicaInfo.getBlockId()); } } catch (IOException e) { handleMirrorOutError(e); @@ -725,7 +727,8 @@ class BlockReceiver implements Closeable { if (duration > datanodeSlowLogThresholdMs && LOG.isWarnEnabled()) { LOG.warn("Slow BlockReceiver write data to disk cost:" + duration + "ms (threshold=" + datanodeSlowLogThresholdMs + "ms), " -+ "volume=" + getVolumeBaseUri()); ++ "volume=" + getVolumeBaseUri() ++ ", blockId=" + replicaInfo.getBlockId()); } if (duration > maxWriteToDiskMs) { @@ -917,7 +920,8 @@ class BlockReceiver implements Closeable { if (duration > datanodeSlowLogThresholdMs && LOG.isWarnEnabled()) { LOG.warn("Slow manageWriterOsCache took " + duration + "ms (threshold=" + datanodeSlowLogThresholdMs - + "ms), volume=" + getVolumeBaseUri()); + + "ms), volume=" + getVolumeBaseUri() + + ", blockId=" + replicaInfo.getBlockId()); } } } catch (Throwable t) { @@ -1629,7 +1633,9 @@ class BlockReceiver implements Closeable { if (duration > datanodeSlowLogThresholdMs) { LOG.warn("Slow PacketResponder send ack to upstream took " + duration + "ms (threshold=" + datanodeSlowLogThresholdMs + "ms), " + myString -+ ", replyAck=" + replyAck); ++ ", replyAck=" + replyAck ++ ", downstream DNs=" + Arrays.toString(downstreamDNs) ++ ", blockId=" + replicaInfo.getBlockId()); } else if (LOG.isDebugEnabled()) { LOG.debug(myString + ", replyAck=" + replyAck); } - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[12/19] hadoop git commit: YARN-7419. CapacityScheduler: Allow auto leaf queue creation after queue mapping. (Suma Shivaprasad via wangda)
YARN-7419. CapacityScheduler: Allow auto leaf queue creation after queue mapping. (Suma Shivaprasad via wangda) Change-Id: Ia1704bb8cb5070e5b180b5a85787d7b9ca57ebc6 Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0987a7b8 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0987a7b8 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0987a7b8 Branch: refs/heads/YARN-6592 Commit: 0987a7b8c2c1e4c2095821d98a7db19644df Parents: f2efaf0 Author: Wangda TanAuthored: Thu Nov 16 11:22:48 2017 -0800 Committer: Wangda Tan Committed: Thu Nov 16 11:25:52 2017 -0800 -- .../server/resourcemanager/RMAppManager.java| 7 +- .../placement/ApplicationPlacementContext.java | 52 ++ .../placement/PlacementManager.java | 34 +- .../placement/PlacementRule.java| 7 +- .../UserGroupMappingPlacementRule.java | 284 ++- .../server/resourcemanager/rmapp/RMAppImpl.java | 87 +- .../scheduler/capacity/AbstractCSQueue.java | 2 +- .../capacity/AbstractManagedParentQueue.java| 196 +++-- .../capacity/AutoCreatedLeafQueue.java | 27 +- .../scheduler/capacity/CapacityScheduler.java | 157 +++- .../CapacitySchedulerConfiguration.java | 153 .../capacity/CapacitySchedulerQueueManager.java | 103 ++- .../scheduler/capacity/ManagedParentQueue.java | 158 .../scheduler/capacity/ParentQueue.java | 13 - .../scheduler/capacity/PlanQueue.java | 25 +- .../scheduler/event/AppAddedSchedulerEvent.java | 37 +- .../server/resourcemanager/TestAppManager.java | 29 +- .../TestUserGroupMappingPlacementRule.java | 14 +- .../scheduler/TestSchedulerUtils.java | 1 + .../capacity/TestCapacityScheduler.java | 6 +- .../TestCapacitySchedulerAutoQueueCreation.java | 794 +++ 21 files changed, 1921 insertions(+), 265 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/0987a7b8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java -- diff 
--git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java index d042590..5e82f40 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java @@ -360,13 +360,8 @@ public class RMAppManager implements EventHandler, private RMAppImpl createAndPopulateNewRMApp( ApplicationSubmissionContext submissionContext, long submitTime, String user, boolean isRecovery, long startTime) throws YarnException { + if (!isRecovery) { - // Do queue mapping - if (rmContext.getQueuePlacementManager() != null) { -// We only do queue mapping when it's a new application -rmContext.getQueuePlacementManager().placeApplication( -submissionContext, user); - } // fail the submission if configured application timeout value is invalid RMServerUtils.validateApplicationTimeouts( submissionContext.getApplicationTimeouts()); http://git-wip-us.apache.org/repos/asf/hadoop/blob/0987a7b8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/ApplicationPlacementContext.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/ApplicationPlacementContext.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/ApplicationPlacementContext.java new file mode 100644 index 000..f2f92b8 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/ApplicationPlacementContext.java @@ -0,0 +1,52 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license
[11/19] hadoop git commit: YARN-7419. CapacityScheduler: Allow auto leaf queue creation after queue mapping. (Suma Shivaprasad via wangda)
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0987a7b8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/AppAddedSchedulerEvent.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/AppAddedSchedulerEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/AppAddedSchedulerEvent.java index 0a8d6fe..80b7f2f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/AppAddedSchedulerEvent.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/AppAddedSchedulerEvent.java @@ -6,9 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
@@ -22,6 +22,8 @@ import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.ReservationId; +import org.apache.hadoop.yarn.server.resourcemanager.placement +.ApplicationPlacementContext; public class AppAddedSchedulerEvent extends SchedulerEvent { @@ -31,15 +33,23 @@ public class AppAddedSchedulerEvent extends SchedulerEvent { private final ReservationId reservationID; private final boolean isAppRecovering; private final Priority appPriority; + private final ApplicationPlacementContext placementContext; public AppAddedSchedulerEvent(ApplicationId applicationId, String queue, String user) { -this(applicationId, queue, user, false, null, Priority.newInstance(0)); +this(applicationId, queue, user, false, null, Priority.newInstance(0), +null); + } + + public AppAddedSchedulerEvent(ApplicationId applicationId, String queue, + String user, ApplicationPlacementContext placementContext) { +this(applicationId, queue, user, false, null, Priority.newInstance(0), +placementContext); } public AppAddedSchedulerEvent(ApplicationId applicationId, String queue, String user, ReservationId reservationID, Priority appPriority) { -this(applicationId, queue, user, false, reservationID, appPriority); +this(applicationId, queue, user, false, reservationID, appPriority, null); } public AppAddedSchedulerEvent(String user, @@ -47,12 +57,20 @@ public class AppAddedSchedulerEvent extends SchedulerEvent { Priority appPriority) { this(submissionContext.getApplicationId(), submissionContext.getQueue(), user, isAppRecovering, submissionContext.getReservationID(), -appPriority); +appPriority, null); + } + + public AppAddedSchedulerEvent(String user, + ApplicationSubmissionContext submissionContext, boolean isAppRecovering, + Priority appPriority, ApplicationPlacementContext placementContext) { +this(submissionContext.getApplicationId(), 
submissionContext.getQueue(), +user, isAppRecovering, submissionContext.getReservationID(), +appPriority, placementContext); } public AppAddedSchedulerEvent(ApplicationId applicationId, String queue, String user, boolean isAppRecovering, ReservationId reservationID, - Priority appPriority) { + Priority appPriority, ApplicationPlacementContext placementContext) { super(SchedulerEventType.APP_ADDED); this.applicationId = applicationId; this.queue = queue; @@ -60,6 +78,7 @@ public class AppAddedSchedulerEvent extends SchedulerEvent { this.reservationID = reservationID; this.isAppRecovering = isAppRecovering; this.appPriority = appPriority; +this.placementContext = placementContext; } public ApplicationId getApplicationId() { @@ -85,4 +104,8 @@ public class AppAddedSchedulerEvent extends SchedulerEvent { public Priority getApplicatonPriority() { return appPriority; } + + public ApplicationPlacementContext getPlacementContext() { +return placementContext; + } }
[10/19] hadoop git commit: HADOOP-14982. Clients using FailoverOnNetworkExceptionRetry can go into a loop if they're used without authenticating with kerberos in HA env (pbacsko via rkanter)
HADOOP-14982. Clients using FailoverOnNetworkExceptionRetry can go into a loop if they're used without authenticating with kerberos in HA env (pbacsko via rkanter) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f2efaf01 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f2efaf01 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f2efaf01 Branch: refs/heads/YARN-6592 Commit: f2efaf013f7577948061abbb49c6d17c375e92cc Parents: 6bf2c30 Author: Robert KanterAuthored: Thu Nov 16 11:11:19 2017 -0800 Committer: Robert Kanter Committed: Thu Nov 16 11:11:19 2017 -0800 -- .../apache/hadoop/io/retry/RetryPolicies.java | 22 +++- .../apache/hadoop/io/retry/TestRetryProxy.java | 22 .../io/retry/UnreliableImplementation.java | 10 + .../hadoop/io/retry/UnreliableInterface.java| 6 +- 4 files changed, 58 insertions(+), 2 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2efaf01/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java -- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java index fa0cb6e..adf23c0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java @@ -32,11 +32,14 @@ import java.util.Map.Entry; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.TimeUnit; +import javax.security.sasl.SaslException; + import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.ipc.RetriableException; import org.apache.hadoop.ipc.StandbyException; import org.apache.hadoop.net.ConnectTimeoutException; import org.apache.hadoop.security.token.SecretManager.InvalidToken; +import org.ietf.jgss.GSSException; import 
com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; @@ -663,6 +666,11 @@ public class RetryPolicies { + retries + ") exceeded maximum allowed (" + maxRetries + ")"); } + if (isSaslFailure(e)) { + return new RetryAction(RetryAction.RetryDecision.FAIL, 0, + "SASL failure"); + } + if (e instanceof ConnectException || e instanceof EOFException || e instanceof NoRouteToHostException || @@ -716,7 +724,7 @@ public class RetryPolicies { private static long calculateExponentialTime(long time, int retries) { return calculateExponentialTime(time, retries, Long.MAX_VALUE); } - + private static boolean isWrappedStandbyException(Exception e) { if (!(e instanceof RemoteException)) { return false; @@ -725,6 +733,18 @@ public class RetryPolicies { StandbyException.class); return unwrapped instanceof StandbyException; } + + private static boolean isSaslFailure(Exception e) { + Throwable current = e; + do { + if (current instanceof SaslException) { +return true; + } + current = current.getCause(); + } while (current != null); + + return false; + } static RetriableException getWrappedRetriableException(Exception e) { if (!(e instanceof RemoteException)) { http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2efaf01/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java -- diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java index 649af89..1accb0a0 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java @@ -39,6 +39,8 @@ import java.util.concurrent.*; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; +import javax.security.sasl.SaslException; + import static 
org.apache.hadoop.io.retry.RetryPolicies.*; import static org.junit.Assert.*; import static org.mockito.Matchers.any; @@ -326,4 +328,24 @@ public class TestRetryProxy { assertEquals(InterruptedException.class, e.getCause().getClass()); assertEquals("sleep interrupted", e.getCause().getMessage()); } + + @Test + public void testNoRetryOnSaslError() throws Exception
[17/19] hadoop git commit: YARN-7448. [API] Add SchedulingRequest to the AllocateRequest. (Panagiotis Garefalakis via asuresh)
YARN-7448. [API] Add SchedulingRequest to the AllocateRequest. (Panagiotis Garefalakis via asuresh) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b9f0e942 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b9f0e942 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b9f0e942 Branch: refs/heads/YARN-6592 Commit: b9f0e942f92adc2a97b7f70151f57bdf2315bfe3 Parents: c24deea Author: Arun SureshAuthored: Fri Nov 17 10:42:43 2017 -0800 Committer: Arun Suresh Committed: Fri Nov 17 10:43:44 2017 -0800 -- .../api/protocolrecords/AllocateRequest.java| 42 ++ .../hadoop/yarn/api/records/ResourceSizing.java | 27 +++ .../yarn/api/records/SchedulingRequest.java | 1 + .../src/main/proto/yarn_service_protos.proto| 1 + .../impl/pb/AllocateRequestPBImpl.java | 83 .../records/impl/pb/ResourceSizingPBImpl.java | 2 +- .../impl/pb/SchedulingRequestPBImpl.java| 16 .../hadoop/yarn/api/TestPBImplRecords.java | 19 + 8 files changed, 190 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9f0e942/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java index ae0891e..d8d2347 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/AllocateRequest.java @@ -18,6 +18,7 @@ package org.apache.hadoop.yarn.api.protocolrecords; +import java.util.Collections; import java.util.List; import org.apache.hadoop.classification.InterfaceAudience.Public; @@ -28,6 +29,7 @@ 
import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest; import org.apache.hadoop.yarn.api.records.ResourceRequest; +import org.apache.hadoop.yarn.api.records.SchedulingRequest; import org.apache.hadoop.yarn.api.records.UpdateContainerRequest; import org.apache.hadoop.yarn.util.Records; @@ -212,6 +214,32 @@ public abstract class AllocateRequest { public abstract void setUpdateRequests( List updateRequests); + /** + * Get the list of Scheduling requests being sent by the + * ApplicationMaster. + * @return list of {@link SchedulingRequest} being sent by the + * ApplicationMaster. + */ + @Public + @Unstable + public List getSchedulingRequests() { +return Collections.EMPTY_LIST; + } + + /** + * Set the list of Scheduling requests to inform the + * ResourceManager about the application's resource requirements + * (potentially including allocation tags & placement constraints). + * @param schedulingRequests list of SchedulingRequest to update + * the ResourceManager about the application's resource + * requirements. + */ + @Public + @Unstable + public void setSchedulingRequests( + List schedulingRequests) { + } + @Public @Unstable public static AllocateRequestBuilder newBuilder() { @@ -314,6 +342,20 @@ public abstract class AllocateRequest { } /** + * Set the schedulingRequests of the request. + * @see AllocateRequest#setSchedulingRequests(List) + * @param schedulingRequests SchedulingRequest of the request + * @return {@link AllocateRequestBuilder} + */ +@Public +@Unstable +public AllocateRequestBuilder schedulingRequests( +List schedulingRequests) { + allocateRequest.setSchedulingRequests(schedulingRequests); + return this; +} + +/** * Return generated {@link AllocateRequest} object. 
* @return {@link AllocateRequest} */ http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9f0e942/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceSizing.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceSizing.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceSizing.java index d82be11..8cdc63f 100644 ---
hadoop git commit: HDFS-12801. RBF: Set MountTableResolver as default file resolver. Contributed by Inigo Goiri.
Repository: hadoop Updated Branches: refs/heads/branch-3.0 6681e7238 -> 3cd522469 HDFS-12801. RBF: Set MountTableResolver as default file resolver. Contributed by Inigo Goiri. (cherry picked from commit e182e777947a85943504a207deb3cf3ffc047910) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3cd52246 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3cd52246 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3cd52246 Branch: refs/heads/branch-3.0 Commit: 3cd52246935b6ab9976ad428424bceca5a79d7aa Parents: 6681e72 Author: Inigo GoiriAuthored: Thu Nov 16 16:58:47 2017 -0800 Committer: Inigo Goiri Committed: Fri Nov 17 10:08:04 2017 -0800 -- .../hadoop-hdfs/src/main/resources/hdfs-default.xml| 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cd52246/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index a01c230..158786c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -4883,7 +4883,7 @@ dfs.federation.router.file.resolver.client.class -org.apache.hadoop.hdfs.server.federation.MockResolver + org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver Class to resolve files to subclusters. - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
hadoop git commit: HDFS-12801. RBF: Set MountTableResolver as default file resolver. Contributed by Inigo Goiri.
Repository: hadoop Updated Branches: refs/heads/branch-2.9 9eab9a25c -> ee267565b HDFS-12801. RBF: Set MountTableResolver as default file resolver. Contributed by Inigo Goiri. (cherry picked from commit e182e777947a85943504a207deb3cf3ffc047910) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ee267565 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ee267565 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ee267565 Branch: refs/heads/branch-2.9 Commit: ee267565b57d4cac826f1fc74af05e1fe1ec Parents: 9eab9a2 Author: Inigo GoiriAuthored: Thu Nov 16 16:58:47 2017 -0800 Committer: Inigo Goiri Committed: Fri Nov 17 10:06:46 2017 -0800 -- .../hadoop-hdfs/src/main/resources/hdfs-default.xml| 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/ee267565/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index 8af7e4c..88ee8af 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -4563,7 +4563,7 @@ dfs.federation.router.file.resolver.client.class -org.apache.hadoop.hdfs.server.federation.MockResolver + org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver Class to resolve files to subclusters. - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
hadoop git commit: HDFS-12801. RBF: Set MountTableResolver as default file resolver. Contributed by Inigo Goiri.
Repository: hadoop Updated Branches: refs/heads/branch-2 9cf9627e2 -> b57ea38fc HDFS-12801. RBF: Set MountTableResolver as default file resolver. Contributed by Inigo Goiri. (cherry picked from commit e182e777947a85943504a207deb3cf3ffc047910) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b57ea38f Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b57ea38f Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b57ea38f Branch: refs/heads/branch-2 Commit: b57ea38fcf96f7095e07cb97fb76d7de5f871d3c Parents: 9cf9627 Author: Inigo GoiriAuthored: Thu Nov 16 16:58:47 2017 -0800 Committer: Inigo Goiri Committed: Fri Nov 17 10:05:45 2017 -0800 -- .../hadoop-hdfs/src/main/resources/hdfs-default.xml| 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/b57ea38f/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index 8af7e4c..88ee8af 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -4563,7 +4563,7 @@ dfs.federation.router.file.resolver.client.class -org.apache.hadoop.hdfs.server.federation.MockResolver + org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver Class to resolve files to subclusters. - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
hadoop git commit: YARN-7218. Decouple YARN Services REST API namespace from RM. (Contributed by Eric Yang)
Repository: hadoop Updated Branches: refs/heads/trunk 5f0b238a1 -> 0940e4f69 YARN-7218. Decouple YARN Services REST API namespace from RM. (Contributed by Eric Yang) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0940e4f6 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0940e4f6 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0940e4f6 Branch: refs/heads/trunk Commit: 0940e4f692441f16e742666ac925f71a083eab27 Parents: 5f0b238 Author: Eric YangAuthored: Fri Nov 17 12:28:12 2017 -0500 Committer: Eric Yang Committed: Fri Nov 17 12:28:12 2017 -0500 -- .../hadoop/yarn/service/TestApiServer.java | 4 +- .../yarn/service/api/records/Component.java | 51 +--- .../service/api/records/ReadinessCheck.java | 14 +- .../yarn/service/api/records/Service.java | 15 +++--- .../yarn/service/conf/RestApiConstants.java | 2 +- .../org/apache/hadoop/yarn/webapp/WebApps.java | 10 +++- .../server/resourcemanager/ResourceManager.java | 14 +- .../server/resourcemanager/webapp/RMWebApp.java | 15 -- 8 files changed, 77 insertions(+), 48 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/0940e4f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java index 2b22474..896b2f6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services-api/src/test/java/org/apache/hadoop/yarn/service/TestApiServer.java @@ -62,8 +62,8 @@ 
public class TestApiServer { this.apiServer.getClass().isAnnotationPresent(Path.class)); final Path path = this.apiServer.getClass() .getAnnotation(Path.class); -assertEquals("The path has /ws/v1 annotation", path.value(), -"/ws/v1"); +assertEquals("The path has /v1 annotation", path.value(), +"/v1"); } @Test http://git-wip-us.apache.org/repos/asf/hadoop/blob/0940e4f6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Component.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Component.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Component.java index fe9c043..ce0e0cf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Component.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/api/records/Component.java @@ -26,6 +26,8 @@ import java.util.Collections; import java.util.List; import java.util.Objects; +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; @@ -46,22 +48,53 @@ import org.apache.hadoop.classification.InterfaceStability; @ApiModel(description = "One or more components of the service. If the service is HBase say, then the component can be a simple role like master or regionserver. If the service is a complex business webapp then a component can be other services say Kafka or Storm. 
Thereby it opens up the support for complex and nested services.") @javax.annotation.Generated(value = "class io.swagger.codegen.languages.JavaClientCodegen", date = "2016-06-02T08:15:05.615-07:00") @XmlRootElement +@XmlAccessorType(XmlAccessType.FIELD) @JsonInclude(JsonInclude.Include.NON_NULL) public class Component implements Serializable { private static final long serialVersionUID = -8430058381509087805L; + @JsonProperty("name") private String name = null; + + @JsonProperty("dependencies") private List dependencies = new ArrayList(); + +
[1/2] hadoop git commit: YARN-1015. FS should watch node resource utilization and allocate opportunistic containers if appropriate.
Repository: hadoop Updated Branches: refs/heads/YARN-1011 a4cfabf28 -> 561410c78 http://git-wip-us.apache.org/repos/asf/hadoop/blob/561410c7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java index 42d4f81..e70053c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java @@ -55,13 +55,19 @@ import org.apache.hadoop.yarn.MockApps; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; +import org.apache.hadoop.yarn.api.records.Container; +import org.apache.hadoop.yarn.api.records.ContainerExitStatus; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; +import org.apache.hadoop.yarn.api.records.ContainerState; +import org.apache.hadoop.yarn.api.records.ContainerStatus; +import org.apache.hadoop.yarn.api.records.ExecutionType; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.api.records.QueueInfo; import 
org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceRequest; +import org.apache.hadoop.yarn.api.records.ResourceUtilization; import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationSubmissionContextPBImpl; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.AsyncDispatcher; @@ -71,6 +77,8 @@ import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.security.YarnAuthorizationProvider; +import org.apache.hadoop.yarn.server.api.records.OverAllocationInfo; +import org.apache.hadoop.yarn.server.api.records.ResourceThresholds; import org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService; import org.apache.hadoop.yarn.server.resourcemanager.MockAM; import org.apache.hadoop.yarn.server.resourcemanager.MockNM; @@ -92,6 +100,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeResourceUpdateEvent; +import org.apache.hadoop.yarn.server.resourcemanager.rmnode.UpdatedContainerInfo; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler; @@ -1054,15 +1063,15 @@ public class TestFairScheduler extends FairSchedulerTestBase { assertEquals( FairSchedulerConfiguration.DEFAULT_RM_SCHEDULER_INCREMENT_ALLOCATION_MB, scheduler.getQueueManager().getQueue("queue1"). -getResourceUsage().getMemorySize()); +getGuaranteedResourceUsage().getMemorySize()); NodeUpdateSchedulerEvent updateEvent2 = new NodeUpdateSchedulerEvent(node2); scheduler.handle(updateEvent2); assertEquals(1024, scheduler.getQueueManager().getQueue("queue1"). 
- getResourceUsage().getMemorySize()); +getGuaranteedResourceUsage().getMemorySize()); assertEquals(2, scheduler.getQueueManager().getQueue("queue1"). - getResourceUsage().getVirtualCores()); +getGuaranteedResourceUsage().getVirtualCores()); // verify metrics QueueMetrics queue1Metrics = scheduler.getQueueManager().getQueue("queue1") @@ -1097,7 +1106,7 @@ public class TestFairScheduler extends FairSchedulerTestBase { // Make sure queue 1 is allocated app capacity assertEquals(1024, scheduler.getQueueManager().getQueue("queue1"). -getResourceUsage().getMemorySize()); +getGuaranteedResourceUsage().getMemorySize()); // Now queue 2 requests likewise ApplicationAttemptId attId = createSchedulingRequest(1024, "queue2", "user1", 1); @@ -1107,7 +1116,7 @@ public class TestFairScheduler extends
[2/2] hadoop git commit: YARN-1015. FS should watch node resource utilization and allocate opportunistic containers if appropriate.
YARN-1015. FS should watch node resource utilization and allocate opportunistic containers if appropriate. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/561410c7 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/561410c7 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/561410c7 Branch: refs/heads/YARN-1011 Commit: 561410c78bf0855b86f9b2e8065125e9b0cfede4 Parents: a4cfabf Author: Haibo ChenAuthored: Fri Nov 17 07:47:32 2017 -0800 Committer: Haibo Chen Committed: Fri Nov 17 07:47:32 2017 -0800 -- .../sls/scheduler/FairSchedulerMetrics.java | 4 +- .../hadoop/yarn/conf/YarnConfiguration.java | 5 + .../src/main/resources/yarn-default.xml | 13 + .../scheduler/SchedulerNode.java| 48 ++ .../scheduler/fair/FSAppAttempt.java| 166 --- .../scheduler/fair/FSLeafQueue.java | 51 +- .../scheduler/fair/FSParentQueue.java | 36 +- .../resourcemanager/scheduler/fair/FSQueue.java | 39 +- .../scheduler/fair/FairScheduler.java | 97 ++-- .../fair/FairSchedulerConfiguration.java| 5 + .../scheduler/fair/Schedulable.java | 17 +- .../DominantResourceFairnessPolicy.java | 8 +- .../fair/policies/FairSharePolicy.java | 4 +- .../webapp/dao/FairSchedulerQueueInfo.java | 2 +- .../yarn/server/resourcemanager/MockNodes.java | 60 ++- .../TestWorkPreservingRMRestart.java| 2 +- .../scheduler/fair/FakeSchedulable.java | 9 +- .../scheduler/fair/TestAppRunnability.java | 9 +- .../scheduler/fair/TestFSAppAttempt.java| 4 +- .../scheduler/fair/TestFSLeafQueue.java | 4 +- .../scheduler/fair/TestFSSchedulerNode.java | 4 +- .../scheduler/fair/TestFairScheduler.java | 468 +-- .../scheduler/fair/TestSchedulingPolicy.java| 10 +- 23 files changed, 861 insertions(+), 204 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/561410c7/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/FairSchedulerMetrics.java -- diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/FairSchedulerMetrics.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/FairSchedulerMetrics.java index a5aee74..1f4e7c7 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/FairSchedulerMetrics.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/FairSchedulerMetrics.java @@ -75,7 +75,7 @@ public class FairSchedulerMetrics extends SchedulerMetrics { case DEMAND: return schedulable.getDemand().getMemorySize(); case USAGE: -return schedulable.getResourceUsage().getMemorySize(); +return schedulable.getGuaranteedResourceUsage().getMemorySize(); case MINSHARE: return schedulable.getMinShare().getMemorySize(); case MAXSHARE: @@ -96,7 +96,7 @@ public class FairSchedulerMetrics extends SchedulerMetrics { case DEMAND: return schedulable.getDemand().getVirtualCores(); case USAGE: -return schedulable.getResourceUsage().getVirtualCores(); +return schedulable.getGuaranteedResourceUsage().getVirtualCores(); case MINSHARE: return schedulable.getMinShare().getVirtualCores(); case MAXSHARE: http://git-wip-us.apache.org/repos/asf/hadoop/blob/561410c7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index f9c8b69..68c7acf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -266,6 +266,11 @@ public class YarnConfiguration extends Configuration { /** UserGroupMappingPlacementRule configuration string. 
*/ public static final String USER_GROUP_PLACEMENT_RULE = "user-group"; + public static final String RM_SCHEDULER_OVERSUBSCRIPTION_ENABLED = + RM_PREFIX + "scheduler.oversubscription.enabled"; + public static final boolean DEFAULT_RM_SCHEDULER_OVERSUBSCRIPTION_ENABLED + = false; + /** Enable Resource Manager webapp ui actions */ public static
[33/46] hadoop git commit: YARN-7503. Configurable heap size / JVM opts in service AM. Contributed by Jonathan Hung
YARN-7503. Configurable heap size / JVM opts in service AM. Contributed by Jonathan Hung Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6bf2c301 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6bf2c301 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6bf2c301 Branch: refs/heads/YARN-5881 Commit: 6bf2c301924a3acae5a7510b8473f6292a5a471b Parents: 28d0fcb Author: Jian HeAuthored: Thu Nov 16 10:53:55 2017 -0800 Committer: Jian He Committed: Thu Nov 16 10:53:55 2017 -0800 -- .../apache/hadoop/yarn/service/client/ServiceClient.java | 10 +- .../apache/hadoop/yarn/service/conf/YarnServiceConf.java | 5 + .../service/containerlaunch/JavaCommandLineBuilder.java | 11 +-- 3 files changed, 15 insertions(+), 11 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/6bf2c301/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java index af43f8a..d1b6026 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java @@ -559,7 +559,7 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes, Map env = addAMEnv(); // create AM CLI -String cmdStr = buildCommandLine(serviceName, conf, 
appRootDir, hasAMLog4j); +String cmdStr = buildCommandLine(app, conf, appRootDir, hasAMLog4j); submissionContext.setResource(Resource.newInstance(YarnServiceConf .getLong(YarnServiceConf.AM_RESOURCE_MEM, YarnServiceConf.DEFAULT_KEY_AM_RESOURCE_MEM, app.getConfiguration(), @@ -624,12 +624,12 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes, LOG.debug(builder.toString()); } - private String buildCommandLine(String serviceName, Configuration conf, + private String buildCommandLine(Service app, Configuration conf, Path appRootDir, boolean hasSliderAMLog4j) throws BadConfigException { JavaCommandLineBuilder CLI = new JavaCommandLineBuilder(); CLI.forceIPv4().headless(); -//TODO CLI.setJVMHeap -//TODO CLI.addJVMOPTS +CLI.setJVMOpts(YarnServiceConf.get(YarnServiceConf.JVM_OPTS, null, +app.getConfiguration(), conf)); if (hasSliderAMLog4j) { CLI.sysprop(SYSPROP_LOG4J_CONFIGURATION, YARN_SERVICE_LOG4J_FILENAME); CLI.sysprop(SYSPROP_LOG_DIR, ApplicationConstants.LOG_DIR_EXPANSION_VAR); @@ -637,7 +637,7 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes, CLI.add(ServiceMaster.class.getCanonicalName()); //TODO debugAM CLI.add(Arguments.ARG_DEBUG) CLI.add("-" + ServiceMaster.YARNFILE_OPTION, new Path(appRootDir, -serviceName + ".json")); +app.getName() + ".json")); // pass the registry binding CLI.addConfOptionToCLI(conf, RegistryConstants.KEY_REGISTRY_ZK_ROOT, RegistryConstants.DEFAULT_ZK_REGISTRY_ROOT); http://git-wip-us.apache.org/repos/asf/hadoop/blob/6bf2c301/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java index a7bd58d..684d980 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/conf/YarnServiceConf.java +++
[26/46] hadoop git commit: Revert "HDFS-12681. Fold HdfsLocatedFileStatus into HdfsFileStatus."
Revert "HDFS-12681. Fold HdfsLocatedFileStatus into HdfsFileStatus." This reverts commit b85603e3f85e85da406241b991f3a9974384c3aa. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/675e9a8f Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/675e9a8f Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/675e9a8f Branch: refs/heads/YARN-5881 Commit: 675e9a8f57570771a0219d95940681b067d36b94 Parents: b1941b2 Author: Chris DouglasAuthored: Wed Nov 15 19:17:46 2017 -0800 Committer: Chris Douglas Committed: Wed Nov 15 19:20:37 2017 -0800 -- .../org/apache/hadoop/fs/LocatedFileStatus.java | 11 +- .../main/java/org/apache/hadoop/fs/Hdfs.java| 4 +- .../hadoop/hdfs/DistributedFileSystem.java | 4 +- .../hadoop/hdfs/protocol/HdfsFileStatus.java| 80 -- .../hdfs/protocol/HdfsLocatedFileStatus.java| 110 +++ .../hadoop/hdfs/protocolPB/PBHelperClient.java | 65 +-- .../dev-support/findbugsExcludeFile.xml | 7 +- .../apache/hadoop/hdfs/server/mover/Mover.java | 6 +- .../server/namenode/FSDirStatAndListingOp.java | 61 +- .../hadoop/hdfs/TestBlockStoragePolicy.java | 10 +- .../apache/hadoop/hdfs/TestDFSOutputStream.java | 2 +- .../hdfs/server/mover/TestStorageMover.java | 8 +- 12 files changed, 217 insertions(+), 151 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/675e9a8f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java -- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java index 9cc81d3..29e1998 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocatedFileStatus.java @@ -135,16 +135,7 @@ public class LocatedFileStatus extends FileStatus { public BlockLocation[] 
getBlockLocations() { return locations; } - - /** - * Hook for subclasses to lazily set block locations. The {@link #locations} - * field should be null before this is called. - * @param locations Block locations for this instance. - */ - protected void setBlockLocations(BlockLocation[] locations) { -this.locations = locations; - } - + /** * Compare this FileStatus to another FileStatus * @param o the FileStatus to be compared. http://git-wip-us.apache.org/repos/asf/hadoop/blob/675e9a8f/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/Hdfs.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/Hdfs.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/Hdfs.java index f306d06..0138195 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/Hdfs.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/fs/Hdfs.java @@ -48,6 +48,7 @@ import org.apache.hadoop.hdfs.client.impl.CorruptFileBlockIterator; import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; +import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.AccessControlException; @@ -187,7 +188,8 @@ public class Hdfs extends AbstractFileSystem { @Override public LocatedFileStatus next() throws IOException { -return getNext().makeQualifiedLocated(getUri(), p); +return ((HdfsLocatedFileStatus)getNext()).makeQualifiedLocated( +getUri(), p); } }; } http://git-wip-us.apache.org/repos/asf/hadoop/blob/675e9a8f/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java -- diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java index eef83d7..9db12e1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -85,6
[28/46] hadoop git commit: YARN-7492. Set up SASS for new YARN UI styling. Contributed by Vasudevan Skm.
YARN-7492. Set up SASS for new YARN UI styling. Contributed by Vasudevan Skm. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/09a13426 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/09a13426 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/09a13426 Branch: refs/heads/YARN-5881 Commit: 09a13426086aed9d1bc63a844858dcac560763a6 Parents: 675e9a8 Author: Sunil GAuthored: Thu Nov 16 13:44:21 2017 +0530 Committer: Sunil G Committed: Thu Nov 16 13:44:21 2017 +0530 -- .gitignore | 3 + .../main/webapp/app/components/nodes-heatmap.js | 6 +- ...er-app-memusage-by-nodes-stacked-barchart.js | 2 +- .../main/webapp/app/components/tree-selector.js | 6 +- .../src/main/webapp/app/styles/app.css | 717 -- .../src/main/webapp/app/styles/app.scss | 723 +++ .../src/main/webapp/app/styles/colors.scss | 37 + .../src/main/webapp/app/styles/variables.scss | 40 + .../src/main/webapp/app/utils/color-utils.js| 8 +- .../hadoop-yarn-ui/src/main/webapp/package.json | 3 +- .../hadoop-yarn-ui/src/main/webapp/yarn.lock| 674 - 11 files changed, 1477 insertions(+), 742 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/09a13426/.gitignore -- diff --git a/.gitignore b/.gitignore index 70c1f23..440708a 100644 --- a/.gitignore +++ b/.gitignore @@ -8,6 +8,7 @@ *.sdf *.suo *.vcxproj.user +*.patch .idea .svn .classpath @@ -45,3 +46,5 @@ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tmp yarnregistry.pdf patchprocess/ .history/ +hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/package-lock.json +hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn-error.log http://git-wip-us.apache.org/repos/asf/hadoop/blob/09a13426/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/nodes-heatmap.js -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/nodes-heatmap.js 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/nodes-heatmap.js index 1ea655b..7802d42 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/nodes-heatmap.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/nodes-heatmap.js @@ -122,8 +122,8 @@ export default BaseChartComponent.extend({ var xOffset = layout.margin; var yOffset = layout.margin * 3; -var gradientStartColor = "#2ca02c"; -var gradientEndColor = "#ffb014"; +var gradientStartColor = "#60cea5"; +var gradientEndColor = "#ffbc0b"; var colorFunc = d3.interpolateRgb(d3.rgb(gradientStartColor), d3.rgb(gradientEndColor)); @@ -138,7 +138,7 @@ export default BaseChartComponent.extend({ var rect = g.append("rect") .attr("x", sampleXOffset) .attr("y", sampleYOffset) -.attr("fill", this.selectedCategory === i ? "#2c7bb6" : colorFunc(ratio)) +.attr("fill", this.selectedCategory === i ? "#26bbf0" : colorFunc(ratio)) .attr("width", this.SAMPLE_CELL_WIDTH) .attr("height", this.SAMPLE_HEIGHT) .attr("class", "hyperlink"); http://git-wip-us.apache.org/repos/asf/hadoop/blob/09a13426/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/per-app-memusage-by-nodes-stacked-barchart.js -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/per-app-memusage-by-nodes-stacked-barchart.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/per-app-memusage-by-nodes-stacked-barchart.js index 65cbaf5..c01fe36 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/per-app-memusage-by-nodes-stacked-barchart.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/per-app-memusage-by-nodes-stacked-barchart.js @@ -74,7 +74,7 @@ export default StackedBarchart.extend({ didInsertElement: function() { this.initChart(true); -this.colors = ["Orange", "Grey", "LimeGreen"]; +this.colors = 
["lightsalmon", "Grey", "mediumaquamarine"]; var containers = this.get("rmContainers"); var nodes = this.get("nodes"); http://git-wip-us.apache.org/repos/asf/hadoop/blob/09a13426/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/tree-selector.js -- diff --git
[24/46] hadoop git commit: YARN-7361. Improve the docker container runtime documentation. Contributed by Shane Kumpf
YARN-7361. Improve the docker container runtime documentation. Contributed by Shane Kumpf Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fac72eef Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fac72eef Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fac72eef Branch: refs/heads/YARN-5881 Commit: fac72eef23bb0a74a34f289dd6ef50ffa4303aa4 Parents: b246c54 Author: Jason LoweAuthored: Wed Nov 15 12:32:02 2017 -0600 Committer: Jason Lowe Committed: Wed Nov 15 12:32:02 2017 -0600 -- .../src/site/markdown/DockerContainers.md | 26 1 file changed, 21 insertions(+), 5 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/fac72eef/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md index 36c391a..dbbce7f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/DockerContainers.md @@ -17,11 +17,15 @@ Launching Applications Using Docker Containers -Notice --- -This feature is experimental and is not complete. Enabling this feature and -running Docker containers in your cluster has security implications. -Please do a security analysis before enabling this feature. +Security Warning +--- +**IMPORTANT** This feature is experimental and is not complete. **IMPORTANT** +Enabling this feature and running Docker containers in your cluster has security +implications. With this feature enabled, it may be possible to gain root access +to the YARN NodeManager hosts. 
Given Docker's integration with many powerful +kernel features, it is imperative that administrators understand +[Docker security](https://docs.docker.com/engine/security/security/) before +enabling this feature. Overview @@ -153,6 +157,18 @@ The following properties should be set in yarn-site.xml: privileged contains if privileged containers are allowed. + + +yarn.nodemanager.runtime.linux.docker.capabilities + CHOWN,DAC_OVERRIDE,FSETID,FOWNER,MKNOD,NET_RAW,SETGID,SETUID,SETFCAP,SETPCAP,NET_BIND_SERVICE,SYS_CHROOT,KILL,AUDIT_WRITE + + Optional. This configuration setting determines the capabilities + assigned to docker containers when they are launched. While these may not + be case-sensitive from a docker perspective, it is best to keep these + uppercase. To run without any capabilites, set this value to + "none" or "NONE" + + ``` - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[39/46] hadoop git commit: YARN-7411. Inter-Queue preemption's computeFixpointAllocation need to handle absolute resources while computing normalizedGuarantee. (Sunil G via wangda)
YARN-7411. Inter-Queue preemption's computeFixpointAllocation need to handle absolute resources while computing normalizedGuarantee. (Sunil G via wangda) Change-Id: I41b1d7558c20fc4eb2050d40134175a2ef6330cb Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5d28cb1b Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5d28cb1b Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5d28cb1b Branch: refs/heads/YARN-5881 Commit: 5d28cb1b7948e8a7370b117394e8227f999a033a Parents: c0b03ff Author: Wangda TanAuthored: Mon Nov 13 16:26:27 2017 -0800 Committer: Sunil G Committed: Fri Nov 17 19:59:32 2017 +0530 -- .../api/records/impl/pb/ResourcePBImpl.java | 12 .../resource/DefaultResourceCalculator.java | 8 +++ .../resource/DominantResourceCalculator.java| 21 ++ .../yarn/util/resource/ResourceCalculator.java | 14 +++- .../hadoop/yarn/util/resource/Resources.java| 5 ++ .../AbstractPreemptableResourceCalculator.java | 24 ++- .../monitor/capacity/TempQueuePerPartition.java | 12 ++-- ...alCapacityPreemptionPolicyMockFramework.java | 14 ...pacityPreemptionPolicyForNodePartitions.java | 76 9 files changed, 166 insertions(+), 20 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d28cb1b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java index 401e0c0..4f90133 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java @@ -26,7 +26,6 
@@ import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceInformation; import org.apache.hadoop.yarn.exceptions.ResourceNotFoundException; -import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto; import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProtoOrBuilder; import org.apache.hadoop.yarn.proto.YarnProtos.ResourceInformationProto; @@ -152,17 +151,6 @@ public class ResourcePBImpl extends Resource { .newInstance(ResourceInformation.VCORES); this.setMemorySize(p.getMemory()); this.setVirtualCores(p.getVirtualCores()); - -// Update missing resource information on respective index. -updateResourceInformationMap(types); - } - - private void updateResourceInformationMap(ResourceInformation[] types) { -for (int i = 0; i < types.length; i++) { - if (resources[i] == null) { -resources[i] = ResourceInformation.newInstance(types[i]); - } -} } private static ResourceInformation newDefaultInformation( http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d28cb1b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java index aefa85c..6375c4a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java @@ -112,6 +112,14 @@ public class DefaultResourceCalculator extends ResourceCalculator { } @Override + public Resource 
multiplyAndNormalizeUp(Resource r, double[] by, + Resource stepFactor) { +return Resources.createResource( +roundUp((long) (r.getMemorySize() * by[0] + 0.5), +stepFactor.getMemorySize())); + } + + @Override public Resource multiplyAndNormalizeDown(Resource r, double by, Resource stepFactor) { return Resources.createResource(
[37/46] hadoop git commit: HDFS-12801. RBF: Set MountTableResolver as default file resolver. Contributed by Inigo Goiri.
HDFS-12801. RBF: Set MountTableResolver as default file resolver. Contributed by Inigo Goiri. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e182e777 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e182e777 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e182e777 Branch: refs/heads/YARN-5881 Commit: e182e777947a85943504a207deb3cf3ffc047910 Parents: 0987a7b Author: Inigo Goiri Authored: Thu Nov 16 16:58:47 2017 -0800 Committer: Inigo Goiri Committed: Thu Nov 16 16:58:47 2017 -0800 -- .../hadoop-hdfs/src/main/resources/hdfs-default.xml| 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/e182e777/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index 28621ba..7ff91f2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -4883,7 +4883,7 @@ dfs.federation.router.file.resolver.client.class -org.apache.hadoop.hdfs.server.federation.MockResolver + org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver Class to resolve files to subclusters. - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[17/46] hadoop git commit: YARN-7488. Make ServiceClient.getAppId method public to return ApplicationId for a service name. Contributed by Gour Saha
YARN-7488. Make ServiceClient.getAppId method public to return ApplicationId for a service name. Contributed by Gour Saha Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4f40cd31 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4f40cd31 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4f40cd31 Branch: refs/heads/YARN-5881 Commit: 4f40cd314ab14f735a465fb9dff2dc1bf118e703 Parents: 8b12574 Author: Jian He Authored: Mon Nov 13 18:55:12 2017 -0800 Committer: Jian He Committed: Mon Nov 13 18:57:56 2017 -0800 -- .../java/org/apache/hadoop/yarn/service/client/ServiceClient.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/4f40cd31/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java index 11cd30d..af43f8a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java @@ -943,7 +943,7 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes, UserGroupInformation.getCurrentUser(), rpc, address); } - private synchronized ApplicationId getAppId(String serviceName) + public synchronized ApplicationId getAppId(String
serviceName) throws IOException, YarnException { if (cachedAppIds.containsKey(serviceName)) { return cachedAppIds.get(serviceName); - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[09/46] hadoop git commit: YARN-7442. [YARN-7069] Limit format of resource type name (Contributed by Wangda Tan via Daniel Templeton)
YARN-7442. [YARN-7069] Limit format of resource type name (Contributed by Wangda Tan via Daniel Templeton) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2e512f01 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2e512f01 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2e512f01 Branch: refs/heads/YARN-5881 Commit: 2e512f016ed689b5afbf1e27fdcd7c9f75b6dc9c Parents: fa4b5c6 Author: Daniel TempletonAuthored: Mon Nov 13 10:37:30 2017 -0800 Committer: Daniel Templeton Committed: Mon Nov 13 11:03:30 2017 -0800 -- .../yarn/api/records/ResourceInformation.java | 5 +++ .../yarn/util/resource/ResourceUtils.java | 26 ++ .../yarn/util/resource/TestResourceUtils.java | 37 3 files changed, 68 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e512f01/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java index 59908ef..67592cc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceInformation.java @@ -65,6 +65,11 @@ public class ResourceInformation implements Comparable { /** * Set the name for the resource. * + * A valid resource name must begin with a letter and contain only letters, + * numbers, and any of: '.', '_', or '-'. A valid resource name may also be + * optionally preceded by a name space followed by a slash. A valid name space + * consists of period-separated groups of letters, numbers, and dashes." 
+ * * @param rName name for the resource */ public void setName(String rName) { http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e512f01/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java index 1170c72..3deace8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java @@ -62,6 +62,10 @@ public class ResourceUtils { private static final Pattern RESOURCE_REQUEST_VALUE_PATTERN = Pattern.compile("^([0-9]+) ?([a-zA-Z]*)$"); + private static final Pattern RESOURCE_NAME_PATTERN = Pattern.compile( + "^(((\\p{Alnum}([\\p{Alnum}-]*\\p{Alnum})?\\.)*" + + "\\p{Alnum}([\\p{Alnum}-]*\\p{Alnum})?)/)?\\p{Alpha}([\\w.-]*)$"); + private static volatile boolean initializedResources = false; private static final Map RESOURCE_NAME_TO_INDEX = new ConcurrentHashMap (); @@ -209,6 +213,23 @@ public class ResourceUtils { } @VisibleForTesting + static void validateNameOfResourceNameAndThrowException(String resourceName) + throws YarnRuntimeException { +Matcher matcher = RESOURCE_NAME_PATTERN.matcher(resourceName); +if (!matcher.matches()) { + String message = String.format( + "'%s' is not a valid resource name. A valid resource name must" + + " begin with a letter and contain only letters, numbers, " + + "and any of: '.', '_', or '-'. A valid resource name may also" + + " be optionally preceded by a name space followed by a slash." 
+ + " A valid name space consists of period-separated groups of" + + " letters, numbers, and dashes.", + resourceName); + throw new YarnRuntimeException(message); +} + } + + @VisibleForTesting static void initializeResourcesMap(Configuration conf) { Map resourceInformationMap = new HashMap<>(); @@ -246,6 +267,11 @@ public class ResourceUtils { } } +// Validate names of resource information map. +for (String name : resourceInformationMap.keySet()) { + validateNameOfResourceNameAndThrowException(name); +} +
[42/46] hadoop git commit: YARN-6471. Support to add min/max resource configuration for a queue. (Sunil G via wangda)
YARN-6471. Support to add min/max resource configuration for a queue. (Sunil G via wangda) Change-Id: I9213f5297a6841fab5c573e85ee4c4e5f4a0b7ff Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/942f6f59 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/942f6f59 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/942f6f59 Branch: refs/heads/YARN-5881 Commit: 942f6f59f7d6bbbaa9986b5a4a3c5a191bc76792 Parents: 5f0b238 Author: Wangda TanAuthored: Fri Aug 11 10:30:23 2017 -0700 Committer: Sunil G Committed: Fri Nov 17 19:59:32 2017 +0530 -- .../org/apache/hadoop/util/StringUtils.java | 31 ++ .../resource/DefaultResourceCalculator.java | 6 + .../resource/DominantResourceCalculator.java| 7 + .../yarn/util/resource/ResourceCalculator.java | 12 + .../hadoop/yarn/util/resource/Resources.java| 5 + .../capacity/FifoCandidatesSelector.java| 9 +- .../ProportionalCapacityPreemptionPolicy.java | 10 +- .../monitor/capacity/TempQueuePerPartition.java | 16 +- .../scheduler/AbstractResourceUsage.java| 198 +++ .../scheduler/QueueResourceQuotas.java | 153 ++ .../scheduler/ResourceUsage.java| 237 ++--- .../scheduler/capacity/AbstractCSQueue.java | 162 +- .../scheduler/capacity/CSQueue.java | 39 ++ .../scheduler/capacity/CSQueueUtils.java| 24 +- .../CapacitySchedulerConfiguration.java | 179 ++- .../scheduler/capacity/LeafQueue.java | 31 +- .../scheduler/capacity/ParentQueue.java | 201 +++- .../scheduler/capacity/UsersManager.java| 5 +- .../PriorityUtilizationQueueOrderingPolicy.java | 11 + .../webapp/dao/CapacitySchedulerQueueInfo.java | 15 + .../yarn/server/resourcemanager/MockNM.java | 8 + .../yarn/server/resourcemanager/MockRM.java | 6 + ...alCapacityPreemptionPolicyMockFramework.java | 13 + ...estProportionalCapacityPreemptionPolicy.java | 29 +- ...pacityPreemptionPolicyIntraQueueWithDRF.java | 6 +- .../TestAbsoluteResourceConfiguration.java | 516 +++ .../capacity/TestApplicationLimits.java | 30 +- 
.../TestApplicationLimitsByPartition.java | 4 + .../capacity/TestCapacityScheduler.java | 2 +- .../scheduler/capacity/TestChildQueueOrder.java | 2 + .../scheduler/capacity/TestLeafQueue.java | 261 -- .../scheduler/capacity/TestParentQueue.java | 8 + .../scheduler/capacity/TestReservations.java| 19 + ...tPriorityUtilizationQueueOrderingPolicy.java | 3 + .../webapp/TestRMWebServicesCapacitySched.java | 4 +- 35 files changed, 1831 insertions(+), 431 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/942f6f59/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java -- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java index cda5ec7..1be8a08 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/StringUtils.java @@ -1152,4 +1152,35 @@ public class StringUtils { return s1.equalsIgnoreCase(s2); } + /** + * Checks if the String contains only unicode letters. + * + * null will return false. + * An empty String (length()=0) will return true. 
+ * + * + * StringUtils.isAlpha(null) = false + * StringUtils.isAlpha("") = true + * StringUtils.isAlpha(" ") = false + * StringUtils.isAlpha("abc") = true + * StringUtils.isAlpha("ab2c") = false + * StringUtils.isAlpha("ab-c") = false + * + * + * @param str the String to check, may be null + * @return true if only contains letters, and is non-null + */ + public static boolean isAlpha(String str) { + if (str == null) { + return false; + } + int sz = str.length(); + for (int i = 0; i < sz; i++) { + if (Character.isLetter(str.charAt(i)) == false) { + return false; + } + } + return true; + } + } http://git-wip-us.apache.org/repos/asf/hadoop/blob/942f6f59/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
[21/46] hadoop git commit: HADOOP-14876. Create downstream developer docs from the compatibility guidelines
HADOOP-14876. Create downstream developer docs from the compatibility guidelines Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/69043ba8 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/69043ba8 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/69043ba8 Branch: refs/heads/YARN-5881 Commit: 69043ba8b5da2d66a80b6209915da1a0865ca46f Parents: 2f379d4 Author: Daniel TempletonAuthored: Tue Nov 14 13:19:14 2017 -0800 Committer: Daniel Templeton Committed: Wed Nov 15 10:03:29 2017 -0800 -- .../src/site/markdown/Compatibility.md | 132 -- .../src/site/markdown/DownstreamDev.md | 432 +++ hadoop-project/src/site/site.xml| 3 +- 3 files changed, 528 insertions(+), 39 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/69043ba8/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md -- diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md b/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md index 47fa09a..461ff17 100644 --- a/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md +++ b/hadoop-common-project/hadoop-common/src/site/markdown/Compatibility.md @@ -63,6 +63,14 @@ when the various labels are appropriate. As a general rule, all new interfaces and APIs should have the most limited labels (e.g. Private Unstable) that will not inhibit the intent of the interface or API. +### Structure + +This document is arranged in sections according to the various compatibility +concerns. Within each section an introductory text explains what compatibility +means in that section, why it's important, and what the intent to support +compatibility is. The subsequent "Policy" section then sets forth in specific +terms what the governing policy is. + ### Notational Conventions The key words "MUST" "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", @@ -77,12 +85,18 @@ flagged for removal. 
The standard meaning of the annotation is that the API element should not be used and may be removed in a later version. In all cases removing an element from an API is an incompatible -change. In the case of [Stable](./InterfaceClassification.html#Stable) APIs, -the change cannot be made between minor releases within the same major -version. In addition, to allow consumers of the API time to adapt to the change, -the API element to be removed should be marked as deprecated for a full major -release before it is removed. For example, if a method is marked as deprecated -in Hadoop 2.8, it cannot be removed until Hadoop 4.0. +change. The stability of the element SHALL determine when such a change is +permissible. A [Stable](./InterfaceClassification.html#Stable) element MUST +be marked as deprecated for a full major release before it can be removed and +SHALL NOT be removed in a minor or maintenance release. An +[Evolving](./InterfaceClassification.html#Evolving) element MUST be marked as +deprecated for a full minor release before it can be removed and SHALL NOT be +removed during a maintenance release. An +[Unstable](./InterfaceClassification.html#Unstable) element MAY be removed at +any time. When possible an [Unstable](./InterfaceClassification.html#Unstable) +element SHOULD be marked as deprecated for at least one release before being +removed. For example, if a method is marked as deprecated in Hadoop 2.8, it +cannot be removed until Hadoop 4.0. ### Policy @@ -141,7 +155,7 @@ in hand. Semantic compatibility Apache Hadoop strives to ensure that the behavior of APIs remains consistent -over versions, though changes for correctness may result in changes in +across releases, though changes for correctness may result in changes in behavior. API behavior SHALL be specified by the JavaDoc API documentation where present and complete. When JavaDoc API documentation is not available, behavior SHALL be specified by the behavior expected by the related unit tests. 
@@ -229,8 +243,8 @@ transports, such as SSL. Upgrading a service from SSLv2 to SSLv3 may break existing SSLv2 clients. The minimum supported major version of any transports MUST not increase across minor releases within a major version. -Service ports are considered as part of the transport mechanism. Fixed -service port numbers MUST be kept consistent to prevent breaking clients. +Service ports are considered as part of the transport mechanism. Default +service port numbers must be kept consistent to prevent breaking clients. Policy @@ -281,9 +295,8 @@ according to the following: * Client-Server compatibility MUST be maintained so as to allow upgrading
[43/46] hadoop git commit: YARN-7483. CapacityScheduler test cases cleanup post YARN-5881. (Sunil G via wangda)
YARN-7483. CapacityScheduler test cases cleanup post YARN-5881. (Sunil G via wangda) Change-Id: I9741a6baf5cb7352d05636efb6c0b24790e7589a Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c7970dce Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c7970dce Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c7970dce Branch: refs/heads/YARN-5881 Commit: c7970dced39e33448f304cb71ac7192193266087 Parents: 01325d4 Author: Wangda TanAuthored: Thu Nov 16 11:35:48 2017 -0800 Committer: Sunil G Committed: Fri Nov 17 19:59:32 2017 +0530 -- .../api/records/impl/pb/ResourcePBImpl.java | 11 ++ .../scheduler/capacity/TestLeafQueue.java | 130 +-- 2 files changed, 131 insertions(+), 10 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7970dce/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java index 4f90133..6ebed6e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java @@ -151,6 +151,17 @@ public class ResourcePBImpl extends Resource { .newInstance(ResourceInformation.VCORES); this.setMemorySize(p.getMemory()); this.setVirtualCores(p.getVirtualCores()); + +// Update missing resource information on respective index. 
+updateResourceInformationMap(types); + } + + private void updateResourceInformationMap(ResourceInformation[] types) { +for (int i = 0; i < types.length; i++) { + if (resources[i] == null) { +resources[i] = ResourceInformation.newInstance(types[i]); + } +} } private static ResourceInformation newDefaultInformation( http://git-wip-us.apache.org/repos/asf/hadoop/blob/c7970dce/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java index 30bff78..1426e88 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java @@ -1027,6 +1027,8 @@ public class TestLeafQueue { Resource clusterResource = Resources.createResource(numNodes * (8*GB), numNodes * 16); when(csContext.getNumClusterNodes()).thenReturn(numNodes); +root.updateClusterResource(clusterResource, +new ResourceLimits(clusterResource)); // Setup resource-requests // app_0 asks for 3 3-GB containers @@ -1083,9 +1085,15 @@ public class TestLeafQueue { a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps); -assertEquals(12*GB, a.getUsedResources().getMemorySize()); -assertEquals(12*GB, 
app_0.getCurrentConsumption().getMemorySize()); -assertEquals(0*GB, app_1.getCurrentConsumption().getMemorySize()); +assertEquals(9*GB, a.getUsedResources().getMemorySize()); +assertEquals(8*GB, app_0.getCurrentConsumption().getMemorySize()); +assertEquals(1*GB, app_1.getCurrentConsumption().getMemorySize()); + +assertEquals(4 * GB, +app_0.getTotalPendingRequestsPerPartition().get("").getMemorySize()); + +assertEquals(1 * GB, +app_1.getTotalPendingRequestsPerPartition().get("").getMemorySize()); } @SuppressWarnings({ "unchecked", "rawtypes" }) @@ -1317,11 +1325,6 @@ public class TestLeafQueue { Resource clusterResource = Resources.createResource(numNodes * (8*GB),
[30/46] hadoop git commit: YARN-7486. Race condition in service AM that can cause NPE. Contributed by Jian He
YARN-7486. Race condition in service AM that can cause NPE. Contributed by Jian He Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f4d5d202 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f4d5d202 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f4d5d202 Branch: refs/heads/YARN-5881 Commit: f4d5d20286eb05449f6fd7cd6ff0554228205fe2 Parents: 462e25a Author: Billie RinaldiAuthored: Wed Nov 15 10:20:46 2017 -0800 Committer: Billie Rinaldi Committed: Thu Nov 16 07:58:06 2017 -0800 -- .../hadoop/yarn/service/ServiceScheduler.java | 50 - .../yarn/service/component/Component.java | 58 -- .../yarn/service/component/ComponentEvent.java | 11 ++ .../component/instance/ComponentInstance.java | 83 +++--- .../containerlaunch/ContainerLaunchService.java | 2 +- .../provider/AbstractProviderService.java | 5 +- .../yarn/service/provider/ProviderService.java | 5 +- .../yarn/service/provider/ProviderUtils.java| 5 +- .../ServiceTimelinePublisher.java | 5 +- .../hadoop/yarn/service/MockServiceAM.java | 66 --- .../hadoop/yarn/service/ServiceTestUtils.java | 5 +- .../hadoop/yarn/service/TestServiceAM.java | 109 +++ .../service/monitor/TestServiceMonitor.java | 12 ++ 13 files changed, 290 insertions(+), 126 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4d5d202/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java index a7b7e22..6bc5673 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java @@ -132,7 +132,6 @@ public class ServiceScheduler extends CompositeService { private AMRMClientAsync amRMClient; private NMClientAsync nmClient; private AsyncDispatcher dispatcher; - AsyncDispatcher compInstanceDispatcher; private YarnRegistryViewForProviders yarnRegistryOperations; private ServiceContext context; private ContainerLaunchService containerLaunchService; @@ -152,7 +151,7 @@ public class ServiceScheduler extends CompositeService { yarnRegistryOperations = createYarnRegistryOperations(context, registryClient); -// register metrics +// register metrics, serviceMetrics = ServiceMetrics .register(app.getName(), "Metrics for service"); serviceMetrics.tag("type", "Metrics type [component or service]", "service"); @@ -167,14 +166,11 @@ public class ServiceScheduler extends CompositeService { dispatcher = new AsyncDispatcher("Component dispatcher"); dispatcher.register(ComponentEventType.class, new ComponentEventHandler()); +dispatcher.register(ComponentInstanceEventType.class, +new ComponentInstanceEventHandler()); dispatcher.setDrainEventsOnStop(); addIfService(dispatcher); -compInstanceDispatcher = -new AsyncDispatcher("CompInstance dispatcher"); -compInstanceDispatcher.register(ComponentInstanceEventType.class, -new ComponentInstanceEventHandler()); -addIfService(compInstanceDispatcher); containerLaunchService = new ContainerLaunchService(context.fs); addService(containerLaunchService); @@ -277,10 +273,10 @@ public class ServiceScheduler extends CompositeService { } private void recoverComponents(RegisterApplicationMasterResponse response) { -List recoveredContainers = response +List containersFromPrevAttempt = 
response .getContainersFromPreviousAttempts(); LOG.info("Received {} containers from previous attempt.", -recoveredContainers.size()); +containersFromPrevAttempt.size()); Map existingRecords = new HashMap<>(); List existingComps = null; try { @@ -302,9 +298,8 @@ public class ServiceScheduler extends CompositeService { } } } -for (Container container : recoveredContainers) { - LOG.info("Handling container {} from
[25/46] hadoop git commit: HADOOP-15023. ValueQueue should also validate (int) (lowWatermark * numValues) > 0 on construction.
HADOOP-15023. ValueQueue should also validate (int) (lowWatermark * numValues) > 0 on construction. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b1941b20 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b1941b20 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b1941b20 Branch: refs/heads/YARN-5881 Commit: b1941b200d6b4fa6a7891421c0a1e212cad3d6eb Parents: fac72ee Author: Xiao ChenAuthored: Wed Nov 15 16:43:25 2017 -0800 Committer: Xiao Chen Committed: Wed Nov 15 16:44:06 2017 -0800 -- .../main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java | 6 -- 1 file changed, 4 insertions(+), 2 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/b1941b20/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java -- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java index 8411ffb..1ddd8a3 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/ValueQueue.java @@ -224,6 +224,9 @@ public class ValueQueue { Preconditions.checkArgument(numValues > 0, "\"numValues\" must be > 0"); Preconditions.checkArgument(((lowWatermark > 0)&&(lowWatermark <= 1)), "\"lowWatermark\" must be > 0 and <= 1"); +final int watermarkValue = (int) (numValues * lowWatermark); +Preconditions.checkArgument(watermarkValue > 0, +"(int) (\"numValues\" * \"lowWatermark\") must be > 0"); Preconditions.checkArgument(expiry > 0, "\"expiry\" must be > 0"); Preconditions.checkArgument(numFillerThreads > 0, "\"numFillerThreads\" must be > 0"); @@ -243,8 +246,7 @@ public class ValueQueue { throws Exception { LinkedBlockingQueue keyQueue = new LinkedBlockingQueue(); 
-refiller.fillQueueForKey(keyName, keyQueue, -(int)(lowWatermark * numValues)); +refiller.fillQueueForKey(keyName, keyQueue, watermarkValue); return keyQueue; } }); - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[34/46] hadoop git commit: HADOOP-14982. Clients using FailoverOnNetworkExceptionRetry can go into a loop if they're used without authenticating with kerberos in HA env (pbacsko via rkanter)
HADOOP-14982. Clients using FailoverOnNetworkExceptionRetry can go into a loop if they're used without authenticating with kerberos in HA env (pbacsko via rkanter) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f2efaf01 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f2efaf01 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f2efaf01 Branch: refs/heads/YARN-5881 Commit: f2efaf013f7577948061abbb49c6d17c375e92cc Parents: 6bf2c30 Author: Robert KanterAuthored: Thu Nov 16 11:11:19 2017 -0800 Committer: Robert Kanter Committed: Thu Nov 16 11:11:19 2017 -0800 -- .../apache/hadoop/io/retry/RetryPolicies.java | 22 +++- .../apache/hadoop/io/retry/TestRetryProxy.java | 22 .../io/retry/UnreliableImplementation.java | 10 + .../hadoop/io/retry/UnreliableInterface.java| 6 +- 4 files changed, 58 insertions(+), 2 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2efaf01/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java -- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java index fa0cb6e..adf23c0 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java @@ -32,11 +32,14 @@ import java.util.Map.Entry; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.TimeUnit; +import javax.security.sasl.SaslException; + import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.ipc.RetriableException; import org.apache.hadoop.ipc.StandbyException; import org.apache.hadoop.net.ConnectTimeoutException; import org.apache.hadoop.security.token.SecretManager.InvalidToken; +import org.ietf.jgss.GSSException; import 
com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; @@ -663,6 +666,11 @@ public class RetryPolicies { + retries + ") exceeded maximum allowed (" + maxRetries + ")"); } + if (isSaslFailure(e)) { + return new RetryAction(RetryAction.RetryDecision.FAIL, 0, + "SASL failure"); + } + if (e instanceof ConnectException || e instanceof EOFException || e instanceof NoRouteToHostException || @@ -716,7 +724,7 @@ public class RetryPolicies { private static long calculateExponentialTime(long time, int retries) { return calculateExponentialTime(time, retries, Long.MAX_VALUE); } - + private static boolean isWrappedStandbyException(Exception e) { if (!(e instanceof RemoteException)) { return false; @@ -725,6 +733,18 @@ public class RetryPolicies { StandbyException.class); return unwrapped instanceof StandbyException; } + + private static boolean isSaslFailure(Exception e) { + Throwable current = e; + do { + if (current instanceof SaslException) { +return true; + } + current = current.getCause(); + } while (current != null); + + return false; + } static RetriableException getWrappedRetriableException(Exception e) { if (!(e instanceof RemoteException)) { http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2efaf01/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java -- diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java index 649af89..1accb0a0 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/retry/TestRetryProxy.java @@ -39,6 +39,8 @@ import java.util.concurrent.*; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; +import javax.security.sasl.SaslException; + import static 
org.apache.hadoop.io.retry.RetryPolicies.*; import static org.junit.Assert.*; import static org.mockito.Matchers.any; @@ -326,4 +328,24 @@ public class TestRetryProxy { assertEquals(InterruptedException.class, e.getCause().getClass()); assertEquals("sleep interrupted", e.getCause().getMessage()); } + + @Test + public void testNoRetryOnSaslError() throws Exception
[27/46] hadoop git commit: YARN-7492. Set up SASS for new YARN UI styling. Contributed by Vasudevan Skm.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/09a13426/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn.lock -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn.lock b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn.lock index dc45d7e..fb35ea7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn.lock +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/yarn.lock @@ -25,6 +25,15 @@ after@0.8.1: version "0.8.1" resolved "https://registry.yarnpkg.com/after/-/after-0.8.1.tgz#ab5d4fb883f596816d3515f8f791c0af486dd627; +ajv@^5.1.0: + version "5.3.0" + resolved "https://registry.yarnpkg.com/ajv/-/ajv-5.3.0.tgz#4414ff74a50879c208ee5fdc826e32c303549eda; + dependencies: +co "^4.6.0" +fast-deep-equal "^1.0.0" +fast-json-stable-stringify "^2.0.0" +json-schema-traverse "^0.3.0" + align-text@^0.1.1, align-text@^0.1.3: version "0.1.4" resolved "https://registry.yarnpkg.com/align-text/-/align-text-0.1.4.tgz#0cd90a561093f35d0a99256c22b7069433fad117; @@ -94,6 +103,10 @@ anymatch@^1.3.0: arrify "^1.0.0" micromatch "^2.1.5" +aproba@^1.0.3: + version "1.2.0" + resolved "https://registry.yarnpkg.com/aproba/-/aproba-1.2.0.tgz#6802e6264efd18c790a1b0d517f0f2627bf2c94a; + archy@~1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/archy/-/archy-1.0.0.tgz#f9c8c13757cc1dd7bc379ac77b2c62a5c2868c40; @@ -132,6 +145,10 @@ array-equal@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/array-equal/-/array-equal-1.0.0.tgz#8c2a5ef2472fd9ea742b04c77a75093ba2757c93; +array-find-index@^1.0.1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/array-find-index/-/array-find-index-1.0.2.tgz#df010aa1287e164bbda6f9723b0a96a1ec4187a1; + array-flatten@1.1.1: version "1.1.1" resolved "https://registry.yarnpkg.com/array-flatten/-/array-flatten-1.1.1.tgz#9a5f699051b1e7073328f2a008968b64ea2955d2; @@ -202,6 +219,10 @@ async-disk-cache@^1.0.0: rimraf "^2.5.3" rsvp 
"^3.0.18" +async-foreach@^0.1.3: + version "0.1.3" + resolved "https://registry.yarnpkg.com/async-foreach/-/async-foreach-0.1.3.tgz#36121f845c0578172de419a97dbeb1d16ec34542; + async-some@~1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/async-some/-/async-some-1.0.2.tgz#4d8a81620d5958791b5b98f802d3207776e95509; @@ -230,11 +251,19 @@ async@~0.8.0: version "0.8.0" resolved "https://registry.yarnpkg.com/async/-/async-0.8.0.tgz#ee65ec77298c2ff1456bc4418a052d0f06435112; +asynckit@^0.4.0: + version "0.4.0" + resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79; + aws-sign2@~0.6.0: version "0.6.0" resolved "https://registry.yarnpkg.com/aws-sign2/-/aws-sign2-0.6.0.tgz#14342dd38dbcc94d0e5b87d763cd63612c0e794f; -aws4@^1.2.1: +aws-sign2@~0.7.0: + version "0.7.0" + resolved "https://registry.yarnpkg.com/aws-sign2/-/aws-sign2-0.7.0.tgz#b46e890934a9591f2d2f6f86d7e6a9f1b3fe76a8; + +aws4@^1.2.1, aws4@^1.6.0: version "1.6.0" resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.6.0.tgz#83ef5ca860b2b32e4a0deedee8c771b9db57471e; @@ -375,6 +404,10 @@ balanced-match@^0.4.1: version "0.4.2" resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-0.4.2.tgz#cb3f3e3c732dc0f01ee70b403f302e61d7709838; +balanced-match@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.0.tgz#89b4d199ab2bee49de164ea02b89ce462d71b767; + base64-arraybuffer@0.1.2: version "0.1.2" resolved "https://registry.yarnpkg.com/base64-arraybuffer/-/base64-arraybuffer-0.1.2.tgz#474df4a9f2da24e05df3158c3b1db3c3cd46a154; @@ -473,6 +506,18 @@ boom@2.x.x: dependencies: hoek "2.x.x" +boom@4.x.x: + version "4.3.1" + resolved "https://registry.yarnpkg.com/boom/-/boom-4.3.1.tgz#4f8a3005cb4a7e3889f749030fd25b96e01d2e31; + dependencies: +hoek "4.x.x" + +boom@5.x.x: + version "5.2.0" + resolved "https://registry.yarnpkg.com/boom/-/boom-5.2.0.tgz#5dd9da6ee3a5f302077436290cb717d3f4a54e02; + dependencies: 
+hoek "4.x.x" + bower-config@0.6.1: version "0.6.1" resolved "https://registry.yarnpkg.com/bower-config/-/bower-config-0.6.1.tgz#7093155688bef44079bf4cb32d189312c87ded60; @@ -507,6 +552,13 @@ brace-expansion@^1.0.0: balanced-match "^0.4.1" concat-map "0.0.1" +brace-expansion@^1.1.7: + version "1.1.8" + resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-1.1.8.tgz#c07b211c7c952ec1f8efd51a77ef0d1d3990a292; + dependencies: +balanced-match "^1.0.0" +concat-map "0.0.1" + braces@^1.8.2: version "1.8.5" resolved "https://registry.yarnpkg.com/braces/-/braces-1.8.5.tgz#ba77962e12dff969d6b76711e914b737857bf6a7; @@ -558,6 +610,17 @@ broccoli-caching-writer@^2.0.0,
[18/46] hadoop git commit: HADOOP-14993. AliyunOSS: Override listFiles and listLocatedStatus. Contributed Genmao Yu
HADOOP-14993. AliyunOSS: Override listFiles and listLocatedStatus. Contributed Genmao Yu Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/18621af7 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/18621af7 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/18621af7 Branch: refs/heads/YARN-5881 Commit: 18621af7ae8f8ed703245744f8f2a770d07bbfb9 Parents: 4f40cd3 Author: Kai ZhengAuthored: Tue Nov 14 17:58:37 2017 +0800 Committer: Kai Zheng Committed: Tue Nov 14 17:58:37 2017 +0800 -- .../fs/aliyun/oss/AliyunOSSFileSystem.java | 75 +-- .../fs/aliyun/oss/AliyunOSSFileSystemStore.java | 106 .../hadoop/fs/aliyun/oss/AliyunOSSUtils.java| 12 ++ .../fs/aliyun/oss/FileStatusAcceptor.java | 125 +++ .../site/markdown/tools/hadoop-aliyun/index.md | 6 +- 5 files changed, 309 insertions(+), 15 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/18621af7/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java -- diff --git a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java index 3561b02..41d475d 100644 --- a/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java +++ b/hadoop-tools/hadoop-aliyun/src/main/java/org/apache/hadoop/fs/aliyun/oss/AliyunOSSFileSystem.java @@ -28,14 +28,18 @@ import java.util.List; import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang.StringUtils; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileStatus; import 
org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocatedFileStatus; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.fs.PathIOException; +import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.util.Progressable; @@ -46,6 +50,7 @@ import com.aliyun.oss.model.ObjectMetadata; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.apache.hadoop.fs.aliyun.oss.AliyunOSSUtils.objectRepresentsDirectory; import static org.apache.hadoop.fs.aliyun.oss.Constants.*; /** @@ -60,6 +65,12 @@ public class AliyunOSSFileSystem extends FileSystem { private Path workingDir; private AliyunOSSFileSystemStore store; private int maxKeys; + private static final PathFilter DEFAULT_FILTER = new PathFilter() { +@Override +public boolean accept(Path file) { + return true; +} + }; @Override public FSDataOutputStream append(Path path, int bufferSize, @@ -302,18 +313,6 @@ public class AliyunOSSFileSystem extends FileSystem { } /** - * Check if OSS object represents a directory. - * - * @param name object key - * @param size object content length - * @return true if object represents a directory - */ - private boolean objectRepresentsDirectory(final String name, - final long size) { -return StringUtils.isNotEmpty(name) && name.endsWith("/") && size == 0L; - } - - /** * Turn a path (relative or otherwise) into an OSS key. * * @param path the path of the file. 
@@ -404,6 +403,58 @@ public class AliyunOSSFileSystem extends FileSystem { return result.toArray(new FileStatus[result.size()]); } + @Override + public RemoteIterator listFiles( + final Path f, final boolean recursive) throws IOException { +Path qualifiedPath = f.makeQualified(uri, workingDir); +final FileStatus status = getFileStatus(qualifiedPath); +PathFilter filter = new PathFilter() { + @Override + public boolean accept(Path path) { +return status.isFile() || !path.equals(f); + } +}; +FileStatusAcceptor acceptor = +new FileStatusAcceptor.AcceptFilesOnly(qualifiedPath); +return innerList(f, status, filter, acceptor, recursive); + } + + @Override + public RemoteIterator listLocatedStatus(Path f) + throws IOException { +return listLocatedStatus(f, DEFAULT_FILTER); + } + + @Override + public RemoteIterator listLocatedStatus(final Path f, + final PathFilter filter) throws IOException { +Path qualifiedPath = f.makeQualified(uri, workingDir); +final FileStatus status =
[32/46] hadoop git commit: YARN-7390. All reservation related test cases failed when TestYarnClient runs against Fair Scheduler. (Yufei Gu via Haibo Chen)
YARN-7390. All reservation related test cases failed when TestYarnClient runs against Fair Scheduler. (Yufei Gu via Haibo Chen) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/28d0fcbe Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/28d0fcbe Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/28d0fcbe Branch: refs/heads/YARN-5881 Commit: 28d0fcbef40930ca5652c0e9a5d777910f3ad3c4 Parents: 61ace17 Author: Haibo ChenAuthored: Thu Nov 16 10:48:24 2017 -0800 Committer: Haibo Chen Committed: Thu Nov 16 10:48:24 2017 -0800 -- .../yarn/client/api/impl/TestYarnClient.java| 455 ++-- .../api/impl/TestYarnClientWithReservation.java | 521 +++ 2 files changed, 551 insertions(+), 425 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/28d0fcbe/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java index 4c1a9cf..f6e305f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java @@ -33,7 +33,6 @@ import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.EnumSet; import java.util.HashMap; import java.util.HashSet; @@ -42,7 +41,6 @@ import java.util.List; import java.util.Map; import java.util.Set; -import com.google.common.base.Supplier; import org.apache.commons.io.IOUtils; import org.apache.hadoop.conf.Configuration; import 
org.apache.hadoop.io.DataInputByteBuffer; @@ -75,14 +73,6 @@ import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsResponse; import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest; import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse; -import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteRequest; -import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteResponse; -import org.apache.hadoop.yarn.api.protocolrecords.ReservationListRequest; -import org.apache.hadoop.yarn.api.protocolrecords.ReservationListResponse; -import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionRequest; -import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionResponse; -import org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateRequest; -import org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateResponse; import org.apache.hadoop.yarn.api.protocolrecords.SignalContainerRequest; import org.apache.hadoop.yarn.api.records.ApplicationAccessType; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; @@ -96,13 +86,7 @@ import org.apache.hadoop.yarn.api.records.ContainerReport; import org.apache.hadoop.yarn.api.records.ContainerState; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.NodeId; -import org.apache.hadoop.yarn.api.records.NodeLabel; import org.apache.hadoop.yarn.api.records.Priority; -import org.apache.hadoop.yarn.api.records.ReservationDefinition; -import org.apache.hadoop.yarn.api.records.ReservationId; -import org.apache.hadoop.yarn.api.records.ReservationRequest; -import org.apache.hadoop.yarn.api.records.ReservationRequestInterpreter; -import org.apache.hadoop.yarn.api.records.ReservationRequests; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.SignalContainerCommand; import 
org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState; @@ -119,23 +103,28 @@ import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier; import org.apache.hadoop.yarn.server.MiniYARNCluster; import org.apache.hadoop.yarn.server.resourcemanager.MockRM; +import org.apache.hadoop.yarn.server.resourcemanager.ParameterizedSchedulerTestBase; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; -import org.apache.hadoop.yarn.server.resourcemanager.reservation.ReservationSystemTestUtil; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import
[31/46] hadoop git commit: YARN-7469. Capacity Scheduler Intra-queue preemption: User can starve if newest app is exactly at user limit. Contributed by Eric Payne.
YARN-7469. Capacity Scheduler Intra-queue preemption: User can starve if newest app is exactly at user limit. Contributed by Eric Payne. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/61ace174 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/61ace174 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/61ace174 Branch: refs/heads/YARN-5881 Commit: 61ace174cdcbca9d22abce7aa0aa71148f37ad55 Parents: f4d5d20 Author: Sunil GAuthored: Thu Nov 16 22:34:23 2017 +0530 Committer: Sunil G Committed: Thu Nov 16 22:34:23 2017 +0530 -- .../FifoIntraQueuePreemptionPlugin.java | 6 ...alCapacityPreemptionPolicyMockFramework.java | 3 ++ ...cityPreemptionPolicyIntraQueueUserLimit.java | 35 3 files changed, 44 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/61ace174/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java index 00ae3da..3332f2a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java @@ -203,6 +203,12 @@ public class FifoIntraQueuePreemptionPlugin 
Resources.subtractFromNonNegative(preemtableFromApp, tmpApp.selected); Resources.subtractFromNonNegative(preemtableFromApp, tmpApp.getAMUsed()); + if (context.getIntraQueuePreemptionOrderPolicy() +.equals(IntraQueuePreemptionOrderPolicy.USERLIMIT_FIRST)) { +Resources.subtractFromNonNegative(preemtableFromApp, + tmpApp.getFiCaSchedulerApp().getCSLeafQueue().getMinimumAllocation()); + } + // Calculate toBePreempted from apps as follows: // app.preemptable = min(max(app.used - app.selected - app.ideal, 0), // intra_q_preemptable) http://git-wip-us.apache.org/repos/asf/hadoop/blob/61ace174/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java index 4fc0ea4..0bc5cb5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java @@ -358,6 +358,9 @@ public class ProportionalCapacityPreemptionPolicyMockFramework { queue = (LeafQueue) nameToCSQueues.get(queueName); queue.getApplications().add(app); queue.getAllApplications().add(app); + when(queue.getMinimumAllocation()) + 
.thenReturn(Resource.newInstance(1,1)); + when(app.getCSLeafQueue()).thenReturn(queue); HashSet users = userMap.get(queueName); if (null == users) { http://git-wip-us.apache.org/repos/asf/hadoop/blob/61ace174/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyIntraQueueUserLimit.java -- diff --git
[15/46] hadoop git commit: YARN-7466. ResourceRequest has a different default for allocationRequestId than Container. Contributed by Chandni Singh
YARN-7466. ResourceRequest has a different default for allocationRequestId than Container. Contributed by Chandni Singh Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5323b004 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5323b004 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5323b004 Branch: refs/heads/YARN-5881 Commit: 5323b0048b743771276ba860b10c27b23a70bf9e Parents: e14f03d Author: Jian HeAuthored: Mon Nov 13 15:37:39 2017 -0800 Committer: Jian He Committed: Mon Nov 13 15:37:39 2017 -0800 -- .../hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/5323b004/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto index e69c07b..7769c48 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto @@ -388,7 +388,7 @@ message ResourceRequestProto { optional bool relax_locality = 5 [default = true]; optional string node_label_expression = 6; optional ExecutionTypeRequestProto execution_type_request = 7; - optional int64 allocation_request_id = 8 [default = 0]; + optional int64 allocation_request_id = 8 [default = -1]; optional ProfileCapabilityProto profile = 9; } - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[44/46] hadoop git commit: YARN-7482. Max applications calculation per queue has to be retrospected with absolute resource support. Contributed by Sunil G.
YARN-7482. Max applications calculation per queue has to be retrospected with absolute resource support. Contributed by Sunil G. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/01325d4b Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/01325d4b Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/01325d4b Branch: refs/heads/YARN-5881 Commit: 01325d4b8daeebc8f9c231f0557d8411cff75c3e Parents: 5d28cb1 Author: Rohith Sharma K SAuthored: Thu Nov 16 17:33:00 2017 +0530 Committer: Sunil G Committed: Fri Nov 17 19:59:32 2017 +0530 -- .../scheduler/capacity/ParentQueue.java | 20 1 file changed, 20 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/01325d4b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java index 940637e..a427fb1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java @@ -1088,6 +1088,26 @@ public class ParentQueue extends AbstractCSQueue { childQueue.getQueueCapacities().setAbsoluteMaximumCapacity(label, (float) childQueue.getQueueCapacities().getMaximumCapacity(label) / getQueueCapacities().getAbsoluteMaximumCapacity(label)); + +// 
Re-visit max applications for a queue based on absolute capacity if +// needed. +if (childQueue instanceof LeafQueue) { + LeafQueue leafQueue = (LeafQueue) childQueue; + CapacitySchedulerConfiguration conf = csContext.getConfiguration(); + int maxApplications = (int) (conf.getMaximumSystemApplications() + * childQueue.getQueueCapacities().getAbsoluteCapacity(label)); + leafQueue.setMaxApplications(maxApplications); + + int maxApplicationsPerUser = Math.min(maxApplications, + (int) (maxApplications + * (leafQueue.getUsersManager().getUserLimit() / 100.0f) + * leafQueue.getUsersManager().getUserLimitFactor())); + leafQueue.setMaxApplicationsPerUser(maxApplicationsPerUser); + LOG.info("LeafQueue:" + leafQueue.getQueueName() + ", maxApplications=" + + maxApplications + ", maxApplicationsPerUser=" + + maxApplicationsPerUser + ", Abs Cap:" + + childQueue.getQueueCapacities().getAbsoluteCapacity(label)); +} } @Override - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[41/46] hadoop git commit: YARN-6471. Support to add min/max resource configuration for a queue. (Sunil G via wangda)
http://git-wip-us.apache.org/repos/asf/hadoop/blob/942f6f59/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/policy/PriorityUtilizationQueueOrderingPolicy.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/policy/PriorityUtilizationQueueOrderingPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/policy/PriorityUtilizationQueueOrderingPolicy.java index 0544387..4985a1a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/policy/PriorityUtilizationQueueOrderingPolicy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/policy/PriorityUtilizationQueueOrderingPolicy.java @@ -20,9 +20,11 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.policy; import com.google.common.annotations.VisibleForTesting; import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration; +import org.apache.hadoop.yarn.util.resource.Resources; import java.util.ArrayList; import java.util.Collections; @@ -121,6 +123,15 @@ public class PriorityUtilizationQueueOrderingPolicy implements QueueOrderingPoli // For queue with same used ratio / priority, queue with 
higher configured // capacity goes first if (0 == rc) { +Resource minEffRes1 = q1.getQueueResourceQuotas() +.getConfiguredMinResource(p); +Resource minEffRes2 = q2.getQueueResourceQuotas() +.getConfiguredMinResource(p); +if (!minEffRes1.equals(Resources.none()) +&& !minEffRes2.equals(Resources.none())) { + return minEffRes2.compareTo(minEffRes1); +} + float abs1 = q1.getQueueCapacities().getAbsoluteCapacity(p); float abs2 = q2.getQueueCapacities().getAbsoluteCapacity(p); return Float.compare(abs2, abs1); http://git-wip-us.apache.org/repos/asf/hadoop/blob/942f6f59/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerQueueInfo.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerQueueInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerQueueInfo.java index 22705cc..86b2fea 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerQueueInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/CapacitySchedulerQueueInfo.java @@ -62,6 +62,8 @@ public class CapacitySchedulerQueueInfo { protected long pendingContainers; protected QueueCapacitiesInfo capacities; protected ResourcesInfo resources; + protected ResourceInfo minEffectiveCapacity; + protected ResourceInfo maxEffectiveCapacity; CapacitySchedulerQueueInfo() { }; @@ -105,6 +107,11 @@ public class CapacitySchedulerQueueInfo { ResourceUsage queueResourceUsage = q.getQueueResourceUsage(); 
populateQueueResourceUsage(queueResourceUsage); + +minEffectiveCapacity = new ResourceInfo( +q.getQueueResourceQuotas().getEffectiveMinResource()); +maxEffectiveCapacity = new ResourceInfo( +q.getQueueResourceQuotas().getEffectiveMaxResource()); } protected void populateQueueResourceUsage(ResourceUsage queueResourceUsage) { @@ -200,4 +207,12 @@ public class CapacitySchedulerQueueInfo { public ResourcesInfo getResources() { return resources; } + + public ResourceInfo getMinEffectiveCapacity(){ +return minEffectiveCapacity; + } + + public ResourceInfo getMaxEffectiveCapacity(){ +return maxEffectiveCapacity; + } }
[22/46] hadoop git commit: YARN-6953. Clean up ResourceUtils.setMinimumAllocationForMandatoryResources() and setMaximumAllocationForMandatoryResources() (Contributed by Manikandan R via Daniel Templeton)
YARN-6953. Clean up ResourceUtils.setMinimumAllocationForMandatoryResources() and setMaximumAllocationForMandatoryResources() (Contributed by Manikandan R via Daniel Templeton) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e094eb74 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e094eb74 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e094eb74 Branch: refs/heads/YARN-5881 Commit: e094eb74b9e7d8c3c6f1990445d248b062cc230b Parents: 69043ba Author: Daniel TempletonAuthored: Wed Nov 15 09:55:40 2017 -0800 Committer: Daniel Templeton Committed: Wed Nov 15 10:03:29 2017 -0800 -- .../yarn/util/resource/ResourceUtils.java | 108 +++ 1 file changed, 38 insertions(+), 70 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/e094eb74/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java index 3deace8..c168337 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceUtils.java @@ -142,74 +142,44 @@ public class ResourceUtils { } } - private static void setMinimumAllocationForMandatoryResources( + private static void setAllocationForMandatoryResources( Map res, Configuration conf) { -String[][] resourceTypesKeys = { -{ResourceInformation.MEMORY_MB.getName(), -YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, -String.valueOf( -YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB), -ResourceInformation.MEMORY_MB.getName()}, -{ResourceInformation.VCORES.getName(), 
-YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES, -String.valueOf( - YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES), -ResourceInformation.VCORES.getName()}}; -for (String[] arr : resourceTypesKeys) { - String resourceTypesKey = - YarnConfiguration.RESOURCE_TYPES + "." + arr[0] + MINIMUM_ALLOCATION; - long minimumResourceTypes = conf.getLong(resourceTypesKey, -1); - long minimumConf = conf.getLong(arr[1], -1); - long minimum; - if (minimumResourceTypes != -1) { -minimum = minimumResourceTypes; -if (minimumConf != -1) { - LOG.warn("Using minimum allocation for memory specified in " - + "resource-types config file with key " - + minimumResourceTypes + ", ignoring minimum specified using " - + arr[1]); -} - } else { -minimum = conf.getLong(arr[1], Long.parseLong(arr[2])); - } - ResourceInformation ri = res.get(arr[3]); - ri.setMinimumAllocation(minimum); -} - } - - private static void setMaximumAllocationForMandatoryResources( - Map res, Configuration conf) { -String[][] resourceTypesKeys = { -{ResourceInformation.MEMORY_MB.getName(), -YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, -String.valueOf( -YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB), -ResourceInformation.MEMORY_MB.getName()}, -{ResourceInformation.VCORES.getName(), -YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES, -String.valueOf( - YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES), -ResourceInformation.VCORES.getName()}}; -for (String[] arr : resourceTypesKeys) { - String resourceTypesKey = - YarnConfiguration.RESOURCE_TYPES + "." 
+ arr[0] + MAXIMUM_ALLOCATION; - long maximumResourceTypes = conf.getLong(resourceTypesKey, -1); - long maximumConf = conf.getLong(arr[1], -1); - long maximum; - if (maximumResourceTypes != -1) { -maximum = maximumResourceTypes; -if (maximumConf != -1) { - LOG.warn("Using maximum allocation for memory specified in " - + "resource-types config file with key " - + maximumResourceTypes + ", ignoring maximum specified using " - + arr[1]); -} - } else { -maximum = conf.getLong(arr[1], Long.parseLong(arr[2])); - } -
[46/46] hadoop git commit: YARN-7254. UI and metrics changes related to absolute resource configuration. (Sunil G via wangda)
YARN-7254. UI and metrics changes related to absolute resource configuration. (Sunil G via wangda) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c4ba54b0 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c4ba54b0 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c4ba54b0 Branch: refs/heads/YARN-5881 Commit: c4ba54b0375b9534f5cae23e751b2f54a3cfeb9a Parents: 942f6f5 Author: Wangda TanAuthored: Mon Oct 16 11:04:05 2017 -0700 Committer: Sunil G Committed: Fri Nov 17 19:59:32 2017 +0530 -- .../yarn/api/records/QueueConfigurations.java | 76 +++ .../yarn/api/records/ResourceInformation.java | 6 + .../src/main/proto/yarn_protos.proto| 4 + .../impl/pb/QueueConfigurationsPBImpl.java | 136 ++- .../resource/DominantResourceCalculator.java| 24 +++- .../scheduler/QueueResourceQuotas.java | 38 -- .../scheduler/capacity/AbstractCSQueue.java | 18 ++- .../scheduler/capacity/CSQueue.java | 4 +- .../scheduler/capacity/LeafQueue.java | 4 +- .../scheduler/capacity/ParentQueue.java | 56 ++-- .../scheduler/capacity/UsersManager.java| 2 +- .../scheduler/common/fica/FiCaSchedulerApp.java | 23 +++- .../webapp/CapacitySchedulerPage.java | 49 +-- .../webapp/dao/CapacitySchedulerInfo.java | 3 +- .../dao/CapacitySchedulerLeafQueueInfo.java | 6 +- .../webapp/dao/CapacitySchedulerQueueInfo.java | 10 +- .../dao/PartitionQueueCapacitiesInfo.java | 34 - .../webapp/dao/QueueCapacitiesInfo.java | 13 +- ...CapacitySchedulerWithMultiResourceTypes.java | 25 ++-- .../scheduler/capacity/TestLeafQueue.java | 24 +++- 20 files changed, 443 insertions(+), 112 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4ba54b0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueConfigurations.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueConfigurations.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueConfigurations.java index e25c8aa..30096bf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueConfigurations.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/QueueConfigurations.java @@ -147,4 +147,80 @@ public abstract class QueueConfigurations { @Private @Unstable public abstract void setMaxAMPercentage(float maxAMPercentage); + + /** + * Get the effective minimum capacity of queue (from absolute resource). + * + * @return minimum resource capability + */ + @Public + @Unstable + public abstract Resource getEffectiveMinCapacity(); + + /** + * Set the effective minimum capacity of queue (from absolute resource). + * + * @param capacity + * minimum resource capability + */ + @Private + @Unstable + public abstract void setEffectiveMinCapacity(Resource capacity); + + /** + * Get the effective maximum capacity of queue (from absolute resource). + * + * @return maximum resource capability + */ + @Public + @Unstable + public abstract Resource getEffectiveMaxCapacity(); + + /** + * Set the effective maximum capacity of queue (from absolute resource). + * + * @param capacity + * maximum resource capability + */ + @Private + @Unstable + public abstract void setEffectiveMaxCapacity(Resource capacity); + + /** + * Get the configured minimum capacity of queue (from absolute resource). + * + * @return minimum resource capability + */ + @Public + @Unstable + public abstract Resource getConfiguredMinCapacity(); + + /** + * Set the configured minimum capacity of queue (from absolute resource). + * + * @param configuredMinResource + * minimum resource capability + */ + @Public + @Unstable + public abstract void setConfiguredMinCapacity(Resource configuredMinResource); + + /** + * Get the configured maximum capacity of queue (from absolute resource). 
+ * + * @return maximum resource capability + */ + @Public + @Unstable + public abstract Resource getConfiguredMaxCapacity(); + + /** + * Set the configured maximum capacity of queue (from absolute resource). + * + * @param configuredMaxResource + * maximum resource capability + */ + @Public + @Unstable + public abstract void setConfiguredMaxCapacity(Resource configuredMaxResource); }
[38/46] hadoop git commit: YARN-7430. Enable user re-mapping for Docker containers by default. Contributed by Eric Yang.
YARN-7430. Enable user re-mapping for Docker containers by default. Contributed by Eric Yang. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5f0b238a Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5f0b238a Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5f0b238a Branch: refs/heads/YARN-5881 Commit: 5f0b238a118f3992bd149d8c02e6a1376dee96d7 Parents: e182e77 Author: Varun VasudevAuthored: Fri Nov 17 12:04:47 2017 +0530 Committer: Varun Vasudev Committed: Fri Nov 17 12:04:47 2017 +0530 -- .../main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f0b238a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index 4799137..34257ed 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -1735,7 +1735,7 @@ public class YarnConfiguration extends Configuration { DOCKER_CONTAINER_RUNTIME_PREFIX + "enable-userremapping.allowed"; /** Set enable user remapping as false by default. */ - public static final boolean DEFAULT_NM_DOCKER_ENABLE_USER_REMAPPING = false; + public static final boolean DEFAULT_NM_DOCKER_ENABLE_USER_REMAPPING = true; /** lower limit for acceptable uids of user remapped user. 
*/ public static final String NM_DOCKER_USER_REMAPPING_UID_THRESHOLD = - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[16/46] hadoop git commit: HADOOP-15037. Add site release notes for OrgQueue and resource types.
HADOOP-15037. Add site release notes for OrgQueue and resource types. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8b125741 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8b125741 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8b125741 Branch: refs/heads/YARN-5881 Commit: 8b125741659a825c71877bd1b1cb8f7e3ef26436 Parents: 5323b00 Author: Andrew WangAuthored: Mon Nov 13 18:49:22 2017 -0800 Committer: Andrew Wang Committed: Mon Nov 13 18:49:22 2017 -0800 -- hadoop-project/src/site/markdown/index.md.vm | 20 +++- 1 file changed, 19 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/8b125741/hadoop-project/src/site/markdown/index.md.vm -- diff --git a/hadoop-project/src/site/markdown/index.md.vm b/hadoop-project/src/site/markdown/index.md.vm index 8e1e06f..9b2d9de 100644 --- a/hadoop-project/src/site/markdown/index.md.vm +++ b/hadoop-project/src/site/markdown/index.md.vm @@ -225,9 +225,27 @@ cluster for existing HDFS clients. See [HDFS-10467](https://issues.apache.org/jira/browse/HADOOP-10467) and the HDFS Router-based Federation -[documentation](./hadoop-project-dist/hadoop-hdfs/HDFSRouterFederation.md) for +[documentation](./hadoop-project-dist/hadoop-hdfs/HDFSRouterFederation.html) for more details. +API-based configuration of Capacity Scheduler queue configuration +-- + +The OrgQueue extension to the capacity scheduler provides a programmatic way to +change configurations by providing a REST API that users can call to modify +queue configurations. This enables automation of queue configuration management +by administrators in the queue's `administer_queue` ACL. + +See [YARN-5734](https://issues.apache.org/jira/browse/YARN-5734) and the +[Capacity Scheduler documentation](./hadoop-yarn/hadoop-yarn-site/CapacityScheduler.html) for more information. 
+ +YARN Resource Types +--- + +The YARN resource model has been generalized to support user-defined countable resource types beyond CPU and memory. For instance, the cluster administrator could define resources like GPUs, software licenses, or locally-attached storage. YARN tasks can then be scheduled based on the availability of these resources. + +See [YARN-3926](https://issues.apache.org/jira/browse/YARN-3926) and the [YARN resource model documentation](./hadoop-yarn/hadoop-yarn-site/ResourceModel.html) for more information. + Getting Started === - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[19/46] hadoop git commit: YARN-7462. Render outstanding resource requests on application page of new YARN UI. Contributed by Vasudevan Skm.
YARN-7462. Render outstanding resource requests on application page of new YARN UI. Contributed by Vasudevan Skm. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c4c57b80 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c4c57b80 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c4c57b80 Branch: refs/heads/YARN-5881 Commit: c4c57b80e1e43391417e958f455e25fd7ff67d07 Parents: 18621af Author: Sunil GAuthored: Wed Nov 15 12:01:29 2017 +0530 Committer: Sunil G Committed: Wed Nov 15 12:01:29 2017 +0530 -- .gitignore | 4 +- .../src/main/webapp/app/models/yarn-app.js | 1 + .../src/main/webapp/app/serializers/yarn-app.js | 1 + .../main/webapp/app/templates/yarn-app/info.hbs | 42 4 files changed, 45 insertions(+), 3 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4c57b80/.gitignore -- diff --git a/.gitignore b/.gitignore index 817556f..70c1f23 100644 --- a/.gitignore +++ b/.gitignore @@ -44,6 +44,4 @@ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/dist hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tmp yarnregistry.pdf patchprocess/ - - -.history/ \ No newline at end of file +.history/ http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4c57b80/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app.js -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app.js index 47814e4..853e2ee 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/yarn-app.js @@ -52,6 +52,7 @@ export default DS.Model.extend({ currentAppAttemptId: DS.attr('string'), remainingTimeoutInSeconds: DS.attr('number'), applicationExpiryTime: DS.attr('string'), + resourceRequests: DS.attr('array'), isFailed: function() { return 
this.get('finalStatus') === "FAILED"; http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4c57b80/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app.js -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app.js index 1462f5a..efdb1ba 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-app.js @@ -58,6 +58,7 @@ export default DS.JSONAPISerializer.extend({ allocatedMB: payload.allocatedMB, allocatedVCores: payload.allocatedVCores, runningContainers: payload.runningContainers, + resourceRequests: payload.resourceRequests, memorySeconds: payload.memorySeconds, vcoreSeconds: payload.vcoreSeconds, preemptedResourceMB: payload.preemptedResourceMB, http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4c57b80/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/info.hbs -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/info.hbs b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/info.hbs index 534869e..6b06961 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/info.hbs +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/templates/yarn-app/info.hbs @@ -132,6 +132,48 @@ {{/if}} +{{#unless model.serviceName}} + + + +Outstanding Resource Requests + + + +Scheduler Key +Resource Name +Capability +# Containers +Relax Locality +Node Label Expression + + + + {{#each model.app.resourceRequests as |request|}} + + {{request.priority}} + {{request.resourceName}} + Memory:{{request.capability.memory}};vCores:{{request.capability.virtualCores}} + {{request.numContainers}} + 
{{request.relaxLocality}} + +{{#if request.nodeLabelExpression}} +
[36/46] hadoop git commit: YARN-7419. CapacityScheduler: Allow auto leaf queue creation after queue mapping. (Suma Shivaprasad via wangda)
YARN-7419. CapacityScheduler: Allow auto leaf queue creation after queue mapping. (Suma Shivaprasad via wangda) Change-Id: Ia1704bb8cb5070e5b180b5a85787d7b9ca57ebc6 Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0987a7b8 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0987a7b8 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0987a7b8 Branch: refs/heads/YARN-5881 Commit: 0987a7b8c2c1e4c2095821d98a7db19644df Parents: f2efaf0 Author: Wangda TanAuthored: Thu Nov 16 11:22:48 2017 -0800 Committer: Wangda Tan Committed: Thu Nov 16 11:25:52 2017 -0800 -- .../server/resourcemanager/RMAppManager.java| 7 +- .../placement/ApplicationPlacementContext.java | 52 ++ .../placement/PlacementManager.java | 34 +- .../placement/PlacementRule.java| 7 +- .../UserGroupMappingPlacementRule.java | 284 ++- .../server/resourcemanager/rmapp/RMAppImpl.java | 87 +- .../scheduler/capacity/AbstractCSQueue.java | 2 +- .../capacity/AbstractManagedParentQueue.java| 196 +++-- .../capacity/AutoCreatedLeafQueue.java | 27 +- .../scheduler/capacity/CapacityScheduler.java | 157 +++- .../CapacitySchedulerConfiguration.java | 153 .../capacity/CapacitySchedulerQueueManager.java | 103 ++- .../scheduler/capacity/ManagedParentQueue.java | 158 .../scheduler/capacity/ParentQueue.java | 13 - .../scheduler/capacity/PlanQueue.java | 25 +- .../scheduler/event/AppAddedSchedulerEvent.java | 37 +- .../server/resourcemanager/TestAppManager.java | 29 +- .../TestUserGroupMappingPlacementRule.java | 14 +- .../scheduler/TestSchedulerUtils.java | 1 + .../capacity/TestCapacityScheduler.java | 6 +- .../TestCapacitySchedulerAutoQueueCreation.java | 794 +++ 21 files changed, 1921 insertions(+), 265 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/0987a7b8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java -- diff 
--git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java index d042590..5e82f40 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java @@ -360,13 +360,8 @@ public class RMAppManager implements EventHandler, private RMAppImpl createAndPopulateNewRMApp( ApplicationSubmissionContext submissionContext, long submitTime, String user, boolean isRecovery, long startTime) throws YarnException { + if (!isRecovery) { - // Do queue mapping - if (rmContext.getQueuePlacementManager() != null) { -// We only do queue mapping when it's a new application -rmContext.getQueuePlacementManager().placeApplication( -submissionContext, user); - } // fail the submission if configured application timeout value is invalid RMServerUtils.validateApplicationTimeouts( submissionContext.getApplicationTimeouts()); http://git-wip-us.apache.org/repos/asf/hadoop/blob/0987a7b8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/ApplicationPlacementContext.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/ApplicationPlacementContext.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/ApplicationPlacementContext.java new file mode 100644 index 000..f2f92b8 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/ApplicationPlacementContext.java @@ -0,0 +1,52 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license
[40/46] hadoop git commit: YARN-6471. Support to add min/max resource configuration for a queue. (Sunil G via wangda)
http://git-wip-us.apache.org/repos/asf/hadoop/blob/942f6f59/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/policy/TestPriorityUtilizationQueueOrderingPolicy.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/policy/TestPriorityUtilizationQueueOrderingPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/policy/TestPriorityUtilizationQueueOrderingPolicy.java index e3c108a..b9d5b82 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/policy/TestPriorityUtilizationQueueOrderingPolicy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/policy/TestPriorityUtilizationQueueOrderingPolicy.java @@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.policy; import com.google.common.collect.ImmutableSet; import com.google.common.collect.ImmutableTable; import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueResourceQuotas; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.QueueCapacities; import org.junit.Assert; @@ -52,6 +53,8 @@ public class TestPriorityUtilizationQueueOrderingPolicy { when(q.getQueueCapacities()).thenReturn(qc); when(q.getPriority()).thenReturn(Priority.newInstance(priorities[i])); + QueueResourceQuotas qr = new QueueResourceQuotas(); + 
when(q.getQueueResourceQuotas()).thenReturn(qr); list.add(q); } http://git-wip-us.apache.org/repos/asf/hadoop/blob/942f6f59/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java index 1108f1a..0132348 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesCapacitySched.java @@ -354,10 +354,10 @@ public class TestRMWebServicesCapacitySched extends JerseyTestBase { private void verifySubQueue(JSONObject info, String q, float parentAbsCapacity, float parentAbsMaxCapacity) throws JSONException, Exception { -int numExpectedElements = 18; +int numExpectedElements = 20; boolean isParentQueue = true; if (!info.has("queues")) { - numExpectedElements = 31; + numExpectedElements = 33; isParentQueue = false; } assertEquals("incorrect number of elements", numExpectedElements, info.length()); - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[29/46] hadoop git commit: HDFS-12814. Add blockId when warning slow mirror/disk in BlockReceiver. Contributed by Jiandan Yang.
HDFS-12814. Add blockId when warning slow mirror/disk in BlockReceiver. Contributed by Jiandan Yang. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/462e25a3 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/462e25a3 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/462e25a3 Branch: refs/heads/YARN-5881 Commit: 462e25a3b264e1148d0cbca00db7f10d43a0555f Parents: 09a1342 Author: Weiwei YangAuthored: Thu Nov 16 16:19:53 2017 +0800 Committer: Weiwei Yang Committed: Thu Nov 16 16:19:53 2017 +0800 -- .../hadoop/hdfs/server/datanode/BlockReceiver.java | 16 +++- 1 file changed, 11 insertions(+), 5 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/462e25a3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java index 8d91f04..c052d52 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java @@ -434,7 +434,8 @@ class BlockReceiver implements Closeable { if (duration > datanodeSlowLogThresholdMs && LOG.isWarnEnabled()) { LOG.warn("Slow flushOrSync took " + duration + "ms (threshold=" + datanodeSlowLogThresholdMs + "ms), isSync:" + isSync + ", flushTotalNanos=" - + flushTotalNanos + "ns, volume=" + getVolumeBaseUri()); + + flushTotalNanos + "ns, volume=" + getVolumeBaseUri() + + ", blockId=" + replicaInfo.getBlockId()); } } @@ -591,7 +592,8 @@ class BlockReceiver implements Closeable { if (duration > datanodeSlowLogThresholdMs && LOG.isWarnEnabled()) { LOG.warn("Slow BlockReceiver write packet to mirror took " + duration + "ms (threshold=" + 
datanodeSlowLogThresholdMs + "ms), " - + "downstream DNs=" + Arrays.toString(downstreamDNs)); + + "downstream DNs=" + Arrays.toString(downstreamDNs) + + ", blockId=" + replicaInfo.getBlockId()); } } catch (IOException e) { handleMirrorOutError(e); @@ -725,7 +727,8 @@ class BlockReceiver implements Closeable { if (duration > datanodeSlowLogThresholdMs && LOG.isWarnEnabled()) { LOG.warn("Slow BlockReceiver write data to disk cost:" + duration + "ms (threshold=" + datanodeSlowLogThresholdMs + "ms), " -+ "volume=" + getVolumeBaseUri()); ++ "volume=" + getVolumeBaseUri() ++ ", blockId=" + replicaInfo.getBlockId()); } if (duration > maxWriteToDiskMs) { @@ -917,7 +920,8 @@ class BlockReceiver implements Closeable { if (duration > datanodeSlowLogThresholdMs && LOG.isWarnEnabled()) { LOG.warn("Slow manageWriterOsCache took " + duration + "ms (threshold=" + datanodeSlowLogThresholdMs - + "ms), volume=" + getVolumeBaseUri()); + + "ms), volume=" + getVolumeBaseUri() + + ", blockId=" + replicaInfo.getBlockId()); } } } catch (Throwable t) { @@ -1629,7 +1633,9 @@ class BlockReceiver implements Closeable { if (duration > datanodeSlowLogThresholdMs) { LOG.warn("Slow PacketResponder send ack to upstream took " + duration + "ms (threshold=" + datanodeSlowLogThresholdMs + "ms), " + myString -+ ", replyAck=" + replyAck); ++ ", replyAck=" + replyAck ++ ", downstream DNs=" + Arrays.toString(downstreamDNs) ++ ", blockId=" + replicaInfo.getBlockId()); } else if (LOG.isDebugEnabled()) { LOG.debug(myString + ", replyAck=" + replyAck); } - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[20/46] hadoop git commit: YARN-7464. Introduce filters in Nodes page of new YARN UI. Contributed by Vasudevan Skm.
YARN-7464. Introduce filters in Nodes page of new YARN UI. Contributed by Vasudevan Skm. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2f379d41 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2f379d41 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2f379d41 Branch: refs/heads/YARN-5881 Commit: 2f379d412b761463620df0346f7ff3bd01581568 Parents: c4c57b8 Author: Sunil GAuthored: Wed Nov 15 14:02:53 2017 +0530 Committer: Sunil G Committed: Wed Nov 15 14:02:53 2017 +0530 -- .../webapp/app/controllers/yarn-nodes-status.js | 36 .../webapp/app/controllers/yarn-nodes/table.js | 17 +- .../src/main/webapp/app/models/yarn-rm-node.js | 2 +- .../src/main/webapp/app/router.js | 1 + .../app/templates/components/nodes-heatmap.hbs | 16 +++--- .../main/webapp/app/templates/yarn-nodes.hbs| 59 +++- .../webapp/app/templates/yarn-nodes/heatmap.hbs | 10 +--- .../webapp/app/templates/yarn-nodes/status.hbs | 35 .../webapp/app/templates/yarn-nodes/table.hbs | 4 +- 9 files changed, 120 insertions(+), 60 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f379d41/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-nodes-status.js -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-nodes-status.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-nodes-status.js new file mode 100644 index 000..b007cf4 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-nodes-status.js @@ -0,0 +1,36 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import Ember from 'ember'; + +export default Ember.Controller.extend({ + needReload: true, + selectedQueue: undefined, + + breadcrumbs: [{ +text: "Home", +routeName: 'application' + }, { +text: "Nodes", +routeName: 'yarn-nodes.table', + }, { +text: "Node status", +routeName: 'yarn-nodes-status', + }] + +}); http://git-wip-us.apache.org/repos/asf/hadoop/blob/2f379d41/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-nodes/table.js -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-nodes/table.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-nodes/table.js index 3fae596..30180dd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-nodes/table.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-nodes/table.js @@ -29,6 +29,10 @@ export default Ember.Controller.extend({ sortOrder: Ember.computed.alias('tableDefinition.sortOrder'), pageNum: Ember.computed.alias('tableDefinition.pageNum'), rowCount: Ember.computed.alias('tableDefinition.rowCount'), +tableDefinition: TableDefinition.create({ +enableFaceting: true, +rowCount: 25 +}), columns: function() { var colums = []; colums.push({ @@ -40,6 +44,7 @@ export default Ember.Controller.extend({ id: 'rack', headerTitle: 'Rack', 
contentPath: 'rack', +facetType: null, minWidth: "100px" }, { id: 'state', @@ -51,12 +56,14 @@ export default Ember.Controller.extend({ id: 'address', headerTitle: 'Node Address', contentPath: 'id', -minWidth: "300px" +minWidth: "300px", +facetType: null, }, { id: 'nodeId', headerTitle: 'Node HTTP Address', contentPath: 'nodeHTTPAddress', cellComponentName: 'em-table-linked-cell', +
[45/46] hadoop git commit: YARN-7332. Compute effectiveCapacity per each resource vector. (Sunil G via wangda)
YARN-7332. Compute effectiveCapacity per each resource vector. (Sunil G via wangda) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c0b03ffb Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c0b03ffb Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c0b03ffb Branch: refs/heads/YARN-5881 Commit: c0b03ffb9803b6d8378cadcba456d6e0fef3a884 Parents: c4ba54b Author: Wangda TanAuthored: Fri Oct 27 10:16:33 2017 -0700 Committer: Sunil G Committed: Fri Nov 17 19:59:32 2017 +0530 -- .../scheduler/capacity/ParentQueue.java | 66 -- .../scheduler/capacity/TestParentQueue.java | 94 2 files changed, 153 insertions(+), 7 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0b03ffb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java index 5ab1494..940637e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/ParentQueue.java @@ -44,6 +44,7 @@ import org.apache.hadoop.yarn.api.records.QueueInfo; import org.apache.hadoop.yarn.api.records.QueueState; import org.apache.hadoop.yarn.api.records.QueueUserACLInfo; import org.apache.hadoop.yarn.api.records.Resource; +import 
org.apache.hadoop.yarn.api.records.ResourceInformation; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.security.AccessType; @@ -68,7 +69,9 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaS import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.CandidateNodeSet; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.CandidateNodeSetUtils; +import org.apache.hadoop.yarn.util.UnitsConversionUtil; import org.apache.hadoop.yarn.util.resource.ResourceCalculator; +import org.apache.hadoop.yarn.util.resource.ResourceUtils; import org.apache.hadoop.yarn.util.resource.Resources; @Private @@ -928,24 +931,25 @@ public class ParentQueue extends AbstractCSQueue { // Factor to scale down effective resource: When cluster has sufficient // resources, effective_min_resources will be same as configured // min_resources. 
-float effectiveMinRatio = 1; +Resource numeratorForMinRatio = null; ResourceCalculator rc = this.csContext.getResourceCalculator(); if (getQueueName().equals("root")) { if (!resourceByLabel.equals(Resources.none()) && Resources.lessThan(rc, clusterResource, resourceByLabel, configuredMinResources)) { -effectiveMinRatio = Resources.divide(rc, clusterResource, -resourceByLabel, configuredMinResources); +numeratorForMinRatio = resourceByLabel; } } else { if (Resources.lessThan(rc, clusterResource, queueResourceQuotas.getEffectiveMinResource(label), configuredMinResources)) { -effectiveMinRatio = Resources.divide(rc, clusterResource, -queueResourceQuotas.getEffectiveMinResource(label), -configuredMinResources); +numeratorForMinRatio = queueResourceQuotas +.getEffectiveMinResource(label); } } +Map effectiveMinRatioPerResource = getEffectiveMinRatioPerResource( +configuredMinResources, numeratorForMinRatio); + // loop and do this for all child queues for (CSQueue childQueue : getChildQueues()) { Resource minResource = childQueue.getQueueResourceQuotas() @@ -955,7 +959,8 @@ public class ParentQueue extends AbstractCSQueue { if (childQueue.getCapacityConfigType() .equals(CapacityConfigType.ABSOLUTE_RESOURCE)) { childQueue.getQueueResourceQuotas().setEffectiveMinResource(label, -Resources.multiply(minResource,
[23/46] hadoop git commit: YARN-7414. FairScheduler#getAppWeight() should be moved into FSAppAttempt#getWeight() (Contributed by Soumabrata Chakraborty via Daniel Templeton)
YARN-7414. FairScheduler#getAppWeight() should be moved into FSAppAttempt#getWeight() (Contributed by Soumabrata Chakraborty via Daniel Templeton) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b246c547 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b246c547 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b246c547 Branch: refs/heads/YARN-5881 Commit: b246c547490dd94271806ca4caf1e5f199f0fb09 Parents: e094eb7 Author: Daniel TempletonAuthored: Wed Nov 15 09:56:37 2017 -0800 Committer: Daniel Templeton Committed: Wed Nov 15 10:03:29 2017 -0800 -- .../scheduler/fair/FSAppAttempt.java | 15 ++- .../scheduler/fair/FairScheduler.java | 17 ++--- 2 files changed, 16 insertions(+), 16 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/b246c547/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java index bbd4418..94991eb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java @@ -1304,7 +1304,20 @@ public class FSAppAttempt extends SchedulerApplicationAttempt @Override public float getWeight() { -return scheduler.getAppWeight(this); +double weight = 1.0; + +if 
(scheduler.isSizeBasedWeight()) { + scheduler.getSchedulerReadLock().lock(); + + try { +// Set weight based on current memory demand +weight = Math.log1p(getDemand().getMemorySize()) / Math.log(2); + } finally { +scheduler.getSchedulerReadLock().unlock(); + } +} + +return (float)weight * this.getPriority().getPriority(); } @Override http://git-wip-us.apache.org/repos/asf/hadoop/blob/b246c547/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java index 7f1b91e..b2978d4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java @@ -368,21 +368,8 @@ public class FairScheduler extends return rmContext.getContainerTokenSecretManager(); } - public float getAppWeight(FSAppAttempt app) { -double weight = 1.0; - -if (sizeBasedWeight) { - readLock.lock(); - - try { -// Set weight based on current memory demand -weight = Math.log1p(app.getDemand().getMemorySize()) / Math.log(2); - } finally { -readLock.unlock(); - } -} - -return (float)weight * app.getPriority().getPriority(); + public boolean isSizeBasedWeight() { +return sizeBasedWeight; } public Resource getIncrementResourceCapability() { - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For 
additional commands, e-mail: common-commits-h...@hadoop.apache.org
[35/46] hadoop git commit: YARN-7419. CapacityScheduler: Allow auto leaf queue creation after queue mapping. (Suma Shivaprasad via wangda)
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0987a7b8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/AppAddedSchedulerEvent.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/AppAddedSchedulerEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/AppAddedSchedulerEvent.java index 0a8d6fe..80b7f2f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/AppAddedSchedulerEvent.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/event/AppAddedSchedulerEvent.java @@ -6,9 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
@@ -22,6 +22,8 @@ import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.ReservationId; +import org.apache.hadoop.yarn.server.resourcemanager.placement +.ApplicationPlacementContext; public class AppAddedSchedulerEvent extends SchedulerEvent { @@ -31,15 +33,23 @@ public class AppAddedSchedulerEvent extends SchedulerEvent { private final ReservationId reservationID; private final boolean isAppRecovering; private final Priority appPriority; + private final ApplicationPlacementContext placementContext; public AppAddedSchedulerEvent(ApplicationId applicationId, String queue, String user) { -this(applicationId, queue, user, false, null, Priority.newInstance(0)); +this(applicationId, queue, user, false, null, Priority.newInstance(0), +null); + } + + public AppAddedSchedulerEvent(ApplicationId applicationId, String queue, + String user, ApplicationPlacementContext placementContext) { +this(applicationId, queue, user, false, null, Priority.newInstance(0), +placementContext); } public AppAddedSchedulerEvent(ApplicationId applicationId, String queue, String user, ReservationId reservationID, Priority appPriority) { -this(applicationId, queue, user, false, reservationID, appPriority); +this(applicationId, queue, user, false, reservationID, appPriority, null); } public AppAddedSchedulerEvent(String user, @@ -47,12 +57,20 @@ public class AppAddedSchedulerEvent extends SchedulerEvent { Priority appPriority) { this(submissionContext.getApplicationId(), submissionContext.getQueue(), user, isAppRecovering, submissionContext.getReservationID(), -appPriority); +appPriority, null); + } + + public AppAddedSchedulerEvent(String user, + ApplicationSubmissionContext submissionContext, boolean isAppRecovering, + Priority appPriority, ApplicationPlacementContext placementContext) { +this(submissionContext.getApplicationId(), 
submissionContext.getQueue(), +user, isAppRecovering, submissionContext.getReservationID(), +appPriority, placementContext); } public AppAddedSchedulerEvent(ApplicationId applicationId, String queue, String user, boolean isAppRecovering, ReservationId reservationID, - Priority appPriority) { + Priority appPriority, ApplicationPlacementContext placementContext) { super(SchedulerEventType.APP_ADDED); this.applicationId = applicationId; this.queue = queue; @@ -60,6 +78,7 @@ public class AppAddedSchedulerEvent extends SchedulerEvent { this.reservationID = reservationID; this.isAppRecovering = isAppRecovering; this.appPriority = appPriority; +this.placementContext = placementContext; } public ApplicationId getApplicationId() { @@ -85,4 +104,8 @@ public class AppAddedSchedulerEvent extends SchedulerEvent { public Priority getApplicatonPriority() { return appPriority; } + + public ApplicationPlacementContext getPlacementContext() { +return placementContext; + } }
[06/46] hadoop git commit: HADOOP-15008. Fixed period unit calculation for Hadoop Metrics V2. (Contributed by Erik Krogen)
HADOOP-15008. Fixed period unit calculation for Hadoop Metrics V2. (Contribute by Erik Krogen) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1b68b8ff Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1b68b8ff Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1b68b8ff Branch: refs/heads/YARN-5881 Commit: 1b68b8ff2c6d4704f748d47fc0b903636f3e98c7 Parents: 975a57a Author: Eric YangAuthored: Mon Nov 13 12:40:45 2017 -0500 Committer: Eric Yang Committed: Mon Nov 13 12:42:43 2017 -0500 -- .../metrics2/impl/MetricsSinkAdapter.java | 12 ++--- .../hadoop/metrics2/impl/MetricsSystemImpl.java | 7 ++- .../metrics2/impl/TestMetricsSystemImpl.java| 49 3 files changed, 61 insertions(+), 7 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b68b8ff/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java -- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java index 1199ebd..f2e607b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java @@ -51,7 +51,7 @@ class MetricsSinkAdapter implements SinkQueue.Consumer { private final Thread sinkThread; private volatile boolean stopping = false; private volatile boolean inError = false; - private final int period, firstRetryDelay, retryCount; + private final int periodMs, firstRetryDelay, retryCount; private final long oobPutTimeout; private final float retryBackoff; private final MetricsRegistry registry = new MetricsRegistry("sinkadapter"); @@ -62,7 +62,7 @@ class MetricsSinkAdapter implements SinkQueue.Consumer { MetricsSinkAdapter(String name, 
String description, MetricsSink sink, String context, MetricsFilter sourceFilter, MetricsFilter recordFilter, MetricsFilter metricFilter, - int period, int queueCapacity, int retryDelay, + int periodMs, int queueCapacity, int retryDelay, float retryBackoff, int retryCount) { this.name = checkNotNull(name, "name"); this.description = description; @@ -71,7 +71,7 @@ class MetricsSinkAdapter implements SinkQueue.Consumer { this.sourceFilter = sourceFilter; this.recordFilter = recordFilter; this.metricFilter = metricFilter; -this.period = checkArg(period, period > 0, "period"); +this.periodMs = checkArg(periodMs, periodMs > 0, "period"); firstRetryDelay = checkArg(retryDelay, retryDelay > 0, "retry delay"); this.retryBackoff = checkArg(retryBackoff, retryBackoff>1, "retry backoff"); oobPutTimeout = (long) @@ -93,9 +93,9 @@ class MetricsSinkAdapter implements SinkQueue.Consumer { sinkThread.setDaemon(true); } - boolean putMetrics(MetricsBuffer buffer, long logicalTime) { -if (logicalTime % period == 0) { - LOG.debug("enqueue, logicalTime="+ logicalTime); + boolean putMetrics(MetricsBuffer buffer, long logicalTimeMs) { +if (logicalTimeMs % periodMs == 0) { + LOG.debug("enqueue, logicalTime="+ logicalTimeMs); if (queue.enqueue(buffer)) { refreshQueueSizeGauge(); return true; http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b68b8ff/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java -- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java index ee1672e..624edc9 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java @@ -519,7 +519,7 @@ public class MetricsSystemImpl extends 
MetricsSystem implements MetricsSource { conf.getFilter(SOURCE_FILTER_KEY), conf.getFilter(RECORD_FILTER_KEY), conf.getFilter(METRIC_FILTER_KEY), -conf.getInt(PERIOD_KEY, PERIOD_DEFAULT), +conf.getInt(PERIOD_KEY, PERIOD_DEFAULT) * 1000, conf.getInt(QUEUE_CAPACITY_KEY, QUEUE_CAPACITY_DEFAULT), conf.getInt(RETRY_DELAY_KEY,
[10/46] hadoop git commit: YARN-7369. Improve the resource types docs
YARN-7369. Improve the resource types docs Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/040a38dc Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/040a38dc Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/040a38dc Branch: refs/heads/YARN-5881 Commit: 040a38dc493adf44e9552b8971acf36188c30152 Parents: 2e512f0 Author: Daniel TempletonAuthored: Mon Nov 13 11:05:07 2017 -0800 Committer: Daniel Templeton Committed: Mon Nov 13 11:05:07 2017 -0800 -- hadoop-project/src/site/site.xml| 2 +- .../src/site/markdown/ResourceModel.md | 275 +++ .../src/site/markdown/ResourceProfiles.md | 116 3 files changed, 276 insertions(+), 117 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/040a38dc/hadoop-project/src/site/site.xml -- diff --git a/hadoop-project/src/site/site.xml b/hadoop-project/src/site/site.xml index 57cff9a..be48ddb 100644 --- a/hadoop-project/src/site/site.xml +++ b/hadoop-project/src/site/site.xml @@ -128,6 +128,7 @@ + @@ -143,7 +144,6 @@ - http://git-wip-us.apache.org/repos/asf/hadoop/blob/040a38dc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceModel.md -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceModel.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceModel.md new file mode 100644 index 000..75e5c92 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/ResourceModel.md @@ -0,0 +1,275 @@ + + +Hadoop: YARN Resource Configuration +=== + +Overview + +YARN supports an extensible resource model. By default YARN tracks CPU and +memory for all nodes, applications, and queues, but the resource definition +can be extended to include arbitrary "countable" resources. A countable +resource is a resource that is consumed while a container is running, but is +released afterwards. CPU and memory are both countable resources. 
Other examples +include GPU resources and software licenses. + +In addition, YARN also supports the use of "resource profiles", which allow a +user to specify multiple resource requests through a single profile, similar to +Amazon Web Services Elastic Compute Cluster instance types. For example, +"large" might mean 8 virtual cores and 16GB RAM. + +Configuration +- + +The following configuration properties are supported. See below for details. + +`yarn-site.xml` + +| Configuration Property | Description | +|: |: | +| `yarn.resourcemanager.resource-profiles.enabled` | Indicates whether resource profiles support is enabled. Defaults to `false`. | + +`resource-types.xml` + +| Configuration Property | Value | Description | +|: |: |: | +| `yarn.resource-types` | Comma-separated list of additional resources. May not include `memory`, `memory-mb`, or `vcores` | +| `yarn.resource-types..units` | Default unit for the specified resource type | +| `yarn.resource-types..minimum` | The minimum request for the specified resource type | +| `yarn.resource-types..maximum` | The maximum request for the specified resource type | + +`nodeÂ-resources.xml` + +| Configuration Property | Value | Description | +|: |: |: | +| `yarn.nodemanager.resource-type.` | The count of the specified resource available from the node manager | + +Please note that the `resource-types.xml` and `nodeÂ-resources.xml` files +also need to be placed in the same configuration directory as `yarn-site.xml` if +they are used. Alternatively, the properties may be placed into the +`yarn-site.xml` file instead. + +YARN Resource Model +--- + +### Resource Manager +The resource manager is the final arbiter of what resources in the cluster are +tracked. The resource manager loads its resource definition from XML +configuration files. 
For example, to define a new resource in addition to +CPU and memory, the following property should be configured: + +```xml + + +yarn.resource-types +resource1,resource2 + +The resources to be used for scheduling. Use resource-types.xml +to specify details about the individual resource types. + + + +``` + +A valid resource name must begin with a letter and contain only letters, numbers, +and any of: '.', '_', or '-'. A valid resource name may also be optionally +preceded by a name space followed by a slash. A valid name space consists of +period-separated groups of
[07/46] hadoop git commit: Merge branch 'trunk' of https://git-wip-us.apache.org/repos/asf/hadoop into trunk
Merge branch 'trunk' of https://git-wip-us.apache.org/repos/asf/hadoop into trunk Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0d6bab94 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0d6bab94 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0d6bab94 Branch: refs/heads/YARN-5881 Commit: 0d6bab94c49dbc783912ad9903e3d76849b8122d Parents: 1b68b8f 782681c Author: Eric YangAuthored: Mon Nov 13 12:43:18 2017 -0500 Committer: Eric Yang Committed: Mon Nov 13 12:43:18 2017 -0500 -- -- - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[08/46] hadoop git commit: YARN-7447. Fixed bug in create YARN services via RM. (Contributed by Billie Rinaldi)
YARN-7447. Fixed bug in create YARN services via RM. (Contributed by Billie Rinaldi) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fa4b5c66 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fa4b5c66 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fa4b5c66 Branch: refs/heads/YARN-5881 Commit: fa4b5c669c04d83d92bc73ad72e8311d93c3ed0d Parents: 0d6bab9 Author: Eric YangAuthored: Mon Nov 13 13:59:58 2017 -0500 Committer: Eric Yang Committed: Mon Nov 13 13:59:58 2017 -0500 -- hadoop-yarn-project/hadoop-yarn/bin/yarn | 8 1 file changed, 8 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/fa4b5c66/hadoop-yarn-project/hadoop-yarn/bin/yarn -- diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn b/hadoop-yarn-project/hadoop-yarn/bin/yarn index 00596c2..d7b44b9 100755 --- a/hadoop-yarn-project/hadoop-yarn/bin/yarn +++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn @@ -149,6 +149,14 @@ ${HADOOP_COMMON_HOME}/${HADOOP_COMMON_LIB_JARS_DIR}" if [[ -n "${YARN_RESOURCEMANAGER_HEAPSIZE}" ]]; then HADOOP_HEAPSIZE_MAX="${YARN_RESOURCEMANAGER_HEAPSIZE}" fi + local sld="${HADOOP_YARN_HOME}/${YARN_DIR},\ +${HADOOP_YARN_HOME}/${YARN_LIB_JARS_DIR},\ +${HADOOP_HDFS_HOME}/${HDFS_DIR},\ +${HADOOP_HDFS_HOME}/${HDFS_LIB_JARS_DIR},\ +${HADOOP_COMMON_HOME}/${HADOOP_COMMON_DIR},\ +${HADOOP_COMMON_HOME}/${HADOOP_COMMON_LIB_JARS_DIR}" + hadoop_translate_cygwin_path sld + hadoop_add_param HADOOP_OPTS service.libdir "-Dservice.libdir=${sld}" ;; rmadmin) HADOOP_CLASSNAME='org.apache.hadoop.yarn.client.cli.RMAdminCLI' - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[13/46] hadoop git commit: HADOOP-15036. Update LICENSE.txt for HADOOP-14840. (asuresh)
HADOOP-15036. Update LICENSE.txt for HADOOP-14840. (asuresh) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f871b754 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f871b754 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f871b754 Branch: refs/heads/YARN-5881 Commit: f871b7541a5375eb117eafb9a091e4f59401231f Parents: b07e68b Author: Arun SureshAuthored: Mon Nov 13 14:37:36 2017 -0800 Committer: Arun Suresh Committed: Mon Nov 13 14:37:36 2017 -0800 -- LICENSE.txt | 25 + 1 file changed, 25 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/f871b754/LICENSE.txt -- diff --git a/LICENSE.txt b/LICENSE.txt index b0cef03..447c609 100644 --- a/LICENSE.txt +++ b/LICENSE.txt @@ -699,6 +699,31 @@ hadoop-tools/hadoop-sls/src/main/html/js/thirdparty/jquery.js hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/jquery Apache HBase - Server which contains JQuery minified javascript library version 1.8.3 Microsoft JDBC Driver for SQLServer - version 6.2.1.jre7 + + +MIT License + +Copyright (c) 2003-2017 Optimatika + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +For: oj! Algorithms - version 43.0 - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[14/46] hadoop git commit: YARN-6078. Containers stuck in Localizing state. Contributed by Billie Rinaldi.
YARN-6078. Containers stuck in Localizing state. Contributed by Billie Rinaldi. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e14f03df Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e14f03df Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e14f03df Branch: refs/heads/YARN-5881 Commit: e14f03dfbf078de63126a1e882261081b9ec6778 Parents: f871b75 Author: Junping DuAuthored: Mon Nov 13 15:27:37 2017 -0800 Committer: Junping Du Committed: Mon Nov 13 15:27:37 2017 -0800 -- .../localizer/ResourceLocalizationService.java | 30 .../TestResourceLocalizationService.java| 144 +++ 2 files changed, 174 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/e14f03df/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java index 29fc747..17aa7d9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java @@ -74,6 +74,7 @@ import org.apache.hadoop.service.CompositeService; import org.apache.hadoop.util.DiskChecker; import org.apache.hadoop.util.DiskValidator; import 
org.apache.hadoop.util.DiskValidatorFactory; +import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.concurrent.HadoopExecutors; import org.apache.hadoop.util.concurrent.HadoopScheduledThreadPoolExecutor; @@ -808,6 +809,7 @@ public class ResourceLocalizationService extends CompositeService return; // ignore; already gone } privLocalizers.remove(locId); +LOG.info("Interrupting localizer for " + locId); localizer.interrupt(); } } @@ -1189,6 +1191,34 @@ public class ResourceLocalizationService extends CompositeService } @Override +public void interrupt() { + boolean destroyedShell = false; + try { +for (Shell shell : Shell.getAllShells()) { + try { +if (shell.getWaitingThread() != null && +shell.getWaitingThread().equals(this) && +shell.getProcess() != null && +shell.getProcess().isAlive()) { + LOG.info("Destroying localization shell process for " + + localizerId); + shell.getProcess().destroy(); + destroyedShell = true; + break; +} + } catch (Exception e) { +LOG.warn("Failed to destroy localization shell process for " + +localizerId, e); + } +} + } finally { +if (!destroyedShell) { + super.interrupt(); +} + } +} + +@Override @SuppressWarnings("unchecked") // dispatcher not typed public void run() { Path nmPrivateCTokensPath = null; http://git-wip-us.apache.org/repos/asf/hadoop/blob/e14f03df/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java index 
d863c6a..c180545 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestResourceLocalizationService.java +++
[05/46] hadoop git commit: HADOOP-15008. Fixed period unit calculation for Hadoop Metrics V2.
HADOOP-15008. Fixed period unit calculation for Hadoop Metrics V2. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/782681c7 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/782681c7 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/782681c7 Branch: refs/heads/YARN-5881 Commit: 782681c73e4ae7a02206d4d26635bb1e4984fa24 Parents: 975a57a Author: Eric YangAuthored: Mon Nov 13 12:40:45 2017 -0500 Committer: Eric Yang Committed: Mon Nov 13 12:40:45 2017 -0500 -- .../metrics2/impl/MetricsSinkAdapter.java | 12 ++--- .../hadoop/metrics2/impl/MetricsSystemImpl.java | 7 ++- .../metrics2/impl/TestMetricsSystemImpl.java| 49 3 files changed, 61 insertions(+), 7 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/782681c7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java -- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java index 1199ebd..f2e607b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSinkAdapter.java @@ -51,7 +51,7 @@ class MetricsSinkAdapter implements SinkQueue.Consumer { private final Thread sinkThread; private volatile boolean stopping = false; private volatile boolean inError = false; - private final int period, firstRetryDelay, retryCount; + private final int periodMs, firstRetryDelay, retryCount; private final long oobPutTimeout; private final float retryBackoff; private final MetricsRegistry registry = new MetricsRegistry("sinkadapter"); @@ -62,7 +62,7 @@ class MetricsSinkAdapter implements SinkQueue.Consumer { MetricsSinkAdapter(String name, String description, 
MetricsSink sink, String context, MetricsFilter sourceFilter, MetricsFilter recordFilter, MetricsFilter metricFilter, - int period, int queueCapacity, int retryDelay, + int periodMs, int queueCapacity, int retryDelay, float retryBackoff, int retryCount) { this.name = checkNotNull(name, "name"); this.description = description; @@ -71,7 +71,7 @@ class MetricsSinkAdapter implements SinkQueue.Consumer { this.sourceFilter = sourceFilter; this.recordFilter = recordFilter; this.metricFilter = metricFilter; -this.period = checkArg(period, period > 0, "period"); +this.periodMs = checkArg(periodMs, periodMs > 0, "period"); firstRetryDelay = checkArg(retryDelay, retryDelay > 0, "retry delay"); this.retryBackoff = checkArg(retryBackoff, retryBackoff>1, "retry backoff"); oobPutTimeout = (long) @@ -93,9 +93,9 @@ class MetricsSinkAdapter implements SinkQueue.Consumer { sinkThread.setDaemon(true); } - boolean putMetrics(MetricsBuffer buffer, long logicalTime) { -if (logicalTime % period == 0) { - LOG.debug("enqueue, logicalTime="+ logicalTime); + boolean putMetrics(MetricsBuffer buffer, long logicalTimeMs) { +if (logicalTimeMs % periodMs == 0) { + LOG.debug("enqueue, logicalTime="+ logicalTimeMs); if (queue.enqueue(buffer)) { refreshQueueSizeGauge(); return true; http://git-wip-us.apache.org/repos/asf/hadoop/blob/782681c7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java -- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java index ee1672e..624edc9 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/impl/MetricsSystemImpl.java @@ -519,7 +519,7 @@ public class MetricsSystemImpl extends MetricsSystem implements 
MetricsSource { conf.getFilter(SOURCE_FILTER_KEY), conf.getFilter(RECORD_FILTER_KEY), conf.getFilter(METRIC_FILTER_KEY), -conf.getInt(PERIOD_KEY, PERIOD_DEFAULT), +conf.getInt(PERIOD_KEY, PERIOD_DEFAULT) * 1000, conf.getInt(QUEUE_CAPACITY_KEY, QUEUE_CAPACITY_DEFAULT), conf.getInt(RETRY_DELAY_KEY, RETRY_DELAY_DEFAULT),
[12/46] hadoop git commit: Addendum patch for Configuration fix. (Jason Lowe via asuresh)
Addendum patch for Configuration fix. (Jason Lowe via asuresh) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b07e68b0 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b07e68b0 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b07e68b0 Branch: refs/heads/YARN-5881 Commit: b07e68b02a34d272114dda4194992a847928aef8 Parents: 4908a89 Author: Arun SureshAuthored: Mon Nov 13 14:03:50 2017 -0800 Committer: Arun Suresh Committed: Mon Nov 13 14:03:50 2017 -0800 -- .../src/main/java/org/apache/hadoop/conf/Configuration.java| 6 -- 1 file changed, 4 insertions(+), 2 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/b07e68b0/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java -- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java index dfbeec7..fce2194 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java @@ -2962,7 +2962,8 @@ public class Configuration implements Iterable >, // xi:include are treated as inline and retain current source URL include = getResource(confInclude); if (include != null) { - Resource classpathResource = new Resource(include, name); + Resource classpathResource = new Resource(include, name, + wrapper.isParserRestricted()); loadResource(properties, classpathResource, quiet); } else { URL url; @@ -2983,7 +2984,8 @@ public class Configuration implements Iterable >, } url = href.toURI().toURL(); } - Resource uriResource = new Resource(url, name); + Resource uriResource = new Resource(url, name, + wrapper.isParserRestricted()); loadResource(properties, uriResource, quiet); } break; - To unsubscribe, e-mail: 
common-commits-unsubscribe@hadoop.apache.org For additional commands, e-mail: common-commits-help@hadoop.apache.org
[02/46] hadoop git commit: YARN-7475. Fix Container log link in new YARN UI. (Sunil G via Subru).
YARN-7475. Fix Container log link in new YARN UI. (Sunil G via Subru). (cherry picked from commit 3c5b46c2edd69bb238d635ae61ff91656dec23df) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3e260778 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3e260778 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3e260778 Branch: refs/heads/YARN-5881 Commit: 3e26077848ed1d7461576116a9ae841d38aa3ef1 Parents: ff9f7fc Author: Subru KrishnanAuthored: Sun Nov 12 09:18:08 2017 -0800 Committer: Subru Krishnan Committed: Sun Nov 12 09:53:39 2017 -0800 -- .../src/main/webapp/app/adapters/yarn-container-log.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e260778/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-container-log.js -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-container-log.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-container-log.js index 8d1b12b..df46127 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-container-log.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-container-log.js @@ -42,9 +42,9 @@ export default DS.RESTAdapter.extend({ var nodeHttpAddr = splits[0]; var containerId = splits[1]; var filename = splits[2]; -this.host = this.get('host') + nodeHttpAddr; var url = this._buildURL(); -url = url + "/containerlogs/" + containerId + "/" + filename; +url = url.replace("{nodeAddress}", nodeHttpAddr) + "/containerlogs/" + + containerId + "/" + filename; return url; }, - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[11/46] hadoop git commit: HDFS-12705. WebHdfsFileSystem exceptions should retain the caused by exception. Contributed by Hanisha Koneru.
HDFS-12705. WebHdfsFileSystem exceptions should retain the caused by exception. Contributed by Hanisha Koneru. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4908a897 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4908a897 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4908a897 Branch: refs/heads/YARN-5881 Commit: 4908a8970eaf500642a9d8427e322032c1ec047a Parents: 040a38d Author: Arpit AgarwalAuthored: Mon Nov 13 11:30:39 2017 -0800 Committer: Arpit Agarwal Committed: Mon Nov 13 11:30:39 2017 -0800 -- .../hadoop/hdfs/web/WebHdfsFileSystem.java | 1 + .../org/apache/hadoop/hdfs/web/TestWebHDFS.java | 59 2 files changed, 60 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/4908a897/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java index 34f5d6e..c1aef49 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java @@ -780,6 +780,7 @@ public class WebHdfsFileSystem extends FileSystem try { IOException newIoe = ioe.getClass().getConstructor(String.class) .newInstance(node + ": " + ioe.getMessage()); +newIoe.initCause(ioe.getCause()); newIoe.setStackTrace(ioe.getStackTrace()); ioe = newIoe; } catch (NoSuchMethodException | SecurityException http://git-wip-us.apache.org/repos/asf/hadoop/blob/4908a897/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java index 3ee8ad0..500ec0a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java @@ -1452,4 +1452,63 @@ public class TestWebHDFS { } } } + + /** + * Tests that {@link WebHdfsFileSystem.AbstractRunner} propagates original + * exception's stacktrace and cause during runWithRetry attempts. + * @throws Exception + */ + @Test + public void testExceptionPropogationInAbstractRunner() throws Exception{ +final Configuration conf = WebHdfsTestUtil.createConf(); +final Path dir = new Path("/testExceptionPropogationInAbstractRunner"); + +conf.setBoolean(HdfsClientConfigKeys.Retry.POLICY_ENABLED_KEY, true); + +final short numDatanodes = 1; +final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) +.numDataNodes(numDatanodes) +.build(); +try { + cluster.waitActive(); + final FileSystem fs = WebHdfsTestUtil + .getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME); + + //create a file + final long length = 1L << 20; + final Path file1 = new Path(dir, "testFile"); + + DFSTestUtil.createFile(fs, file1, length, numDatanodes, 20120406L); + + //get file status and check that it was written properly. + final FileStatus s1 = fs.getFileStatus(file1); + assertEquals("Write failed for file " + file1, length, s1.getLen()); + + FSDataInputStream in = fs.open(file1); + in.read(); // Connection is made only when the first read() occurs. 
+ final WebHdfsInputStream webIn = + (WebHdfsInputStream)(in.getWrappedStream()); + + final String msg = "Throwing dummy exception"; + IOException ioe = new IOException(msg, new DummyThrowable()); + + WebHdfsFileSystem.ReadRunner readRunner = spy(webIn.getReadRunner()); + doThrow(ioe).when(readRunner).getResponse(any(HttpURLConnection.class)); + + webIn.setReadRunner(readRunner); + + try { +webIn.read(); +fail("Read should have thrown IOException."); + } catch (IOException e) { +assertTrue(e.getMessage().contains(msg)); +assertTrue(e.getCause() instanceof DummyThrowable); + } +} finally { + cluster.shutdown(); +} + } + + final static class DummyThrowable extends Throwable { + } } - To unsubscribe,
[04/46] hadoop git commit: HADOOP-15031. Fix javadoc issues in Hadoop Common. Contributed by Mukul Kumar Singh.
HADOOP-15031. Fix javadoc issues in Hadoop Common. Contributed by Mukul Kumar Singh. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/975a57a6 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/975a57a6 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/975a57a6 Branch: refs/heads/YARN-5881 Commit: 975a57a6886e81e412bea35bf597beccc807a66f Parents: fb62bd6 Author: Akira AjisakaAuthored: Mon Nov 13 23:11:03 2017 +0900 Committer: Akira Ajisaka Committed: Mon Nov 13 23:12:23 2017 +0900 -- .../src/main/java/org/apache/hadoop/fs/FileSystem.java| 3 +-- .../hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/975a57a6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java -- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java index 64021ad..be0ec87 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java @@ -973,8 +973,7 @@ public abstract class FileSystem extends Configured implements Closeable { * @param opt If absent, assume {@link HandleOpt#path()}. * @throws IllegalArgumentException If the FileStatus does not belong to * this FileSystem - * @throws UnsupportedOperationException If - * {@link #createPathHandle(FileStatus, HandleOpt[])} + * @throws UnsupportedOperationException If {@link #createPathHandle} * not overridden by subclass. * @throws UnsupportedOperationException If this FileSystem cannot enforce * the specified constraints. 
http://git-wip-us.apache.org/repos/asf/hadoop/blob/975a57a6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java -- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java index 550e6b9..e455abf 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Options.java @@ -338,8 +338,7 @@ public final class Options { } /** - * Utility function for mapping - * {@link FileSystem#getPathHandle(FileStatus, HandleOpt[])} to a + * Utility function for mapping {@link FileSystem#getPathHandle} to a * fixed set of handle options. * @param fs Target filesystem * @param opt Options to bind in partially evaluated function - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[01/46] hadoop git commit: YARN-7452. Decommissioning node default value to be zero in new YARN UI. Contributed by Vasudevan Skm. [Forced Update!]
Repository: hadoop Updated Branches: refs/heads/YARN-5881 bd11c9f0a -> c7970dced (forced update) YARN-7452. Decommissioning node default value to be zero in new YARN UI. Contributed by Vasudevan Skm. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ff9f7fcf Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ff9f7fcf Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ff9f7fcf Branch: refs/heads/YARN-5881 Commit: ff9f7fcf7f67095f3ab9d257624dee6e16363b1e Parents: 2c2b7a3 Author: Sunil GAuthored: Sat Nov 11 16:52:31 2017 +0530 Committer: Sunil G Committed: Sat Nov 11 16:52:31 2017 +0530 -- .../hadoop-yarn-ui/src/main/webapp/app/models/cluster-metric.js| 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff9f7fcf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/cluster-metric.js -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/cluster-metric.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/cluster-metric.js index dcc0c29..0be0d83 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/cluster-metric.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/models/cluster-metric.js @@ -89,7 +89,7 @@ export default DS.Model.extend({ }); arr.push({ label: "Decommissioning", - value: this.get("decommissioningNodes") + value: this.get("decommissioningNodes") || 0 }); arr.push({ label: "Decomissioned", - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[03/46] hadoop git commit: YARN-7445. Render Applications and Services page with filters in new YARN UI. Contributed by Vasudevan Skm.
YARN-7445. Render Applications and Services page with filters in new YARN UI. Contributed by Vasudevan Skm. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fb62bd62 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fb62bd62 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fb62bd62 Branch: refs/heads/YARN-5881 Commit: fb62bd625f53f0407f711317b208a6e4de8e43bc Parents: 3e26077 Author: Sunil GAuthored: Mon Nov 13 19:41:49 2017 +0530 Committer: Sunil G Committed: Mon Nov 13 19:41:49 2017 +0530 -- .gitignore | 3 + .../components/em-table-simple-status-cell.js | 31 ++ .../webapp/app/controllers/app-table-columns.js | 30 -- .../webapp/app/controllers/yarn-apps/apps.js| 5 +- .../webapp/app/controllers/yarn-services.js | 4 +- .../src/main/webapp/app/styles/app.css | 101 +-- .../components/em-table-simple-status-cell.hbs | 27 + .../src/main/webapp/app/templates/yarn-apps.hbs | 64 +--- .../main/webapp/app/templates/yarn-services.hbs | 74 ++ .../hadoop-yarn-ui/src/main/webapp/bower.json | 3 +- .../src/main/webapp/config/environment.js | 1 - .../src/main/webapp/ember-cli-build.js | 1 + .../src/main/webapp/jsconfig.json | 10 +- .../hadoop-yarn-ui/src/main/webapp/package.json | 2 +- .../em-table-simple-status-cell-test.js | 43 .../hadoop-yarn-ui/src/main/webapp/yarn.lock| 6 +- 16 files changed, 250 insertions(+), 155 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb62bd62/.gitignore -- diff --git a/.gitignore b/.gitignore index 724162d..817556f 100644 --- a/.gitignore +++ b/.gitignore @@ -44,3 +44,6 @@ hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/dist hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/tmp yarnregistry.pdf patchprocess/ + + +.history/ \ No newline at end of file http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb62bd62/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/em-table-simple-status-cell.js -- diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/em-table-simple-status-cell.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/em-table-simple-status-cell.js new file mode 100644 index 000..af8b605 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/components/em-table-simple-status-cell.js @@ -0,0 +1,31 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import Ember from 'ember'; + +export default Ember.Component.extend({ + content: null, + + classNames: ["em-table-simple-status-cell"], + + statusName: Ember.computed("content", function () { +var status = this.get("content"); + +return status.toLowerCase().capitalize(); + }), +}); http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb62bd62/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js index 8a34f1a..05bfad45 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/app-table-columns.js @@ -34,7 +34,8 @@ export default Ember.Controller.extend({ headerTitle: 'Application ID', contentPath: 'id', cellComponentName: 'em-table-linked-cell', - minWidth: "250px", + minWidth: "280px", +
hadoop git commit: YARN-7469. Capacity Scheduler Intra-queue preemption: User can starve if newest app is exactly at user limit. Contributed by Eric Payne.
Repository: hadoop Updated Branches: refs/heads/branch-2.9 f23f49d4e -> 9eab9a25c YARN-7469. Capacity Scheduler Intra-queue preemption: User can starve if newest app is exactly at user limit. Contributed by Eric Payne. (cherry picked from commit 61ace174cdcbca9d22abce7aa0aa71148f37ad55) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9eab9a25 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9eab9a25 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9eab9a25 Branch: refs/heads/branch-2.9 Commit: 9eab9a25c6957cf089f1945a393eb01f510bbc46 Parents: f23f49d Author: Sunil GAuthored: Thu Nov 16 22:34:23 2017 +0530 Committer: Sunil G Committed: Fri Nov 17 19:42:00 2017 +0530 -- .../FifoIntraQueuePreemptionPlugin.java | 6 ...alCapacityPreemptionPolicyMockFramework.java | 3 ++ ...cityPreemptionPolicyIntraQueueUserLimit.java | 35 3 files changed, 44 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/9eab9a25/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java index 00ae3da..3332f2a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoIntraQueuePreemptionPlugin.java @@ -203,6 +203,12 @@ public class FifoIntraQueuePreemptionPlugin Resources.subtractFromNonNegative(preemtableFromApp, tmpApp.selected); Resources.subtractFromNonNegative(preemtableFromApp, tmpApp.getAMUsed()); + if (context.getIntraQueuePreemptionOrderPolicy() +.equals(IntraQueuePreemptionOrderPolicy.USERLIMIT_FIRST)) { +Resources.subtractFromNonNegative(preemtableFromApp, + tmpApp.getFiCaSchedulerApp().getCSLeafQueue().getMinimumAllocation()); + } + // Calculate toBePreempted from apps as follows: // app.preemptable = min(max(app.used - app.selected - app.ideal, 0), // intra_q_preemptable) http://git-wip-us.apache.org/repos/asf/hadoop/blob/9eab9a25/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java index adb3321..1d56a81 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java @@ -359,6 +359,9 @@ public class ProportionalCapacityPreemptionPolicyMockFramework { queue = (LeafQueue) nameToCSQueues.get(queueName); queue.getApplications().add(app); queue.getAllApplications().add(app); + when(queue.getMinimumAllocation()) + .thenReturn(Resource.newInstance(1,1)); + when(app.getCSLeafQueue()).thenReturn(queue); HashSet users = userMap.get(queueName); if (null == users) {