hadoop git commit: YARN-5453. FairScheduler#update may skip update demand resource of child queue/app if current demand reached maxResource. (sandflee via kasha)
Repository: hadoop Updated Branches: refs/heads/branch-2 06c8ebdb8 -> f7b25420a YARN-5453. FairScheduler#update may skip update demand resource of child queue/app if current demand reached maxResource. (sandflee via kasha) (cherry picked from commit 86ac1ad9fd65c7dd12278372b369de38dc4616db) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f7b25420 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f7b25420 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f7b25420 Branch: refs/heads/branch-2 Commit: f7b25420a11020a5a68d57a5ea17511c26c85fe6 Parents: 06c8ebd Author: Karthik KambatlaAuthored: Wed Nov 9 23:44:02 2016 -0800 Committer: Karthik Kambatla Committed: Wed Nov 9 23:44:26 2016 -0800 -- .../scheduler/fair/FSLeafQueue.java | 15 +++ .../scheduler/fair/FSParentQueue.java | 6 +-- .../scheduler/fair/TestFairScheduler.java | 41 3 files changed, 48 insertions(+), 14 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7b25420/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java index 9d5bbe5..c393759 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java @@ -268,20 
+268,16 @@ public class FSLeafQueue extends FSQueue { readLock.lock(); try { for (FSAppAttempt sched : runnableApps) { -if (Resources.equals(demand, maxShare)) { - break; -} -updateDemandForApp(sched, maxShare); +updateDemandForApp(sched); } for (FSAppAttempt sched : nonRunnableApps) { -if (Resources.equals(demand, maxShare)) { - break; -} -updateDemandForApp(sched, maxShare); +updateDemandForApp(sched); } } finally { readLock.unlock(); } +// Cap demand to maxShare to limit allocation to maxShare +demand = Resources.componentwiseMin(demand, maxShare); if (LOG.isDebugEnabled()) { LOG.debug("The updated demand for " + getName() + " is " + demand + "; the max is " + maxShare); @@ -290,7 +286,7 @@ public class FSLeafQueue extends FSQueue { } } - private void updateDemandForApp(FSAppAttempt sched, Resource maxRes) { + private void updateDemandForApp(FSAppAttempt sched) { sched.updateDemand(); Resource toAdd = sched.getDemand(); if (LOG.isDebugEnabled()) { @@ -299,7 +295,6 @@ public class FSLeafQueue extends FSQueue { + demand); } demand = Resources.add(demand, toAdd); -demand = Resources.componentwiseMin(demand, maxRes); } @Override http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7b25420/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java index d05390b..53ac8c9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java @@ -159,16 +159,14 @@ public class FSParentQueue extends FSQueue { childQueue.updateDemand(); Resource toAdd = childQueue.getDemand(); demand = Resources.add(demand, toAdd); -
hadoop git commit: YARN-5453. FairScheduler#update may skip update demand resource of child queue/app if current demand reached maxResource. (sandflee via kasha)
Repository: hadoop Updated Branches: refs/heads/trunk c8bc7a847 -> 86ac1ad9f YARN-5453. FairScheduler#update may skip update demand resource of child queue/app if current demand reached maxResource. (sandflee via kasha) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/86ac1ad9 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/86ac1ad9 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/86ac1ad9 Branch: refs/heads/trunk Commit: 86ac1ad9fd65c7dd12278372b369de38dc4616db Parents: c8bc7a8 Author: Karthik KambatlaAuthored: Wed Nov 9 23:44:02 2016 -0800 Committer: Karthik Kambatla Committed: Wed Nov 9 23:44:02 2016 -0800 -- .../scheduler/fair/FSLeafQueue.java | 15 +++ .../scheduler/fair/FSParentQueue.java | 6 +-- .../scheduler/fair/TestFairScheduler.java | 41 3 files changed, 48 insertions(+), 14 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/86ac1ad9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java index 9d5bbe5..c393759 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java @@ -268,20 +268,16 @@ public class FSLeafQueue extends FSQueue { readLock.lock(); try { 
for (FSAppAttempt sched : runnableApps) { -if (Resources.equals(demand, maxShare)) { - break; -} -updateDemandForApp(sched, maxShare); +updateDemandForApp(sched); } for (FSAppAttempt sched : nonRunnableApps) { -if (Resources.equals(demand, maxShare)) { - break; -} -updateDemandForApp(sched, maxShare); +updateDemandForApp(sched); } } finally { readLock.unlock(); } +// Cap demand to maxShare to limit allocation to maxShare +demand = Resources.componentwiseMin(demand, maxShare); if (LOG.isDebugEnabled()) { LOG.debug("The updated demand for " + getName() + " is " + demand + "; the max is " + maxShare); @@ -290,7 +286,7 @@ public class FSLeafQueue extends FSQueue { } } - private void updateDemandForApp(FSAppAttempt sched, Resource maxRes) { + private void updateDemandForApp(FSAppAttempt sched) { sched.updateDemand(); Resource toAdd = sched.getDemand(); if (LOG.isDebugEnabled()) { @@ -299,7 +295,6 @@ public class FSLeafQueue extends FSQueue { + demand); } demand = Resources.add(demand, toAdd); -demand = Resources.componentwiseMin(demand, maxRes); } @Override http://git-wip-us.apache.org/repos/asf/hadoop/blob/86ac1ad9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java index d05390b..53ac8c9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java @@ -159,16 +159,14 @@ public class FSParentQueue extends FSQueue { childQueue.updateDemand(); Resource toAdd = childQueue.getDemand(); demand = Resources.add(demand, toAdd); -demand = Resources.componentwiseMin(demand, maxShare); if
hadoop git commit: YARN-5843. Incorrect documentation for timeline service entityType/events REST end points (Bibin A Chundatt via Varun Saxena)
Repository: hadoop Updated Branches: refs/heads/trunk c202a1092 -> c8bc7a847 YARN-5843. Incorrect documentation for timeline service entityType/events REST end points (Bibin A Chundatt via Varun Saxena) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c8bc7a84 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c8bc7a84 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c8bc7a84 Branch: refs/heads/trunk Commit: c8bc7a84758f360849b96c2e19c8c41b7e9dbb65 Parents: c202a10 Author: Varun SaxenaAuthored: Thu Nov 10 12:10:03 2016 +0530 Committer: Varun Saxena Committed: Thu Nov 10 12:10:03 2016 +0530 -- .../src/site/markdown/TimelineServer.md | 21 +--- 1 file changed, 9 insertions(+), 12 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8bc7a84/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md index f09909b..ae9faae 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/TimelineServer.md @@ -632,7 +632,7 @@ Use the following URI to obtain all the entity objects of a given ### HTTP Operations Supported: -GET http://localhost:8188/ws/v1/timeline/DS_APP_ATTEMPT +GET ### Query Parameters Supported: @@ -687,7 +687,7 @@ will be returned as a collection of container objects. See also HTTP Request: -GET http:///ws/v1/timeline/{entity-type} +GET http://localhost:8188/ws/v1/timeline/DS_APP_ATTEMPT Response Header: @@ -795,7 +795,7 @@ String. 
HTTP Request: -GET http:///ws/v1/timeline/{entity-type}/{entity-id} +GET http://localhost:8188/ws/v1/timeline/DS_APP_ATTEMPT/appattempt_1430424020775_0003_01 Response Header: @@ -805,8 +805,6 @@ Response Header: Response Body: - http://localhost:8188/ws/v1/timeline/DS_APP_ATTEMPT/appattempt_1430424020775_0003_01 - { "events":[ { @@ -847,15 +845,17 @@ Use the following URI to obtain the event objects of the given `entityType`. ### Query Parameters Supported: -1. `entityIds` - The entity IDs to retrieve events for. +1. `entityId` - The entity IDs to retrieve events for. If null, no events will be returned. + Multiple entityIds can be given as comma separated values. 1. `limit` - A limit on the number of events to return for each entity. If null, defaults to 100 events per entity. 1. `windowStart` - If not null, retrieves only events later than the given time (exclusive) 1. `windowEnd` - If not null, retrieves only events earlier than the given time (inclusive) -1. `eventTypes` - Restricts the events returned to the given types. If null, - events of all types will be returned. +1. `eventType` - Restricts the events returned to the given types. If null, + events of all types will be returned. Multiple eventTypes can be given as + comma separated values. ### Elements of the `events` (Timeline Entity List) Object @@ -882,7 +882,7 @@ Below is the elements of a single event object. Note that `value` of HTTP Request: -GET http:///ws/v1/timeline/entity%20type%200/events +GET http://localhost:8188/ws/v1/timeline/DS_APP_ATTEMPT/events?entityId=appattempt_1430424020775_0003_01 Response Header: @@ -893,9 +893,6 @@ Response Header: Response Body: -GET http://localhost:8188/ws/v1/timeline/DS_APP_ATTEMPT/events?entityId=appattempt_1430424020775_0003_01 - - { "events": [ { - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
hadoop git commit: YARN-5862. TestDiskFailures.testLocalDirsFailures failed (Yufei Gu via Varun Saxena)
Repository: hadoop Updated Branches: refs/heads/branch-2 a6146a102 -> 06c8ebdb8 YARN-5862. TestDiskFailures.testLocalDirsFailures failed (Yufei Gu via Varun Saxena) (cherry picked from commit c202a10923a46a6e7f7f518e6e3dbb6545dbb971) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/06c8ebdb Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/06c8ebdb Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/06c8ebdb Branch: refs/heads/branch-2 Commit: 06c8ebdb83ecae3bda7269e0bc9d133c1092e8da Parents: a6146a1 Author: Varun SaxenaAuthored: Thu Nov 10 11:41:34 2016 +0530 Committer: Varun Saxena Committed: Thu Nov 10 11:43:57 2016 +0530 -- .../java/org/apache/hadoop/yarn/server/TestDiskFailures.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/06c8ebdb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestDiskFailures.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestDiskFailures.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestDiskFailures.java index c7e34d8..bf82ec5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestDiskFailures.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestDiskFailures.java @@ -244,7 +244,9 @@ public class TestDiskFailures { for (int i = 0; i < 10; i++) { Iterator iter = yarnCluster.getResourceManager().getRMContext() .getRMNodes().values().iterator(); - if ((iter.next().getState() != NodeState.UNHEALTHY) == isHealthy) { + // RMNode # might be zero because of timing related issue. 
+ if (iter.hasNext() && + (iter.next().getState() != NodeState.UNHEALTHY) == isHealthy) { break; } // wait for the node health info to go to RM - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
hadoop git commit: YARN-5862. TestDiskFailures.testLocalDirsFailures failed (Yufei Gu via Varun Saxena)
Repository: hadoop Updated Branches: refs/heads/trunk 71adf44c3 -> c202a1092 YARN-5862. TestDiskFailures.testLocalDirsFailures failed (Yufei Gu via Varun Saxena) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c202a109 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c202a109 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c202a109 Branch: refs/heads/trunk Commit: c202a10923a46a6e7f7f518e6e3dbb6545dbb971 Parents: 71adf44 Author: Varun SaxenaAuthored: Thu Nov 10 11:41:34 2016 +0530 Committer: Varun Saxena Committed: Thu Nov 10 11:41:34 2016 +0530 -- .../java/org/apache/hadoop/yarn/server/TestDiskFailures.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/c202a109/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestDiskFailures.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestDiskFailures.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestDiskFailures.java index c7e34d8..bf82ec5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestDiskFailures.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestDiskFailures.java @@ -244,7 +244,9 @@ public class TestDiskFailures { for (int i = 0; i < 10; i++) { Iterator iter = yarnCluster.getResourceManager().getRMContext() .getRMNodes().values().iterator(); - if ((iter.next().getState() != NodeState.UNHEALTHY) == isHealthy) { + // RMNode # might be zero because of timing related issue. 
+ if (iter.hasNext() && + (iter.next().getState() != NodeState.UNHEALTHY) == isHealthy) { break; } // wait for the node health info to go to RM - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
hadoop git commit: HDFS-11029. [SPS]: Provide retry mechanism for the blocks which were failed while moving its storage at DNs. Contributed by Uma Maheswara Rao G
Repository: hadoop Updated Branches: refs/heads/HDFS-10285 3adef4f74 -> d7fcee1d6 HDFS-11029. [SPS]:Provide retry mechanism for the blocks which were failed while moving its storage at DNs. Contributed by Uma Maheswara Rao G Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d7fcee1d Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d7fcee1d Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d7fcee1d Branch: refs/heads/HDFS-10285 Commit: d7fcee1d6870b5ad8e431d76984ee49c9f945c40 Parents: 3adef4f Author: Rakesh RadhakrishnanAuthored: Thu Nov 10 10:09:45 2016 +0530 Committer: Rakesh Radhakrishnan Committed: Thu Nov 10 10:09:45 2016 +0530 -- .../BlockStorageMovementAttemptedItems.java | 221 +++ .../server/namenode/StoragePolicySatisfier.java | 24 +- .../TestBlockStorageMovementAttemptedItems.java | 101 + .../namenode/TestStoragePolicySatisfier.java| 8 +- 4 files changed, 343 insertions(+), 11 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/d7fcee1d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java new file mode 100644 index 000..580d0d6 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java @@ -0,0 +1,221 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import static org.apache.hadoop.util.Time.monotonicNow; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; + +import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMovementResult; +import org.apache.hadoop.util.Daemon; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.annotations.VisibleForTesting; + +/** + * A monitor class for checking whether block storage movements finished or not. + * If block storage movement results from datanode indicates about the movement + * success, then it will just remove the entries from tracking. If it reports + * failure, then it will add back to needed block storage movements list. If no + * DN reports about movement for longer time, then such items will be retries + * automatically after timeout. The default timeout would be 30mins. + */ +public class BlockStorageMovementAttemptedItems { + public static final Logger LOG = + LoggerFactory.getLogger(BlockStorageMovementAttemptedItems.class); + // A map holds the items which are already taken for blocks movements + // processing and sent to DNs. 
+ private final Map storageMovementAttemptedItems; + private final List storageMovementAttemptedResults; + private volatile boolean spsRunning = true; + private Daemon timerThread = null; + // + // It might take anywhere between 30 to 60 minutes before + // a request is timed out. + // + private long selfRetryTimeout = 30 * 60 * 1000; + + // + // It might take anywhere between 5 to 10 minutes before + // a request is timed out. + // + private long checkTimeout = 5 * 60 * 1000; // minimum value + private BlockStorageMovementNeeded blockStorageMovementNeeded; + + public BlockStorageMovementAttemptedItems(long timeoutPeriod, + long selfRetryTimeout, + BlockStorageMovementNeeded unsatisfiedStorageMovementFiles) { +if (timeoutPeriod > 0) { + this.checkTimeout = Math.min(checkTimeout, timeoutPeriod); +} + +this.selfRetryTimeout = selfRetryTimeout; +
hadoop git commit: HDFS-11120. TestEncryptionZones should waitActive. Contributed by John Zhuge.
Repository: hadoop Updated Branches: refs/heads/branch-2.8 108f09a76 -> d81706cd9 HDFS-11120. TestEncryptionZones should waitActive. Contributed by John Zhuge. (cherry picked from commit 907b1431c0ead934e26cf7dbc51bfab5d7cc5ddc) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d81706cd Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d81706cd Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d81706cd Branch: refs/heads/branch-2.8 Commit: d81706cd997c380f502d55d8c3263a53e533baa4 Parents: 108f09a Author: Xiao ChenAuthored: Wed Nov 9 17:15:19 2016 -0800 Committer: Xiao Chen Committed: Wed Nov 9 17:16:36 2016 -0800 -- .../apache/hadoop/hdfs/TestEncryptionZones.java | 61 +++- 1 file changed, 34 insertions(+), 27 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/d81706cd/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java index f1925e1..420a8fe 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java @@ -93,7 +93,9 @@ import org.apache.log4j.Logger; import org.junit.After; import org.junit.Assert; import org.junit.Before; +import org.junit.Rule; import org.junit.Test; +import org.junit.rules.Timeout; import org.mockito.Mockito; import static org.junit.Assert.assertNotNull; @@ -145,6 +147,9 @@ public class TestEncryptionZones { new Path(testRootDir.toString(), "test.jks").toUri(); } + @Rule + public Timeout globalTimeout = new Timeout(120 * 1000); + @Before public void setup() throws Exception { conf = new HdfsConfiguration(); @@ -159,6 +164,7 @@ public class TestEncryptionZones 
{ conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES, 2); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); +cluster.waitActive(); Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE); fs = cluster.getFileSystem(); fsWrapper = new FileSystemTestWrapper(fs); @@ -230,7 +236,7 @@ public class TestEncryptionZones { * with sticky bits. * @throws Exception */ - @Test(timeout = 6) + @Test public void testTrashStickyBit() throws Exception { // create an EZ /zones/zone1, make it world writable. final Path zoneParent = new Path("/zones"); @@ -293,7 +299,7 @@ public class TestEncryptionZones { * with sticky bits. * @throws Exception */ - @Test(timeout = 6) + @Test public void testProvisionTrash() throws Exception { // create an EZ /zones/zone1 final Path zoneParent = new Path("/zones"); @@ -325,7 +331,8 @@ public class TestEncryptionZones { assertTrue(trashFileStatus.getPermission().getStickyBit()); } - @Test(timeout = 6) + // CHECKSTYLE:OFF:MethodLengthCheck + @Test public void testBasicOperations() throws Exception { int numZones = 0; @@ -484,8 +491,9 @@ public class TestEncryptionZones { assertNumZones(numZones); assertZonePresent(null, nonpersistZone.toString()); } + // CHECKSTYLE:ON:MethodLengthCheck - @Test(timeout = 6) + @Test public void testBasicOperationsRootDir() throws Exception { int numZones = 0; final Path rootDir = new Path("/"); @@ -509,7 +517,7 @@ public class TestEncryptionZones { /** * Test listing encryption zones as a non super user. */ - @Test(timeout = 6) + @Test public void testListEncryptionZonesAsNonSuperUser() throws Exception { final UserGroupInformation user = UserGroupInformation. @@ -543,7 +551,7 @@ public class TestEncryptionZones { /** * Test getEncryptionZoneForPath as a non super user. */ - @Test(timeout = 6) + @Test public void testGetEZAsNonSuperUser() throws Exception { final UserGroupInformation user = UserGroupInformation. 
@@ -687,12 +695,12 @@ public class TestEncryptionZones { } } - @Test(timeout = 6) + @Test public void testRenameFileSystem() throws Exception { doRenameEncryptionZone(fsWrapper); } - @Test(timeout = 6) + @Test public void testRenameFileContext() throws Exception { doRenameEncryptionZone(fcWrapper); } @@ -702,7 +710,7 @@ public class TestEncryptionZones { return blocks.getFileEncryptionInfo(); } - @Test(timeout = 12) + @Test public void testReadWrite() throws
hadoop git commit: HDFS-11120. TestEncryptionZones should waitActive. Contributed by John Zhuge.
Repository: hadoop Updated Branches: refs/heads/branch-2 a422740bd -> a6146a102 HDFS-11120. TestEncryptionZones should waitActive. Contributed by John Zhuge. (cherry picked from commit 907b1431c0ead934e26cf7dbc51bfab5d7cc5ddc) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a6146a10 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a6146a10 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a6146a10 Branch: refs/heads/branch-2 Commit: a6146a1026db040fe7c13138d37e8c5a3f5d6733 Parents: a422740 Author: Xiao ChenAuthored: Wed Nov 9 17:15:19 2016 -0800 Committer: Xiao Chen Committed: Wed Nov 9 17:16:31 2016 -0800 -- .../apache/hadoop/hdfs/TestEncryptionZones.java | 61 +++- 1 file changed, 34 insertions(+), 27 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/a6146a10/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java index 520e96a..378273f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java @@ -94,7 +94,9 @@ import org.apache.log4j.Logger; import org.junit.After; import org.junit.Assert; import org.junit.Before; +import org.junit.Rule; import org.junit.Test; +import org.junit.rules.Timeout; import org.mockito.Mockito; import static org.junit.Assert.assertNotNull; @@ -146,6 +148,9 @@ public class TestEncryptionZones { new Path(testRootDir.toString(), "test.jks").toUri(); } + @Rule + public Timeout globalTimeout = new Timeout(120 * 1000); + @Before public void setup() throws Exception { conf = new HdfsConfiguration(); @@ -160,6 +165,7 @@ public class TestEncryptionZones { 
conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES, 2); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); +cluster.waitActive(); Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE); fs = cluster.getFileSystem(); fsWrapper = new FileSystemTestWrapper(fs); @@ -231,7 +237,7 @@ public class TestEncryptionZones { * with sticky bits. * @throws Exception */ - @Test(timeout = 6) + @Test public void testTrashStickyBit() throws Exception { // create an EZ /zones/zone1, make it world writable. final Path zoneParent = new Path("/zones"); @@ -294,7 +300,7 @@ public class TestEncryptionZones { * with sticky bits. * @throws Exception */ - @Test(timeout = 6) + @Test public void testProvisionTrash() throws Exception { // create an EZ /zones/zone1 final Path zoneParent = new Path("/zones"); @@ -326,7 +332,8 @@ public class TestEncryptionZones { assertTrue(trashFileStatus.getPermission().getStickyBit()); } - @Test(timeout = 6) + // CHECKSTYLE:OFF:MethodLengthCheck + @Test public void testBasicOperations() throws Exception { int numZones = 0; @@ -485,8 +492,9 @@ public class TestEncryptionZones { assertNumZones(numZones); assertZonePresent(null, nonpersistZone.toString()); } + // CHECKSTYLE:ON:MethodLengthCheck - @Test(timeout = 6) + @Test public void testBasicOperationsRootDir() throws Exception { int numZones = 0; final Path rootDir = new Path("/"); @@ -510,7 +518,7 @@ public class TestEncryptionZones { /** * Test listing encryption zones as a non super user. */ - @Test(timeout = 6) + @Test public void testListEncryptionZonesAsNonSuperUser() throws Exception { final UserGroupInformation user = UserGroupInformation. @@ -544,7 +552,7 @@ public class TestEncryptionZones { /** * Test getEncryptionZoneForPath as a non super user. */ - @Test(timeout = 6) + @Test public void testGetEZAsNonSuperUser() throws Exception { final UserGroupInformation user = UserGroupInformation. 
@@ -688,12 +696,12 @@ public class TestEncryptionZones { } } - @Test(timeout = 6) + @Test public void testRenameFileSystem() throws Exception { doRenameEncryptionZone(fsWrapper); } - @Test(timeout = 6) + @Test public void testRenameFileContext() throws Exception { doRenameEncryptionZone(fcWrapper); } @@ -703,7 +711,7 @@ public class TestEncryptionZones { return blocks.getFileEncryptionInfo(); } - @Test(timeout = 12) + @Test public void testReadWrite() throws Exception
hadoop git commit: HDFS-11120. TestEncryptionZones should waitActive. Contributed by John Zhuge.
Repository: hadoop Updated Branches: refs/heads/trunk de3a5f8d0 -> 71adf44c3 HDFS-11120. TestEncryptionZones should waitActive. Contributed by John Zhuge. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/71adf44c Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/71adf44c Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/71adf44c Branch: refs/heads/trunk Commit: 71adf44c3fc5655700cdc904e61366d438c938eb Parents: de3a5f8 Author: Xiao ChenAuthored: Wed Nov 9 17:15:19 2016 -0800 Committer: Xiao Chen Committed: Wed Nov 9 17:16:07 2016 -0800 -- .../apache/hadoop/hdfs/TestEncryptionZones.java | 61 +++- 1 file changed, 34 insertions(+), 27 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/71adf44c/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java index 3a0586e..8605b9e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java @@ -94,7 +94,9 @@ import org.apache.log4j.Logger; import org.junit.After; import org.junit.Assert; import org.junit.Before; +import org.junit.Rule; import org.junit.Test; +import org.junit.rules.Timeout; import org.mockito.Mockito; import static org.junit.Assert.assertNotNull; @@ -146,6 +148,9 @@ public class TestEncryptionZones { new Path(testRootDir.toString(), "test.jks").toUri(); } + @Rule + public Timeout globalTimeout = new Timeout(120 * 1000); + @Before public void setup() throws Exception { conf = new HdfsConfiguration(); @@ -160,6 +165,7 @@ public class TestEncryptionZones { conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_ENCRYPTION_ZONES_NUM_RESPONSES, 
2); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); +cluster.waitActive(); Logger.getLogger(EncryptionZoneManager.class).setLevel(Level.TRACE); fs = cluster.getFileSystem(); fsWrapper = new FileSystemTestWrapper(fs); @@ -231,7 +237,7 @@ public class TestEncryptionZones { * with sticky bits. * @throws Exception */ - @Test(timeout = 6) + @Test public void testTrashStickyBit() throws Exception { // create an EZ /zones/zone1, make it world writable. final Path zoneParent = new Path("/zones"); @@ -294,7 +300,7 @@ public class TestEncryptionZones { * with sticky bits. * @throws Exception */ - @Test(timeout = 6) + @Test public void testProvisionTrash() throws Exception { // create an EZ /zones/zone1 final Path zoneParent = new Path("/zones"); @@ -326,7 +332,8 @@ public class TestEncryptionZones { assertTrue(trashFileStatus.getPermission().getStickyBit()); } - @Test(timeout = 6) + // CHECKSTYLE:OFF:MethodLengthCheck + @Test public void testBasicOperations() throws Exception { int numZones = 0; @@ -485,8 +492,9 @@ public class TestEncryptionZones { assertNumZones(numZones); assertZonePresent(null, nonpersistZone.toString()); } + // CHECKSTYLE:ON:MethodLengthCheck - @Test(timeout = 6) + @Test public void testBasicOperationsRootDir() throws Exception { int numZones = 0; final Path rootDir = new Path("/"); @@ -510,7 +518,7 @@ public class TestEncryptionZones { /** * Test listing encryption zones as a non super user. */ - @Test(timeout = 6) + @Test public void testListEncryptionZonesAsNonSuperUser() throws Exception { final UserGroupInformation user = UserGroupInformation. @@ -544,7 +552,7 @@ public class TestEncryptionZones { /** * Test getEncryptionZoneForPath as a non super user. */ - @Test(timeout = 6) + @Test public void testGetEZAsNonSuperUser() throws Exception { final UserGroupInformation user = UserGroupInformation. 
@@ -688,12 +696,12 @@ public class TestEncryptionZones { } } - @Test(timeout = 6) + @Test public void testRenameFileSystem() throws Exception { doRenameEncryptionZone(fsWrapper); } - @Test(timeout = 6) + @Test public void testRenameFileContext() throws Exception { doRenameEncryptionZone(fcWrapper); } @@ -703,7 +711,7 @@ public class TestEncryptionZones { return blocks.getFileEncryptionInfo(); } - @Test(timeout = 12) + @Test public void testReadWrite() throws Exception { final HdfsAdmin dfsAdmin = new
[1/2] hadoop git commit: YARN-5611. Provide an API to update lifetime of an application. Contributed by Rohith Sharma K S
Repository: hadoop Updated Branches: refs/heads/branch-2 8a2998c08 -> a422740bd http://git-wip-us.apache.org/repos/asf/hadoop/blob/a422740b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/monitor/RMAppLifetimeMonitor.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/monitor/RMAppLifetimeMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/monitor/RMAppLifetimeMonitor.java index 19a2c7b..d194204 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/monitor/RMAppLifetimeMonitor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/monitor/RMAppLifetimeMonitor.java @@ -18,9 +18,8 @@ package org.apache.hadoop.yarn.server.resourcemanager.rmapp.monitor; -import java.util.EnumSet; -import java.util.HashMap; import java.util.Map; +import java.util.Map.Entry; import java.util.Set; import org.apache.commons.logging.Log; @@ -33,7 +32,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType; -import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; import org.apache.hadoop.yarn.util.AbstractLivelinessMonitor; import org.apache.hadoop.yarn.util.SystemClock; @@ -47,12 +45,6 @@ public class RMAppLifetimeMonitor private static final Log LOG = LogFactory.getLog(RMAppLifetimeMonitor.class); private 
RMContext rmContext; - private MapmonitoredApps = - new HashMap (); - - private static final EnumSet COMPLETED_APP_STATES = - EnumSet.of(RMAppState.FINISHED, RMAppState.FINISHING, RMAppState.FAILED, - RMAppState.KILLED, RMAppState.FINAL_SAVING, RMAppState.KILLING); public RMAppLifetimeMonitor(RMContext rmContext) { super(RMAppLifetimeMonitor.class.getName(), SystemClock.getInstance()); @@ -61,14 +53,16 @@ public class RMAppLifetimeMonitor @Override protected void serviceInit(Configuration conf) throws Exception { -long monitorInterval = conf.getLong( -YarnConfiguration.RM_APPLICATION_LIFETIME_MONITOR_INTERVAL_MS, -YarnConfiguration.DEFAULT_RM_APPLICATION_LIFETIME_MONITOR_INTERVAL_MS); +long monitorInterval = +conf.getLong(YarnConfiguration.RM_APPLICATION_MONITOR_INTERVAL_MS, +YarnConfiguration.DEFAULT_RM_APPLICATION_MONITOR_INTERVAL_MS); if (monitorInterval <= 0) { monitorInterval = - YarnConfiguration.DEFAULT_RM_APPLICATION_LIFETIME_MONITOR_INTERVAL_MS; + YarnConfiguration.DEFAULT_RM_APPLICATION_MONITOR_INTERVAL_MS; } setMonitorInterval(monitorInterval); +setExpireInterval(0); // No need of expire interval for App. 
+setResetTimeOnStart(false); // do not reset expire time on restart LOG.info("Application lifelime monitor interval set to " + monitorInterval + " ms."); super.serviceInit(conf); @@ -77,57 +71,42 @@ public class RMAppLifetimeMonitor @SuppressWarnings("unchecked") @Override protected synchronized void expire(RMAppToMonitor monitoredAppKey) { -Long remove = monitoredApps.remove(monitoredAppKey); ApplicationId appId = monitoredAppKey.getApplicationId(); RMApp app = rmContext.getRMApps().get(appId); if (app == null) { return; } -// Don't trigger a KILL event if application is in completed states -if (!COMPLETED_APP_STATES.contains(app.getState())) { - String diagnostics = - "Application killed due to exceeding its lifetime period " + remove - + " milliseconds"; - rmContext.getDispatcher().getEventHandler() - .handle(new RMAppEvent(appId, RMAppEventType.KILL, diagnostics)); -} else { - LOG.info("Application " + appId - + " is about to complete. So not killing the application."); -} +String diagnostics = +"Application killed due to exceeding its lifetime period"; +rmContext.getDispatcher().getEventHandler() +.handle(new RMAppEvent(appId, RMAppEventType.KILL, diagnostics)); } - public synchronized void registerApp(ApplicationId appId, - ApplicationTimeoutType timeoutType, long monitorStartTime, long timeout) { + public void registerApp(ApplicationId appId, + ApplicationTimeoutType timeoutType, long
[2/2] hadoop git commit: YARN-5611. Provide an API to update lifetime of an application. Contributed by Rohith Sharma K S
YARN-5611. Provide an API to update lifetime of an application. Contributed by Rohith Sharma K S Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a422740b Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a422740b Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a422740b Branch: refs/heads/branch-2 Commit: a422740bd7f1489d9c471a3f6cf4ab5b1efa14d8 Parents: 8a2998c Author: Jian HeAuthored: Wed Nov 9 14:33:58 2016 -0800 Committer: Jian He Committed: Wed Nov 9 16:39:46 2016 -0800 -- .../hadoop/mapred/TestClientRedirect.java | 9 + .../yarn/api/ApplicationClientProtocol.java | 23 ++ .../UpdateApplicationTimeoutsRequest.java | 81 +++ .../UpdateApplicationTimeoutsResponse.java | 46 .../records/ApplicationSubmissionContext.java | 4 + .../hadoop/yarn/conf/YarnConfiguration.java | 6 +- .../main/proto/applicationclient_protocol.proto | 1 + .../src/main/proto/yarn_protos.proto| 5 + .../src/main/proto/yarn_service_protos.proto| 9 + .../ApplicationClientProtocolPBClientImpl.java | 21 +- .../ApplicationClientProtocolPBServiceImpl.java | 22 ++ .../UpdateApplicationTimeoutsRequestPBImpl.java | 220 +++ ...UpdateApplicationTimeoutsResponsePBImpl.java | 73 ++ .../yarn/util/AbstractLivelinessMonitor.java| 17 +- .../java/org/apache/hadoop/yarn/util/Times.java | 33 +++ .../src/main/resources/yarn-default.xml | 4 +- .../amrmproxy/MockResourceManagerFacade.java| 9 + .../server/resourcemanager/ClientRMService.java | 137 +--- .../server/resourcemanager/RMAppManager.java| 37 .../server/resourcemanager/RMAuditLogger.java | 4 +- .../server/resourcemanager/RMServerUtils.java | 48 +++- .../resourcemanager/recovery/RMStateStore.java | 28 ++- .../recovery/RMStateUpdateAppEvent.java | 15 +- .../recovery/records/ApplicationStateData.java | 27 +++ .../impl/pb/ApplicationStateDataPBImpl.java | 86 .../server/resourcemanager/rmapp/RMApp.java | 3 + .../server/resourcemanager/rmapp/RMAppImpl.java | 64 +- 
.../rmapp/monitor/RMAppLifetimeMonitor.java | 75 +++ .../scheduler/capacity/CapacityScheduler.java | 3 +- .../yarn_server_resourcemanager_recovery.proto | 1 + .../applicationsmanager/MockAsm.java| 6 + .../server/resourcemanager/rmapp/MockRMApp.java | 6 + .../rmapp/TestApplicationLifetimeMonitor.java | 150 - 33 files changed, 1149 insertions(+), 124 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/a422740b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java -- diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java index 255f998..65eac65 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java @@ -124,6 +124,8 @@ import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest; import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationResponse; import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationPriorityRequest; import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationPriorityResponse; +import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationTimeoutsRequest; +import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationTimeoutsResponse; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; @@ -485,6 +487,13 @@ public class TestClientRedirect { SignalContainerRequest 
request) throws IOException { return null; } + +@Override +public UpdateApplicationTimeoutsResponse updateApplicationTimeouts( +UpdateApplicationTimeoutsRequest request) +throws YarnException, IOException { + return null; +} } class HistoryService extends AMService implements HSClientProtocol {
hadoop git commit: YARN-5856. Unnecessary duplicate start container request sent to NM State store. Contributed by Varun Saxena.
Repository: hadoop Updated Branches: refs/heads/trunk bcc15c629 -> de3a5f8d0 YARN-5856. Unnecessary duplicate start container request sent to NM State store. Contributed by Varun Saxena. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/de3a5f8d Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/de3a5f8d Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/de3a5f8d Branch: refs/heads/trunk Commit: de3a5f8d08f64d0c2021a84b40e63e716da2321c Parents: bcc15c6 Author: NaganarasimhaAuthored: Thu Nov 10 05:39:20 2016 +0530 Committer: Naganarasimha Committed: Thu Nov 10 05:42:30 2016 +0530 -- .../server/nodemanager/containermanager/ContainerManagerImpl.java | 2 -- 1 file changed, 2 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/de3a5f8d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java index ab5827e..c7810f9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java @@ -1034,8 +1034,6 @@ public class ContainerManagerImpl extends CompositeService implements containerTokenIdentifier.getVersion(), request); dispatcher.getEventHandler().handle( new 
ApplicationContainerInitEvent(container)); -this.context.getNMStateStore().storeContainer(containerId, -containerTokenIdentifier.getVersion(), request); this.context.getContainerTokenSecretManager().startContainerSuccessful( containerTokenIdentifier); - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[1/2] hadoop git commit: YARN-5611. Provide an API to update lifetime of an application. Contributed by Rohith Sharma K S
Repository: hadoop Updated Branches: refs/heads/trunk edbee9e60 -> bcc15c629 http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcc15c62/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/monitor/RMAppLifetimeMonitor.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/monitor/RMAppLifetimeMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/monitor/RMAppLifetimeMonitor.java index e550c97..d194204 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/monitor/RMAppLifetimeMonitor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/monitor/RMAppLifetimeMonitor.java @@ -18,9 +18,8 @@ package org.apache.hadoop.yarn.server.resourcemanager.rmapp.monitor; -import java.util.EnumSet; -import java.util.HashMap; import java.util.Map; +import java.util.Map.Entry; import java.util.Set; import org.apache.commons.logging.Log; @@ -33,7 +32,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType; -import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; import org.apache.hadoop.yarn.util.AbstractLivelinessMonitor; import org.apache.hadoop.yarn.util.SystemClock; @@ -47,12 +45,6 @@ public class RMAppLifetimeMonitor private static final Log LOG = LogFactory.getLog(RMAppLifetimeMonitor.class); private 
RMContext rmContext; - private MapmonitoredApps = - new HashMap (); - - private static final EnumSet COMPLETED_APP_STATES = - EnumSet.of(RMAppState.FINISHED, RMAppState.FINISHING, RMAppState.FAILED, - RMAppState.KILLED, RMAppState.FINAL_SAVING, RMAppState.KILLING); public RMAppLifetimeMonitor(RMContext rmContext) { super(RMAppLifetimeMonitor.class.getName(), SystemClock.getInstance()); @@ -61,14 +53,16 @@ public class RMAppLifetimeMonitor @Override protected void serviceInit(Configuration conf) throws Exception { -long monitorInterval = conf.getLong( -YarnConfiguration.RM_APPLICATION_LIFETIME_MONITOR_INTERVAL_MS, -YarnConfiguration.DEFAULT_RM_APPLICATION_LIFETIME_MONITOR_INTERVAL_MS); +long monitorInterval = +conf.getLong(YarnConfiguration.RM_APPLICATION_MONITOR_INTERVAL_MS, +YarnConfiguration.DEFAULT_RM_APPLICATION_MONITOR_INTERVAL_MS); if (monitorInterval <= 0) { monitorInterval = - YarnConfiguration.DEFAULT_RM_APPLICATION_LIFETIME_MONITOR_INTERVAL_MS; + YarnConfiguration.DEFAULT_RM_APPLICATION_MONITOR_INTERVAL_MS; } setMonitorInterval(monitorInterval); +setExpireInterval(0); // No need of expire interval for App. 
+setResetTimeOnStart(false); // do not reset expire time on restart LOG.info("Application lifelime monitor interval set to " + monitorInterval + " ms."); super.serviceInit(conf); @@ -77,54 +71,42 @@ public class RMAppLifetimeMonitor @SuppressWarnings("unchecked") @Override protected synchronized void expire(RMAppToMonitor monitoredAppKey) { -Long remove = monitoredApps.remove(monitoredAppKey); ApplicationId appId = monitoredAppKey.getApplicationId(); RMApp app = rmContext.getRMApps().get(appId); if (app == null) { return; } -// Don't trigger a KILL event if application is in completed states -if (!COMPLETED_APP_STATES.contains(app.getState())) { - String diagnostics = - "Application killed due to exceeding its lifetime period " + remove - + " milliseconds"; - rmContext.getDispatcher().getEventHandler() - .handle(new RMAppEvent(appId, RMAppEventType.KILL, diagnostics)); -} else { - LOG.info("Application " + appId - + " is about to complete. So not killing the application."); -} +String diagnostics = +"Application killed due to exceeding its lifetime period"; +rmContext.getDispatcher().getEventHandler() +.handle(new RMAppEvent(appId, RMAppEventType.KILL, diagnostics)); } - public synchronized void registerApp(ApplicationId appId, - ApplicationTimeoutType timeoutType, long monitorStartTime, long timeout) { + public void registerApp(ApplicationId appId, + ApplicationTimeoutType timeoutType, long
[2/2] hadoop git commit: YARN-5611. Provide an API to update lifetime of an application. Contributed by Rohith Sharma K S
YARN-5611. Provide an API to update lifetime of an application. Contributed by Rohith Sharma K S Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bcc15c62 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bcc15c62 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bcc15c62 Branch: refs/heads/trunk Commit: bcc15c6290b3912a054323695a6a931b0de163bd Parents: edbee9e Author: Jian HeAuthored: Wed Nov 9 14:33:58 2016 -0800 Committer: Jian He Committed: Wed Nov 9 16:08:05 2016 -0800 -- .../hadoop/mapred/TestClientRedirect.java | 9 + .../yarn/api/ApplicationClientProtocol.java | 23 ++ .../UpdateApplicationTimeoutsRequest.java | 81 +++ .../UpdateApplicationTimeoutsResponse.java | 46 .../records/ApplicationSubmissionContext.java | 4 + .../hadoop/yarn/conf/YarnConfiguration.java | 6 +- .../main/proto/applicationclient_protocol.proto | 1 + .../src/main/proto/yarn_protos.proto| 5 + .../src/main/proto/yarn_service_protos.proto| 9 + .../ApplicationClientProtocolPBClientImpl.java | 21 +- .../ApplicationClientProtocolPBServiceImpl.java | 22 ++ .../UpdateApplicationTimeoutsRequestPBImpl.java | 220 +++ ...UpdateApplicationTimeoutsResponsePBImpl.java | 73 ++ .../yarn/util/AbstractLivelinessMonitor.java| 17 +- .../java/org/apache/hadoop/yarn/util/Times.java | 33 +++ .../src/main/resources/yarn-default.xml | 4 +- .../amrmproxy/MockResourceManagerFacade.java| 9 + .../server/resourcemanager/ClientRMService.java | 137 +--- .../server/resourcemanager/RMAppManager.java| 37 .../server/resourcemanager/RMAuditLogger.java | 4 +- .../server/resourcemanager/RMServerUtils.java | 48 +++- .../resourcemanager/recovery/RMStateStore.java | 28 ++- .../recovery/RMStateUpdateAppEvent.java | 15 +- .../recovery/records/ApplicationStateData.java | 27 +++ .../impl/pb/ApplicationStateDataPBImpl.java | 86 .../server/resourcemanager/rmapp/RMApp.java | 3 + .../server/resourcemanager/rmapp/RMAppImpl.java | 64 +- 
.../rmapp/monitor/RMAppLifetimeMonitor.java | 72 +++--- .../scheduler/capacity/CapacityScheduler.java | 3 +- .../yarn_server_resourcemanager_recovery.proto | 1 + .../applicationsmanager/MockAsm.java| 6 + .../server/resourcemanager/rmapp/MockRMApp.java | 6 + .../rmapp/TestApplicationLifetimeMonitor.java | 150 - 33 files changed, 1149 insertions(+), 121 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcc15c62/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java -- diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java index 255f998..65eac65 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java @@ -124,6 +124,8 @@ import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest; import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationResponse; import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationPriorityRequest; import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationPriorityResponse; +import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationTimeoutsRequest; +import org.apache.hadoop.yarn.api.protocolrecords.UpdateApplicationTimeoutsResponse; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; @@ -485,6 +487,13 @@ public class TestClientRedirect { SignalContainerRequest 
request) throws IOException { return null; } + +@Override +public UpdateApplicationTimeoutsResponse updateApplicationTimeouts( +UpdateApplicationTimeoutsRequest request) +throws YarnException, IOException { + return null; +} } class HistoryService extends AMService implements HSClientProtocol {
hadoop git commit: YARN-4498. Application level node labels stats to be available in REST (addendum patch). Contributed by Bibin A Chundatt.
Repository: hadoop Updated Branches: refs/heads/branch-2.8 1d75da8e2 -> 108f09a76 YARN-4498. Application level node labels stats to be available in REST (addendum patch). Contributed by Bibin A Chundatt. (cherry picked from commit 8a2998c08c614f9c64f012f40d31404fe9217e63) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/108f09a7 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/108f09a7 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/108f09a7 Branch: refs/heads/branch-2.8 Commit: 108f09a76341485096d1d3a97d2ce87a47293997 Parents: 1d75da8 Author: NaganarasimhaAuthored: Thu Nov 10 05:21:07 2016 +0530 Committer: Naganarasimha Committed: Thu Nov 10 05:24:12 2016 +0530 -- .../yarn/server/resourcemanager/webapp/dao/AppInfo.java | 4 ++-- .../webapp/TestRMWebServiceAppsNodelabel.java | 10 -- 2 files changed, 10 insertions(+), 4 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/108f09a7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java index a91c212..d35d52b4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java @@ -106,7 +106,7 @@ public class AppInfo { protected String appNodeLabelExpression; protected 
String amNodeLabelExpression; - protected ResourcesInfo resourceInfo; + protected ResourcesInfo resourceInfo = null; public AppInfo() { } // JAXB needs this @@ -219,7 +219,7 @@ public class AppInfo { .getApplicationAttempt(attempt.getAppAttemptId()); resourceInfo = null != ficaAppAttempt ? new ResourcesInfo(ficaAppAttempt.getSchedulingResourceUsage()) - : new ResourcesInfo(); + : null; } } } http://git-wip-us.apache.org/repos/asf/hadoop/blob/108f09a7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServiceAppsNodelabel.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServiceAppsNodelabel.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServiceAppsNodelabel.java index 5c99614e..0f2b6ea 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServiceAppsNodelabel.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServiceAppsNodelabel.java @@ -19,6 +19,8 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import java.util.ArrayList; import java.util.HashSet; @@ -157,8 +159,12 @@ public class TestRMWebServiceAppsNodelabel extends JerseyTestBase { JSONObject json = response.getEntity(JSONObject.class); JSONObject apps = json.getJSONObject("apps"); assertEquals("incorrect number of elements", 1, apps.length()); -Object object = 
apps.getJSONArray("app").getJSONObject(0).get("resourceInfo"); -Assert.assertTrue("For finshed app null expected", object.equals(null)); +try { + apps.getJSONArray("app").getJSONObject(0).getJSONObject("resourceInfo"); + fail("resourceInfo object shouldnt be available for finished apps"); +} catch (Exception e) { + assertTrue("resourceInfo shouldn't be available for finished apps", true); +} rm.stop(); }
hadoop git commit: YARN-4498. Application level node labels stats to be available in REST (addendum patch). Contributed by Bibin A Chundatt.
Repository: hadoop Updated Branches: refs/heads/branch-2 16430c64b -> 8a2998c08 YARN-4498. Application level node labels stats to be available in REST (addendum patch). Contributed by Bibin A Chundatt. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8a2998c0 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8a2998c0 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8a2998c0 Branch: refs/heads/branch-2 Commit: 8a2998c08c614f9c64f012f40d31404fe9217e63 Parents: 16430c6 Author: NaganarasimhaAuthored: Thu Nov 10 05:21:07 2016 +0530 Committer: Naganarasimha Committed: Thu Nov 10 05:21:07 2016 +0530 -- .../yarn/server/resourcemanager/webapp/dao/AppInfo.java | 4 ++-- .../webapp/TestRMWebServiceAppsNodelabel.java | 10 -- 2 files changed, 10 insertions(+), 4 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a2998c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java index 3bd6cff..19cbe43 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java @@ -111,7 +111,7 @@ public class AppInfo { protected String appNodeLabelExpression; protected String amNodeLabelExpression; - protected ResourcesInfo resourceInfo; + 
protected ResourcesInfo resourceInfo = null; public AppInfo() { } // JAXB needs this @@ -232,7 +232,7 @@ public class AppInfo { .getApplicationAttempt(attempt.getAppAttemptId()); resourceInfo = null != ficaAppAttempt ? new ResourcesInfo(ficaAppAttempt.getSchedulingResourceUsage()) - : new ResourcesInfo(); + : null; } } } http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a2998c0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServiceAppsNodelabel.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServiceAppsNodelabel.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServiceAppsNodelabel.java index 5c99614e..0f2b6ea 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServiceAppsNodelabel.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServiceAppsNodelabel.java @@ -19,6 +19,8 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import java.util.ArrayList; import java.util.HashSet; @@ -157,8 +159,12 @@ public class TestRMWebServiceAppsNodelabel extends JerseyTestBase { JSONObject json = response.getEntity(JSONObject.class); JSONObject apps = json.getJSONObject("apps"); assertEquals("incorrect number of elements", 1, apps.length()); -Object object = apps.getJSONArray("app").getJSONObject(0).get("resourceInfo"); 
-Assert.assertTrue("For finshed app null expected", object.equals(null)); +try { + apps.getJSONArray("app").getJSONObject(0).getJSONObject("resourceInfo"); + fail("resourceInfo object shouldnt be available for finished apps"); +} catch (Exception e) { + assertTrue("resourceInfo shouldn't be available for finished apps", true); +} rm.stop(); } - To unsubscribe, e-mail:
hadoop git commit: YARN-4498. Application level node labels stats to be available in REST (addendum patch). Contributed by Bibin A Chundatt.
Repository: hadoop Updated Branches: refs/heads/trunk 59ee8b7a8 -> edbee9e60 YARN-4498. Application level node labels stats to be available in REST (addendum patch). Contributed by Bibin A Chundatt. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/edbee9e6 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/edbee9e6 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/edbee9e6 Branch: refs/heads/trunk Commit: edbee9e609e7f31d188660717ff9d3fb9f606abb Parents: 59ee8b7 Author: NaganarasimhaAuthored: Thu Nov 10 05:00:05 2016 +0530 Committer: Naganarasimha Committed: Thu Nov 10 05:00:05 2016 +0530 -- .../server/resourcemanager/webapp/dao/AppInfo.java| 4 ++-- .../webapp/TestRMWebServiceAppsNodelabel.java | 14 +- 2 files changed, 11 insertions(+), 7 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/edbee9e6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java index 3bd6cff..19cbe43 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java @@ -111,7 +111,7 @@ public class AppInfo { protected String appNodeLabelExpression; protected String amNodeLabelExpression; - protected ResourcesInfo resourceInfo; + protected 
ResourcesInfo resourceInfo = null; public AppInfo() { } // JAXB needs this @@ -232,7 +232,7 @@ public class AppInfo { .getApplicationAttempt(attempt.getAppAttemptId()); resourceInfo = null != ficaAppAttempt ? new ResourcesInfo(ficaAppAttempt.getSchedulingResourceUsage()) - : new ResourcesInfo(); + : null; } } } http://git-wip-us.apache.org/repos/asf/hadoop/blob/edbee9e6/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServiceAppsNodelabel.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServiceAppsNodelabel.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServiceAppsNodelabel.java index a931b0b..25a712c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServiceAppsNodelabel.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServiceAppsNodelabel.java @@ -19,10 +19,11 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; +import static org.junit.Assert.assertTrue; import java.util.ArrayList; import java.util.HashSet; -import java.util.Iterator; import java.util.Set; import javax.ws.rs.core.MediaType; @@ -153,10 +154,13 @@ public class TestRMWebServiceAppsNodelabel extends JerseyTestBase { JSONObject json = response.getEntity(JSONObject.class); JSONObject apps = json.getJSONObject("apps"); assertEquals("incorrect number of elements", 1, apps.length()); -JSONObject jsonObject 
= - apps.getJSONArray("app").getJSONObject(0).getJSONObject("resourceInfo"); -Iterator keys = jsonObject.keys(); -assertEquals("For finshed app no values expected", false, keys.hasNext()); +try { + apps.getJSONArray("app").getJSONObject(0).getJSONObject("resourceInfo"); + fail("resourceInfo object shouldnt be available for finished apps"); +} catch (Exception e) { + assertTrue("resourceInfo shouldn't be available for finished apps", +
[hadoop] Git Push Summary
Repository: hadoop Updated Branches: refs/heads/+branch-2.6.0 [deleted] e3496499e - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[hadoop] Git Push Summary
Repository: hadoop Updated Branches: refs/heads/branch-2.6.0-DT [deleted] 37b2bc8c4 - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
hadoop git commit: HDFS-11056. Concurrent append and read operations lead to checksum error. Contributed by Wei-Chiu Chuang.
Repository: hadoop Updated Branches: refs/heads/branch-2.8 b1b609c00 -> 1d75da8e2 HDFS-11056. Concurrent append and read operations lead to checksum error. Contributed by Wei-Chiu Chuang. (cherry picked from commit 16430c64b788bce6997f3a9e4401a2914f37b959) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1d75da8e Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1d75da8e Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1d75da8e Branch: refs/heads/branch-2.8 Commit: 1d75da8e2694fe7cafb1ba52fd1efaff23259f02 Parents: b1b609c Author: Wei-Chiu ChuangAuthored: Wed Nov 9 13:28:16 2016 -0800 Committer: Wei-Chiu Chuang Committed: Wed Nov 9 13:32:35 2016 -0800 -- .../datanode/fsdataset/impl/FsDatasetImpl.java | 41 +++- .../org/apache/hadoop/hdfs/TestFileAppend.java | 69 .../fsdataset/impl/FsDatasetImplTestUtils.java | 15 + 3 files changed, 124 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/1d75da8e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java index ce3c144..c2efb4d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java @@ -1168,7 +1168,30 @@ class FsDatasetImpl implements FsDatasetSpi { return new ReplicaHandler(replica, ref); } } - + + + private byte[] loadLastPartialChunkChecksum( + File blockFile, File metaFile) throws IOException { +DataChecksum dcs = BlockMetadataHeader.readHeader(metaFile).getChecksum(); +final int 
checksumSize = dcs.getChecksumSize(); +final long onDiskLen = blockFile.length(); +final int bytesPerChecksum = dcs.getBytesPerChecksum(); + +if (onDiskLen % bytesPerChecksum == 0) { + // the last chunk is a complete one. No need to preserve its checksum + // because it will not be modified. + return null; +} + +int offsetInChecksum = BlockMetadataHeader.getHeaderSize() + +(int)(onDiskLen / bytesPerChecksum * checksumSize); +byte[] lastChecksum = new byte[checksumSize]; +RandomAccessFile raf = new RandomAccessFile(metaFile, "r"); +raf.seek(offsetInChecksum); +raf.read(lastChecksum, 0, checksumSize); +return lastChecksum; + } + /** Append to a finalized replica * Change a finalized replica to be a RBW replica and * bump its generation stamp to be the newGS @@ -1205,6 +1228,13 @@ class FsDatasetImpl implements FsDatasetSpi { ReplicaBeingWritten newReplicaInfo = new ReplicaBeingWritten( replicaInfo.getBlockId(), replicaInfo.getNumBytes(), newGS, v, newBlkFile.getParentFile(), Thread.currentThread(), bytesReserved); + + // load last checksum and datalen + byte[] lastChunkChecksum = loadLastPartialChunkChecksum( + replicaInfo.getBlockFile(), replicaInfo.getMetaFile()); + newReplicaInfo.setLastChecksumAndDataLen( + replicaInfo.getNumBytes(), lastChunkChecksum); + File newmeta = newReplicaInfo.getMetaFile(); // rename meta file to rbw directory @@ -1584,6 +1614,12 @@ class FsDatasetImpl implements FsDatasetSpi { blockId, numBytes, expectedGs, v, dest.getParentFile(), Thread.currentThread(), 0); rbw.setBytesAcked(visible); + + // load last checksum and datalen + final File destMeta = FsDatasetUtil.getMetaFile(dest, + b.getGenerationStamp()); + byte[] lastChunkChecksum = loadLastPartialChunkChecksum(dest, destMeta); + rbw.setLastChecksumAndDataLen(numBytes, lastChunkChecksum); // overwrite the RBW in the volume map volumeMap.add(b.getBlockPoolId(), rbw); return rbw; @@ -2666,6 +2702,9 @@ class FsDatasetImpl implements FsDatasetSpi { newBlockId, recoveryId, volume, 
blockFile.getParentFile(), newlength); newReplicaInfo.setNumBytes(newlength); +// In theory, this rbw replica needs to reload last chunk checksum, +// but it is immediately converted to finalized state within the same +// lock, so no need to update it. volumeMap.add(bpid, newReplicaInfo); finalizeReplica(bpid, newReplicaInfo); }
[hadoop] Git Push Summary [Forced Update!]
Repository: hadoop Updated Branches: refs/heads/branch-2.6.0 37b2bc8c4 -> e3496499e (forced update) - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[hadoop] Git Push Summary
Repository: hadoop Updated Branches: refs/heads/+branch-2.6.0 [created] e3496499e - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
hadoop git commit: HDFS-11056. Concurrent append and read operations lead to checksum error. Contributed by Wei-Chiu Chuang.
Repository: hadoop Updated Branches: refs/heads/branch-2 42147bbbf -> 16430c64b HDFS-11056. Concurrent append and read operations lead to checksum error. Contributed by Wei-Chiu Chuang. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/16430c64 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/16430c64 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/16430c64 Branch: refs/heads/branch-2 Commit: 16430c64b788bce6997f3a9e4401a2914f37b959 Parents: 42147bb Author: Wei-Chiu ChuangAuthored: Wed Nov 9 13:28:16 2016 -0800 Committer: Wei-Chiu Chuang Committed: Wed Nov 9 13:28:16 2016 -0800 -- .../datanode/fsdataset/impl/FsDatasetImpl.java | 41 +++- .../org/apache/hadoop/hdfs/TestFileAppend.java | 69 .../fsdataset/impl/FsDatasetImplTestUtils.java | 15 + 3 files changed, 124 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/16430c64/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java index 5bf5612..7abccb9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java @@ -1170,7 +1170,30 @@ class FsDatasetImpl implements FsDatasetSpi { return new ReplicaHandler(replica, ref); } } - + + + private byte[] loadLastPartialChunkChecksum( + File blockFile, File metaFile) throws IOException { +DataChecksum dcs = BlockMetadataHeader.readHeader(metaFile).getChecksum(); +final int checksumSize = dcs.getChecksumSize(); +final long onDiskLen = blockFile.length(); 
+final int bytesPerChecksum = dcs.getBytesPerChecksum(); + +if (onDiskLen % bytesPerChecksum == 0) { + // the last chunk is a complete one. No need to preserve its checksum + // because it will not be modified. + return null; +} + +int offsetInChecksum = BlockMetadataHeader.getHeaderSize() + +(int)(onDiskLen / bytesPerChecksum * checksumSize); +byte[] lastChecksum = new byte[checksumSize]; +RandomAccessFile raf = new RandomAccessFile(metaFile, "r"); +raf.seek(offsetInChecksum); +raf.read(lastChecksum, 0, checksumSize); +return lastChecksum; + } + /** Append to a finalized replica * Change a finalized replica to be a RBW replica and * bump its generation stamp to be the newGS @@ -1207,6 +1230,13 @@ class FsDatasetImpl implements FsDatasetSpi { ReplicaBeingWritten newReplicaInfo = new ReplicaBeingWritten( replicaInfo.getBlockId(), replicaInfo.getNumBytes(), newGS, v, newBlkFile.getParentFile(), Thread.currentThread(), bytesReserved); + + // load last checksum and datalen + byte[] lastChunkChecksum = loadLastPartialChunkChecksum( + replicaInfo.getBlockFile(), replicaInfo.getMetaFile()); + newReplicaInfo.setLastChecksumAndDataLen( + replicaInfo.getNumBytes(), lastChunkChecksum); + File newmeta = newReplicaInfo.getMetaFile(); // rename meta file to rbw directory @@ -1586,6 +1616,12 @@ class FsDatasetImpl implements FsDatasetSpi { blockId, numBytes, expectedGs, v, dest.getParentFile(), Thread.currentThread(), 0); rbw.setBytesAcked(visible); + + // load last checksum and datalen + final File destMeta = FsDatasetUtil.getMetaFile(dest, + b.getGenerationStamp()); + byte[] lastChunkChecksum = loadLastPartialChunkChecksum(dest, destMeta); + rbw.setLastChecksumAndDataLen(numBytes, lastChunkChecksum); // overwrite the RBW in the volume map volumeMap.add(b.getBlockPoolId(), rbw); return rbw; @@ -2668,6 +2704,9 @@ class FsDatasetImpl implements FsDatasetSpi { newBlockId, recoveryId, volume, blockFile.getParentFile(), newlength); newReplicaInfo.setNumBytes(newlength); +// In 
theory, this rbw replica needs to reload last chunk checksum, +// but it is immediately converted to finalized state within the same +// lock, so no need to update it. volumeMap.add(bpid, newReplicaInfo); finalizeReplica(bpid, newReplicaInfo); }
[hadoop] Git Push Summary
Repository: hadoop Updated Branches: refs/heads/branch-2.6.0-DT [created] 37b2bc8c4 - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
hadoop git commit: Revert "CLOUDERA-BUILD. Set the Fair Scheduler as the default scheduler."
Repository: hadoop Updated Branches: refs/heads/branch-2.6.0 44fdd1414 -> 37b2bc8c4 Revert "CLOUDERA-BUILD. Set the Fair Scheduler as the default scheduler." This reverts commit 44fdd141468f72082c36d24151327ada19da8e46. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/37b2bc8c Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/37b2bc8c Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/37b2bc8c Branch: refs/heads/branch-2.6.0 Commit: 37b2bc8c469902c7de3836a333f388a65977c650 Parents: 44fdd14 Author: Daniel TempletonAuthored: Wed Nov 9 13:20:51 2016 -0800 Committer: Daniel Templeton Committed: Wed Nov 9 13:20:51 2016 -0800 -- .../main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java | 2 +- .../hadoop-yarn-common/src/main/resources/yarn-default.xml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/37b2bc8c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index 6b991e7..83cbfc5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -331,7 +331,7 @@ public class YarnConfiguration extends Configuration { RM_PREFIX + "scheduler.class"; public static final String DEFAULT_RM_SCHEDULER = - "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler"; + "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler"; /** RM set next Heartbeat interval for NM */ public static final String 
RM_NM_HEARTBEAT_INTERVAL_MS = http://git-wip-us.apache.org/repos/asf/hadoop/blob/37b2bc8c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml index d47ddca..9d37196 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml @@ -224,7 +224,7 @@ The class to use as the resource scheduler. yarn.resourcemanager.scheduler.class - org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler + org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[3/3] hadoop git commit: YARN-4329. [YARN-5437] Allow fetching exact reason as to why a submitted app is in ACCEPTED state in Fair Scheduler (Contributed by Yufei Gu)
YARN-4329. [YARN-5437] Allow fetching exact reason as to why a submitted app is in ACCEPTED state in Fair Scheduler (Contributed by Yufei Gu) (cherry picked from commit 59ee8b7a88603e94b5661a8d5d088f7aa99fe049) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/42147bbb Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/42147bbb Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/42147bbb Branch: refs/heads/branch-2 Commit: 42147bbbf73996504704d563e541fa1efb1b3b42 Parents: ede9091 Author: Daniel TempletonAuthored: Wed Nov 9 13:11:37 2016 -0800 Committer: Daniel Templeton Committed: Wed Nov 9 13:18:28 2016 -0800 -- .../scheduler/fair/FSAppAttempt.java| 71 +++- .../scheduler/fair/FairScheduler.java | 4 +- .../scheduler/fair/MaxRunningAppsEnforcer.java | 50 -- .../fair/TestMaxRunningAppsEnforcer.java| 2 +- 4 files changed, 103 insertions(+), 24 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/42147bbb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java index bc988c9..0686bc2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java @@ -766,8 +766,18 @@ public class 
FSAppAttempt extends SchedulerApplicationAttempt // The desired container won't fit here, so reserve if (isReservable(capability) && reserve(request, node, reservedContainer, type, schedulerKey)) { + if (isWaitingForAMContainer()) { +updateAMDiagnosticMsg(capability, +" exceed the available resources of the node and the request is" ++ " reserved"); + } return FairScheduler.CONTAINER_RESERVED; } else { + if (isWaitingForAMContainer()) { +updateAMDiagnosticMsg(capability, +" exceed the available resources of the node and the request cannot" ++ " be reserved"); + } if (LOG.isDebugEnabled()) { LOG.debug("Couldn't creating reservation for " + getName() + ",at priority " + request.getPriority()); @@ -920,23 +930,31 @@ public class FSAppAttempt extends SchedulerApplicationAttempt ResourceRequest rackRequest = getResourceRequest(key, node.getRackName()); ResourceRequest nodeRequest = getResourceRequest(key, node.getNodeName()); -return -// There must be outstanding requests at the given priority: +boolean ret = true; +if (!(// There must be outstanding requests at the given priority: anyRequest != null && anyRequest.getNumContainers() > 0 && -// If locality relaxation is turned off at *-level, there must be a -// non-zero request for the node's rack: -(anyRequest.getRelaxLocality() || -(rackRequest != null && rackRequest.getNumContainers() > 0)) && -// If locality relaxation is turned off at rack-level, there must be a -// non-zero request at the node: -(rackRequest == null || rackRequest.getRelaxLocality() || -(nodeRequest != null && nodeRequest.getNumContainers() > 0)) && -// The requested container must be able to fit on the node: -Resources.lessThanOrEqual(RESOURCE_CALCULATOR, null, -anyRequest.getCapability(), -node.getRMNode().getTotalCapability()) && -// The requested container must fit in queue maximum share: -getQueue().fitsInMaxShare(anyRequest.getCapability()); +// If locality relaxation is turned off at *-level, there must be a +// non-zero request for the 
node's rack: +(anyRequest.getRelaxLocality() || +(rackRequest != null && rackRequest.getNumContainers() > 0)) && +// If locality relaxation is turned off at rack-level, there must be a +// non-zero request at the node: +(rackRequest ==
[1/3] hadoop git commit: CLOUDERA-BUILD. Set the Fair Scheduler as the default scheduler.
Repository: hadoop Updated Branches: refs/heads/branch-2 ede909144 -> 42147bbbf refs/heads/branch-2.6.0 e3496499e -> 44fdd1414 refs/heads/trunk 822ae88f7 -> 59ee8b7a8 CLOUDERA-BUILD. Set the Fair Scheduler as the default scheduler. (cherry picked from commit 2608afcdbb6efa3bbf496c2b70a7e2ccde4dd0ba) (cherry picked from commit 30f7dad019e7de2b974360de97a9e841ea5663cc) (cherry picked from commit e55941cca0971855b19a0d42c0ec510d4d464027) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/44fdd141 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/44fdd141 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/44fdd141 Branch: refs/heads/branch-2.6.0 Commit: 44fdd141468f72082c36d24151327ada19da8e46 Parents: e349649 Author: Sandy RyzaAuthored: Fri Jun 21 16:35:24 2013 -0700 Committer: Daniel Templeton Committed: Sun Sep 4 07:24:08 2016 -0700 -- .../main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java | 2 +- .../hadoop-yarn-common/src/main/resources/yarn-default.xml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/44fdd141/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index 83cbfc5..6b991e7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -331,7 +331,7 @@ public class YarnConfiguration extends Configuration { RM_PREFIX + "scheduler.class"; public static final String DEFAULT_RM_SCHEDULER = - 
"org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler"; + "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler"; /** RM set next Heartbeat interval for NM */ public static final String RM_NM_HEARTBEAT_INTERVAL_MS = http://git-wip-us.apache.org/repos/asf/hadoop/blob/44fdd141/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml index 9d37196..d47ddca 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml @@ -224,7 +224,7 @@ The class to use as the resource scheduler. yarn.resourcemanager.scheduler.class - org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler + org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
[2/3] hadoop git commit: YARN-4329. [YARN-5437] Allow fetching exact reason as to why a submitted app is in ACCEPTED state in Fair Scheduler (Contributed by Yufei Gu)
YARN-4329. [YARN-5437] Allow fetching exact reason as to why a submitted app is in ACCEPTED state in Fair Scheduler (Contributed by Yufei Gu) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/59ee8b7a Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/59ee8b7a Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/59ee8b7a Branch: refs/heads/trunk Commit: 59ee8b7a88603e94b5661a8d5d088f7aa99fe049 Parents: 822ae88 Author: Daniel TempletonAuthored: Wed Nov 9 13:11:37 2016 -0800 Committer: Daniel Templeton Committed: Wed Nov 9 13:11:37 2016 -0800 -- .../scheduler/fair/FSAppAttempt.java| 71 +++- .../scheduler/fair/FairScheduler.java | 4 +- .../scheduler/fair/MaxRunningAppsEnforcer.java | 50 -- .../fair/TestMaxRunningAppsEnforcer.java| 2 +- 4 files changed, 103 insertions(+), 24 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/59ee8b7a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java index 11922d9..df20117 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java @@ -766,8 +766,18 @@ public class FSAppAttempt extends SchedulerApplicationAttempt // The desired 
container won't fit here, so reserve if (isReservable(capability) && reserve(request, node, reservedContainer, type, schedulerKey)) { + if (isWaitingForAMContainer()) { +updateAMDiagnosticMsg(capability, +" exceed the available resources of the node and the request is" ++ " reserved"); + } return FairScheduler.CONTAINER_RESERVED; } else { + if (isWaitingForAMContainer()) { +updateAMDiagnosticMsg(capability, +" exceed the available resources of the node and the request cannot" ++ " be reserved"); + } if (LOG.isDebugEnabled()) { LOG.debug("Couldn't creating reservation for " + getName() + ",at priority " + request.getPriority()); @@ -920,23 +930,31 @@ public class FSAppAttempt extends SchedulerApplicationAttempt ResourceRequest rackRequest = getResourceRequest(key, node.getRackName()); ResourceRequest nodeRequest = getResourceRequest(key, node.getNodeName()); -return -// There must be outstanding requests at the given priority: +boolean ret = true; +if (!(// There must be outstanding requests at the given priority: anyRequest != null && anyRequest.getNumContainers() > 0 && -// If locality relaxation is turned off at *-level, there must be a -// non-zero request for the node's rack: -(anyRequest.getRelaxLocality() || -(rackRequest != null && rackRequest.getNumContainers() > 0)) && -// If locality relaxation is turned off at rack-level, there must be a -// non-zero request at the node: -(rackRequest == null || rackRequest.getRelaxLocality() || -(nodeRequest != null && nodeRequest.getNumContainers() > 0)) && -// The requested container must be able to fit on the node: -Resources.lessThanOrEqual(RESOURCE_CALCULATOR, null, -anyRequest.getCapability(), -node.getRMNode().getTotalCapability()) && -// The requested container must fit in queue maximum share: -getQueue().fitsInMaxShare(anyRequest.getCapability()); +// If locality relaxation is turned off at *-level, there must be a +// non-zero request for the node's rack: +(anyRequest.getRelaxLocality() || +(rackRequest != 
null && rackRequest.getNumContainers() > 0)) && +// If locality relaxation is turned off at rack-level, there must be a +// non-zero request at the node: +(rackRequest == null || rackRequest.getRelaxLocality() || +(nodeRequest != null &&
hadoop git commit: HADOOP-13346. DelegationTokenAuthenticationHandler writes via closed writer. Contributed by Gregory Chanan and Hrishikesh Gadre.
Repository: hadoop Updated Branches: refs/heads/trunk c619e9b43 -> 822ae88f7 HADOOP-13346. DelegationTokenAuthenticationHandler writes via closed writer. Contributed by Gregory Chanan and Hrishikesh Gadre. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/822ae88f Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/822ae88f Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/822ae88f Branch: refs/heads/trunk Commit: 822ae88f7da638e15a25747f6965caee8198aca6 Parents: c619e9b Author: Xiao ChenAuthored: Wed Nov 9 09:32:15 2016 -0800 Committer: Xiao Chen Committed: Wed Nov 9 09:33:00 2016 -0800 -- .../DelegationTokenAuthenticationHandler.java | 32 - ...tionTokenAuthenticationHandlerWithMocks.java | 50 2 files changed, 81 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/822ae88f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java -- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java index c23a94f..315c9d6 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java @@ -48,6 +48,8 @@ import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdenti import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSecretManager; import org.apache.hadoop.util.HttpExceptionUtils; import org.apache.hadoop.util.StringUtils; +import org.codehaus.jackson.JsonFactory; +import 
org.codehaus.jackson.JsonGenerator; import org.codehaus.jackson.map.ObjectMapper; import com.google.common.annotations.VisibleForTesting; @@ -89,6 +91,8 @@ public abstract class DelegationTokenAuthenticationHandler public static final String DELEGATION_TOKEN_UGI_ATTRIBUTE = "hadoop.security.delegation-token.ugi"; + public static final String JSON_MAPPER_PREFIX = PREFIX + "json-mapper."; + static { DELEGATION_TOKEN_OPS.add(KerberosDelegationTokenAuthenticator. DelegationTokenOperation.GETDELEGATIONTOKEN.toString()); @@ -101,6 +105,7 @@ public abstract class DelegationTokenAuthenticationHandler private AuthenticationHandler authHandler; private DelegationTokenManager tokenManager; private String authType; + private JsonFactory jsonFactory; public DelegationTokenAuthenticationHandler(AuthenticationHandler handler) { authHandler = handler; @@ -120,6 +125,7 @@ public abstract class DelegationTokenAuthenticationHandler public void init(Properties config) throws ServletException { authHandler.init(config); initTokenManager(config); +initJsonFactory(config); } /** @@ -153,6 +159,30 @@ public abstract class DelegationTokenAuthenticationHandler tokenManager.init(); } + @VisibleForTesting + public void initJsonFactory(Properties config) { +boolean hasFeature = false; +JsonFactory tmpJsonFactory = new JsonFactory(); + +for (Map.Entry entry : config.entrySet()) { + String key = (String)entry.getKey(); + if (key.startsWith(JSON_MAPPER_PREFIX)) { +JsonGenerator.Feature feature = +JsonGenerator.Feature.valueOf(key.substring(JSON_MAPPER_PREFIX +.length())); +if (feature != null) { + hasFeature = true; + boolean enabled = Boolean.parseBoolean((String)entry.getValue()); + tmpJsonFactory.configure(feature, enabled); +} + } +} + +if (hasFeature) { + jsonFactory = tmpJsonFactory; +} + } + @Override public void destroy() { tokenManager.destroy(); @@ -298,7 +328,7 @@ public abstract class DelegationTokenAuthenticationHandler if (map != null) { 
response.setContentType(MediaType.APPLICATION_JSON); Writer writer = response.getWriter(); - ObjectMapper jsonMapper = new ObjectMapper(); + ObjectMapper jsonMapper = new ObjectMapper(jsonFactory); jsonMapper.writeValue(writer, map); writer.write(ENTER); writer.flush();
hadoop git commit: HDFS-11056. Concurrent append and read operations lead to checksum error. Contributed by Wei-Chiu Chuang.
Repository: hadoop Updated Branches: refs/heads/trunk 367c3d412 -> c619e9b43 HDFS-11056. Concurrent append and read operations lead to checksum error. Contributed by Wei-Chiu Chuang. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c619e9b4 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c619e9b4 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c619e9b4 Branch: refs/heads/trunk Commit: c619e9b43fd00ba0e59a98ae09685ff719bb722b Parents: 367c3d4 Author: Wei-Chiu ChuangAuthored: Wed Nov 9 09:15:51 2016 -0800 Committer: Wei-Chiu Chuang Committed: Wed Nov 9 09:16:50 2016 -0800 -- .../datanode/fsdataset/impl/FsVolumeImpl.java | 41 +++ .../org/apache/hadoop/hdfs/TestFileAppend.java | 71 .../fsdataset/impl/FsDatasetImplTestUtils.java | 14 3 files changed, 126 insertions(+) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/c619e9b4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java -- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java index 1627865..5880b3e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java @@ -23,6 +23,7 @@ import java.io.FileOutputStream; import java.io.FilenameFilter; import java.io.IOException; import java.io.OutputStreamWriter; +import java.io.RandomAccessFile; import java.net.URI; import java.nio.channels.ClosedChannelException; import java.nio.file.Files; @@ -47,6 +48,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.DF; import org.apache.hadoop.fs.FileUtil; import 
org.apache.hadoop.fs.StorageType; +import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader; import org.apache.hadoop.util.AutoCloseableLock; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtilClient; @@ -59,6 +61,7 @@ import org.apache.hadoop.hdfs.server.datanode.LocalReplica; import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; +import org.apache.hadoop.util.DataChecksum; import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException; import org.apache.hadoop.hdfs.server.datanode.ReplicaBuilder; import org.apache.hadoop.hdfs.server.datanode.LocalReplicaInPipeline; @@ -1102,6 +1105,28 @@ public class FsVolumeImpl implements FsVolumeSpi { } + private byte[] loadLastPartialChunkChecksum( + File blockFile, File metaFile) throws IOException { +DataChecksum dcs = BlockMetadataHeader.readHeader(metaFile).getChecksum(); +final int checksumSize = dcs.getChecksumSize(); +final long onDiskLen = blockFile.length(); +final int bytesPerChecksum = dcs.getBytesPerChecksum(); + +if (onDiskLen % bytesPerChecksum == 0) { + // the last chunk is a complete one. No need to preserve its checksum + // because it will not be modified. 
+ return null; +} + +int offsetInChecksum = BlockMetadataHeader.getHeaderSize() + +(int)(onDiskLen / bytesPerChecksum * checksumSize); +byte[] lastChecksum = new byte[checksumSize]; +RandomAccessFile raf = new RandomAccessFile(metaFile, "r"); +raf.seek(offsetInChecksum); +raf.read(lastChecksum, 0, checksumSize); +return lastChecksum; + } + public ReplicaInPipeline append(String bpid, ReplicaInfo replicaInfo, long newGS, long estimateBlockLen) throws IOException { @@ -1126,6 +1151,13 @@ public class FsVolumeImpl implements FsVolumeSpi { .setBytesToReserve(bytesReserved) .buildLocalReplicaInPipeline(); +// load last checksum and datalen +LocalReplica localReplica = (LocalReplica)replicaInfo; +byte[] lastChunkChecksum = loadLastPartialChunkChecksum( +localReplica.getBlockFile(), localReplica.getMetaFile()); +newReplicaInfo.setLastChecksumAndDataLen( +replicaInfo.getNumBytes(), lastChunkChecksum); + // rename meta file to rbw directory // rename block file to rbw directory newReplicaInfo.moveReplicaFrom(replicaInfo, newBlkFile); @@ -1170,6 +1202,12 @@ public class FsVolumeImpl implements FsVolumeSpi { .setBytesToReserve(0) .buildLocalReplicaInPipeline();
hadoop git commit: HADOOP-13590. Retry until TGT expires even if the UGI renewal thread encountered exception.
Repository: hadoop Updated Branches: refs/heads/branch-2.8 ad992e05a -> b1b609c00 HADOOP-13590. Retry until TGT expires even if the UGI renewal thread encountered exception. (cherry picked from commit 367c3d41217728c2e61252c5a5235e5bc1f9822f) (cherry picked from commit ede909144da6fe8bde2f6561fa44a458674f96e2) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b1b609c0 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b1b609c0 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b1b609c0 Branch: refs/heads/branch-2.8 Commit: b1b609c00305383e569a8ef38b4ee91b5d257894 Parents: ad992e0 Author: Xiao ChenAuthored: Wed Nov 9 09:08:04 2016 -0800 Committer: Xiao Chen Committed: Wed Nov 9 09:10:06 2016 -0800 -- .../hadoop/security/UserGroupInformation.java | 70 ++- .../security/TestUserGroupInformation.java | 94 2 files changed, 160 insertions(+), 4 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/b1b609c0/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java -- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java index e1f77ec..b224b07 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java @@ -41,6 +41,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.TimeUnit; import javax.security.auth.Subject; import javax.security.auth.callback.CallbackHandler; @@ -52,14 +53,18 @@ import javax.security.auth.login.LoginContext; import javax.security.auth.login.LoginException; import javax.security.auth.spi.LoginModule; +import 
org.apache.hadoop.io.retry.RetryPolicies; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.retry.RetryPolicy; import org.apache.hadoop.metrics2.annotation.Metric; import org.apache.hadoop.metrics2.annotation.Metrics; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.lib.MetricsRegistry; +import org.apache.hadoop.metrics2.lib.MutableGaugeInt; +import org.apache.hadoop.metrics2.lib.MutableGaugeLong; import org.apache.hadoop.metrics2.lib.MutableQuantiles; import org.apache.hadoop.metrics2.lib.MutableRate; import org.apache.hadoop.security.SaslRpcServer.AuthMethod; @@ -83,7 +88,8 @@ import org.slf4j.LoggerFactory; @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce", "HBase", "Hive", "Oozie"}) @InterfaceStability.Evolving public class UserGroupInformation { - private static final Logger LOG = LoggerFactory.getLogger( + @VisibleForTesting + static final Logger LOG = LoggerFactory.getLogger( UserGroupInformation.class); /** @@ -119,6 +125,10 @@ public class UserGroupInformation { MutableRate loginFailure; @Metric("GetGroups") MutableRate getGroups; MutableQuantiles[] getGroupsQuantiles; +@Metric("Renewal failures since startup") +private MutableGaugeLong renewalFailuresTotal; +@Metric("Renewal failures since last successful login") +private MutableGaugeInt renewalFailures; static UgiMetrics create() { return DefaultMetricsSystem.instance().register(new UgiMetrics()); @@ -132,6 +142,10 @@ public class UserGroupInformation { } } } + +MutableGaugeInt getRenewalFailures() { + return renewalFailures; +} } /** @@ -924,6 +938,7 @@ public class UserGroupInformation { return; } long nextRefresh = getRefreshTime(tgt); +RetryPolicy rp = null; while (true) { try { long now = Time.now(); @@ -947,13 +962,40 @@ public class UserGroupInformation { } nextRefresh = 
Math.max(getRefreshTime(tgt), now + kerberosMinSecondsBeforeRelogin); +metrics.renewalFailures.set(0); +rp = null; } catch (InterruptedException ie) { LOG.warn("Terminating renewal thread"); return; } catch (IOException ie) { -LOG.warn("Exception encountered while running the" + -" renewal command. Aborting renew thread. " + ie); -return; +
hadoop git commit: HADOOP-13590. Retry until TGT expires even if the UGI renewal thread encountered exception.
Repository: hadoop Updated Branches: refs/heads/trunk 280357c29 -> 367c3d412 HADOOP-13590. Retry until TGT expires even if the UGI renewal thread encountered exception. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/367c3d41 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/367c3d41 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/367c3d41 Branch: refs/heads/trunk Commit: 367c3d41217728c2e61252c5a5235e5bc1f9822f Parents: 280357c Author: Xiao ChenAuthored: Wed Nov 9 09:07:12 2016 -0800 Committer: Xiao Chen Committed: Wed Nov 9 09:07:12 2016 -0800 -- .../hadoop/security/UserGroupInformation.java | 70 - .../hadoop/security/TestUGIWithMiniKdc.java | 144 +++ .../security/TestUserGroupInformation.java | 93 3 files changed, 303 insertions(+), 4 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/367c3d41/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java -- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java index 111c3f8..82603a4 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java @@ -43,6 +43,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.TimeUnit; import javax.security.auth.Subject; import javax.security.auth.callback.CallbackHandler; @@ -54,14 +55,18 @@ import javax.security.auth.login.LoginContext; import javax.security.auth.login.LoginException; import javax.security.auth.spi.LoginModule; +import org.apache.hadoop.io.retry.RetryPolicies; import org.apache.hadoop.classification.InterfaceAudience; 
import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.retry.RetryPolicy; import org.apache.hadoop.metrics2.annotation.Metric; import org.apache.hadoop.metrics2.annotation.Metrics; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.lib.MetricsRegistry; +import org.apache.hadoop.metrics2.lib.MutableGaugeInt; +import org.apache.hadoop.metrics2.lib.MutableGaugeLong; import org.apache.hadoop.metrics2.lib.MutableQuantiles; import org.apache.hadoop.metrics2.lib.MutableRate; import org.apache.hadoop.security.SaslRpcServer.AuthMethod; @@ -85,7 +90,8 @@ import org.slf4j.LoggerFactory; @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce", "HBase", "Hive", "Oozie"}) @InterfaceStability.Evolving public class UserGroupInformation { - private static final Logger LOG = LoggerFactory.getLogger( + @VisibleForTesting + static final Logger LOG = LoggerFactory.getLogger( UserGroupInformation.class); /** @@ -121,6 +127,10 @@ public class UserGroupInformation { MutableRate loginFailure; @Metric("GetGroups") MutableRate getGroups; MutableQuantiles[] getGroupsQuantiles; +@Metric("Renewal failures since startup") +private MutableGaugeLong renewalFailuresTotal; +@Metric("Renewal failures since last successful login") +private MutableGaugeInt renewalFailures; static UgiMetrics create() { return DefaultMetricsSystem.instance().register(new UgiMetrics()); @@ -138,6 +148,10 @@ public class UserGroupInformation { } } } + +MutableGaugeInt getRenewalFailures() { + return renewalFailures; +} } /** @@ -963,6 +977,7 @@ public class UserGroupInformation { return; } long nextRefresh = getRefreshTime(tgt); +RetryPolicy rp = null; while (true) { try { long now = Time.now(); @@ -986,13 +1001,40 @@ public class UserGroupInformation { } nextRefresh = Math.max(getRefreshTime(tgt), now + kerberosMinSecondsBeforeRelogin); +metrics.renewalFailures.set(0); 
+rp = null; } catch (InterruptedException ie) { LOG.warn("Terminating renewal thread"); return; } catch (IOException ie) { -LOG.warn("Exception encountered while running the" + -" renewal command. Aborting renew thread. " + ie); -return; +metrics.renewalFailuresTotal.incr(); +final long tgtEndTime =
hadoop git commit: HADOOP-13590. Retry until TGT expires even if the UGI renewal thread encountered exception.
Repository: hadoop Updated Branches: refs/heads/branch-2 4a023b441 -> ede909144 HADOOP-13590. Retry until TGT expires even if the UGI renewal thread encountered exception. (cherry picked from commit 367c3d41217728c2e61252c5a5235e5bc1f9822f) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ede90914 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ede90914 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ede90914 Branch: refs/heads/branch-2 Commit: ede909144da6fe8bde2f6561fa44a458674f96e2 Parents: 4a023b4 Author: Xiao ChenAuthored: Wed Nov 9 09:08:04 2016 -0800 Committer: Xiao Chen Committed: Wed Nov 9 09:09:13 2016 -0800 -- .../hadoop/security/UserGroupInformation.java | 70 ++- .../security/TestUserGroupInformation.java | 94 2 files changed, 160 insertions(+), 4 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/ede90914/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java -- diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java index 79e56e7..64c8460 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java @@ -43,6 +43,7 @@ import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.TimeUnit; import javax.security.auth.Subject; import javax.security.auth.callback.CallbackHandler; @@ -54,14 +55,18 @@ import javax.security.auth.login.LoginContext; import javax.security.auth.login.LoginException; import javax.security.auth.spi.LoginModule; +import org.apache.hadoop.io.retry.RetryPolicies; import 
org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.retry.RetryPolicy; import org.apache.hadoop.metrics2.annotation.Metric; import org.apache.hadoop.metrics2.annotation.Metrics; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.lib.MetricsRegistry; +import org.apache.hadoop.metrics2.lib.MutableGaugeInt; +import org.apache.hadoop.metrics2.lib.MutableGaugeLong; import org.apache.hadoop.metrics2.lib.MutableQuantiles; import org.apache.hadoop.metrics2.lib.MutableRate; import org.apache.hadoop.security.SaslRpcServer.AuthMethod; @@ -85,7 +90,8 @@ import org.slf4j.LoggerFactory; @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce", "HBase", "Hive", "Oozie"}) @InterfaceStability.Evolving public class UserGroupInformation { - private static final Logger LOG = LoggerFactory.getLogger( + @VisibleForTesting + static final Logger LOG = LoggerFactory.getLogger( UserGroupInformation.class); /** @@ -121,6 +127,10 @@ public class UserGroupInformation { MutableRate loginFailure; @Metric("GetGroups") MutableRate getGroups; MutableQuantiles[] getGroupsQuantiles; +@Metric("Renewal failures since startup") +private MutableGaugeLong renewalFailuresTotal; +@Metric("Renewal failures since last successful login") +private MutableGaugeInt renewalFailures; static UgiMetrics create() { return DefaultMetricsSystem.instance().register(new UgiMetrics()); @@ -138,6 +148,10 @@ public class UserGroupInformation { } } } + +MutableGaugeInt getRenewalFailures() { + return renewalFailures; +} } /** @@ -961,6 +975,7 @@ public class UserGroupInformation { return; } long nextRefresh = getRefreshTime(tgt); +RetryPolicy rp = null; while (true) { try { long now = Time.now(); @@ -984,13 +999,40 @@ public class UserGroupInformation { } nextRefresh = Math.max(getRefreshTime(tgt), now + 
kerberosMinSecondsBeforeRelogin); +metrics.renewalFailures.set(0); +rp = null; } catch (InterruptedException ie) { LOG.warn("Terminating renewal thread"); return; } catch (IOException ie) { -LOG.warn("Exception encountered while running the" + -" renewal command. Aborting renew thread. " + ie); -return; +metrics.renewalFailuresTotal.incr(); +final long tgtEndTime =
hadoop git commit: YARN-5833. Addendum patch to include missing changes to yarn-default.xml
Repository: hadoop Updated Branches: refs/heads/branch-2 53ff3c9e0 -> 4a023b441 YARN-5833. Addendum patch to include missing changes to yarn-default.xml (cherry picked from commit 280357c29f867e3ef6386ea5bd0f7b7ca6fe04eb) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4a023b44 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4a023b44 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4a023b44 Branch: refs/heads/branch-2 Commit: 4a023b44145a7fd51cc109e6751e824d1e521a0f Parents: 53ff3c9 Author: Arun SureshAuthored: Wed Nov 9 07:15:11 2016 -0800 Committer: Arun Suresh Committed: Wed Nov 9 07:16:39 2016 -0800 -- .../hadoop-yarn-common/src/main/resources/yarn-default.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a023b44/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml index 59597f6..46c03e2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml @@ -2666,7 +2666,7 @@ The address of the AMRMProxyService listener. yarn.nodemanager.amrmproxy.address -0.0.0.0:8048 +0.0.0.0:8049 - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
hadoop git commit: YARN-5833. Addendum patch to include missing changes to yarn-default.xml
Repository: hadoop Updated Branches: refs/heads/trunk c07488009 -> 280357c29 YARN-5833. Addendum patch to include missing changes to yarn-default.xml Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/280357c2 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/280357c2 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/280357c2 Branch: refs/heads/trunk Commit: 280357c29f867e3ef6386ea5bd0f7b7ca6fe04eb Parents: c074880 Author: Arun SureshAuthored: Wed Nov 9 07:15:11 2016 -0800 Committer: Arun Suresh Committed: Wed Nov 9 07:15:11 2016 -0800 -- .../hadoop-yarn-common/src/main/resources/yarn-default.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/280357c2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml index e890b40..834ead7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml @@ -2724,7 +2724,7 @@ The address of the AMRMProxyService listener. yarn.nodemanager.amrmproxy.address -0.0.0.0:8048 +0.0.0.0:8049 - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
hadoop git commit: HADOOP-13800. Remove unused HADOOP_AUDIT_LOGGER from hadoop-env.sh. Contributed by Yiqun Lin.
Repository: hadoop Updated Branches: refs/heads/trunk 09f43fa9c -> c07488009 HADOOP-13800. Remove unused HADOOP_AUDIT_LOGGER from hadoop-env.sh. Contributed by Yiqun Lin. Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c0748800 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c0748800 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c0748800 Branch: refs/heads/trunk Commit: c074880096bd41470a3358f6002f30b57a725375 Parents: 09f43fa Author: Akira AjisakaAuthored: Wed Nov 9 22:02:40 2016 +0900 Committer: Akira Ajisaka Committed: Wed Nov 9 22:02:40 2016 +0900 -- .../hadoop-common/src/main/conf/hadoop-env.sh | 6 -- 1 file changed, 6 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/c0748800/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh -- diff --git a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh index a78f3f6..4fb9be9 100644 --- a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh +++ b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh @@ -223,12 +223,6 @@ esac # Java property: hadoop.security.logger # export HADOOP_SECURITY_LOGGER=INFO,NullAppender -# Default log level for file system audit messages. -# Generally, this is specifically set in the namenode-specific -# options line. -# Java property: hdfs.audit.logger -# export HADOOP_AUDIT_LOGGER=INFO,NullAppender - # Default process priority level # Note that sub-processes will also run at this level! # export HADOOP_NICENESS=0 - To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org For additional commands, e-mail: common-commits-h...@hadoop.apache.org
hadoop git commit: YARN-5736. YARN container executor config does not handle white space (miklos.szeg...@cloudera.com via rkanter)
Repository: hadoop Updated Branches: refs/heads/trunk 283fa33fe -> 09f43fa9c YARN-5736. YARN container executor config does not handle white space (miklos.szeg...@cloudera.com via rkanter) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/09f43fa9 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/09f43fa9 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/09f43fa9 Branch: refs/heads/trunk Commit: 09f43fa9c089ebfc18401ce84755d3f2000ba033 Parents: 283fa33 Author: Robert KanterAuthored: Wed Nov 9 13:34:40 2016 +0100 Committer: Robert Kanter Committed: Wed Nov 9 13:34:40 2016 +0100 -- .../container-executor/impl/configuration.c | 41 +-- .../container-executor/impl/configuration.h | 9 +++ .../main/native/container-executor/impl/main.c | 5 +- .../test/test-container-executor.c | 77 +++- 4 files changed, 123 insertions(+), 9 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/09f43fa9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c index 69ceaf6..8da7d24 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.c @@ -31,6 +31,7 @@ #include #include #include +#include #define MAX_SIZE 10 @@ -126,6 +127,37 @@ int check_configuration_permissions(const char* file_name) { return 0; } +/** + * Trim whitespace from beginning and end. 
+*/ +char* trim(char* input) +{ +char *val_begin; +char *val_end; +char *ret; + +if (input == NULL) { + return NULL; +} + +val_begin = input; +val_end = input + strlen(input); + +while (val_begin < val_end && isspace(*val_begin)) + val_begin++; +while (val_end > val_begin && isspace(*(val_end - 1))) + val_end--; + +ret = (char *) malloc( +sizeof(char) * (val_end - val_begin + 1)); +if (ret == NULL) { + fprintf(ERRORFILE, "Allocation error\n"); + exit(OUT_OF_MEMORY); +} + +strncpy(ret, val_begin, val_end - val_begin); +return ret; +} void read_config(const char* file_name, struct configuration *cfg) { FILE *conf_file; @@ -202,9 +234,8 @@ void read_config(const char* file_name, struct configuration *cfg) { #endif memset(cfg->confdetails[cfg->size], 0, sizeof(struct confentry)); -cfg->confdetails[cfg->size]->key = (char *) malloc( -sizeof(char) * (strlen(equaltok)+1)); -strcpy((char *)cfg->confdetails[cfg->size]->key, equaltok); +cfg->confdetails[cfg->size]->key = trim(equaltok); + equaltok = strtok_r(NULL, "=", _equaltok); if (equaltok == NULL) { fprintf(LOGFILE, "configuration tokenization failed \n"); @@ -222,9 +253,7 @@ void read_config(const char* file_name, struct configuration *cfg) { fprintf(LOGFILE, "read_config : Adding conf value : %s \n", equaltok); #endif -cfg->confdetails[cfg->size]->value = (char *) malloc( -sizeof(char) * (strlen(equaltok)+1)); -strcpy((char *)cfg->confdetails[cfg->size]->value, equaltok); +cfg->confdetails[cfg->size]->value = trim(equaltok); if((cfg->size + 1) % MAX_SIZE == 0) { cfg->confdetails = (struct confentry **) realloc(cfg->confdetails, sizeof(struct confentry **) * (MAX_SIZE + cfg->size)); http://git-wip-us.apache.org/repos/asf/hadoop/blob/09f43fa9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.h -- diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.h b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.h index eced13b..2d14867 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/configuration.h +++
hadoop git commit: YARN-5823. Update NMTokens in case of requests with only opportunistic containers. (Konstantinos Karanasos via asuresh)
Repository: hadoop Updated Branches: refs/heads/trunk ed0bebaba -> 283fa33fe YARN-5823. Update NMTokens in case of requests with only opportunistic containers. (Konstantinos Karanasos via asuresh) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/283fa33f Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/283fa33f Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/283fa33f Branch: refs/heads/trunk Commit: 283fa33febe043bd7b4fa87546be26c9c5a8f8b5 Parents: ed0beba Author: Arun SureshAuthored: Wed Nov 9 00:11:25 2016 -0800 Committer: Arun Suresh Committed: Wed Nov 9 00:11:25 2016 -0800 -- .../TestOpportunisticContainerAllocation.java | 71 +++- .../OpportunisticContainerAllocator.java| 55 --- .../containermanager/ContainerManagerImpl.java | 2 +- .../scheduler/DistributedScheduler.java | 19 -- .../ApplicationMasterService.java | 3 +- ...pportunisticContainerAllocatorAMService.java | 23 ++- 6 files changed, 137 insertions(+), 36 deletions(-) -- http://git-wip-us.apache.org/repos/asf/hadoop/blob/283fa33f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestOpportunisticContainerAllocation.java -- diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestOpportunisticContainerAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestOpportunisticContainerAllocation.java index b9b4b02..ace145d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestOpportunisticContainerAllocation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestOpportunisticContainerAllocation.java @@ -229,6 +229,9 @@ public class TestOpportunisticContainerAllocation { 
amClient.registerApplicationMaster("Host", 1, ""); + testOpportunisticAllocation( + (AMRMClientImpl) amClient); + testAllocation((AMRMClientImpl)amClient); amClient @@ -247,7 +250,6 @@ public class TestOpportunisticContainerAllocation { final AMRMClientImpl amClient) throws YarnException, IOException { // setup container request - assertEquals(0, amClient.ask.size()); assertEquals(0, amClient.release.size()); @@ -388,6 +390,73 @@ public class TestOpportunisticContainerAllocation { assertEquals(0, amClient.release.size()); } + /** + * Tests allocation with requests comprising only opportunistic containers. + */ + private void testOpportunisticAllocation( + final AMRMClientImpl amClient) + throws YarnException, IOException { +// setup container request +assertEquals(0, amClient.ask.size()); +assertEquals(0, amClient.release.size()); + +amClient.addContainerRequest( +new AMRMClient.ContainerRequest(capability, null, null, priority, 0, +true, null, +ExecutionTypeRequest.newInstance( +ExecutionType.OPPORTUNISTIC, true))); +amClient.addContainerRequest( +new AMRMClient.ContainerRequest(capability, null, null, priority, 0, +true, null, +ExecutionTypeRequest.newInstance( +ExecutionType.OPPORTUNISTIC, true))); + +int oppContainersRequestedAny = +amClient.getTable(0).get(priority, ResourceRequest.ANY, +ExecutionType.OPPORTUNISTIC, capability).remoteRequest +.getNumContainers(); + +assertEquals(2, oppContainersRequestedAny); + +assertEquals(1, amClient.ask.size()); +assertEquals(0, amClient.release.size()); + +// RM should allocate container within 2 calls to allocate() +int allocatedContainerCount = 0; +int iterationsLeft = 10; +Set releases = new TreeSet<>(); + +amClient.getNMTokenCache().clearCache(); +Assert.assertEquals(0, +amClient.getNMTokenCache().numberOfTokensInCache()); +HashMap receivedNMTokens = new HashMap<>(); + +while (allocatedContainerCount < oppContainersRequestedAny +&& iterationsLeft-- > 0) { + AllocateResponse allocResponse = amClient.allocate(0.1f); + 
assertEquals(0, amClient.ask.size()); + assertEquals(0, amClient.release.size()); + + for (Container container : allocResponse.getAllocatedContainers()) { +allocatedContainerCount++; +ContainerId rejectContainerId = container.getId(); +releases.add(rejectContainerId); + } + + for (NMToken token :