[Hadoop Wiki] Update of "HowToContribute" by AkiraAjisaka

2018-12-05 Thread Apache Wiki
Dear Wiki user,

You have subscribed to a wiki page or wiki category on "Hadoop Wiki" for change 
notification.

The "HowToContribute" page has been changed by AkiraAjisaka:
https://wiki.apache.org/hadoop/HowToContribute?action=diff&rev1=119&rev2=120

Comment:
Fix url

  = How to Contribute to Hadoop =
- Content moved to Confluence - https://cwiki.apache.org/confluence/display/HADOOP/HowToContribute
+ Content moved to Confluence - https://cwiki.apache.org/confluence/display/HADOOP/How+To+Contribute
  
  Email common-...@hadoop.apache.org if you need write access to the cwiki.
  

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: YARN-9025. TestFairScheduler#testChildMaxResources is flaky. (Contributed by Szilard Nemeth)

2018-12-05 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/trunk 912b1f9d6 -> 5d4a43266


YARN-9025. TestFairScheduler#testChildMaxResources is flaky. (Contributed by 
Szilard Nemeth)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5d4a4326
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5d4a4326
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5d4a4326

Branch: refs/heads/trunk
Commit: 5d4a432660abbb42303965df9a80097897389e66
Parents: 912b1f9
Author: Haibo Chen 
Authored: Wed Dec 5 16:36:13 2018 -0800
Committer: Haibo Chen 
Committed: Wed Dec 5 16:36:13 2018 -0800

--
 .../scheduler/fair/TestFairScheduler.java   | 102 +++
 1 file changed, 60 insertions(+), 42 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d4a4326/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
index 0d6caeb..1a60693 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
@@ -18,35 +18,8 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
 
-import static org.apache.hadoop.yarn.conf.YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-import java.io.File;
-import java.io.FileWriter;
-import java.io.IOException;
-import java.io.PrintWriter;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.List;
-import java.util.Map;
-import java.util.Set;
-import java.util.stream.Collectors;
-
-import javax.xml.parsers.ParserConfigurationException;
-
 import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.ha.HAServiceProtocol;
@@ -73,8 +46,7 @@ import org.apache.hadoop.yarn.event.AsyncDispatcher;
 import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.event.Event;
 import org.apache.hadoop.yarn.event.EventHandler;
-import org.apache.hadoop.yarn.exceptions
-.SchedulerInvalidResoureRequestException;
+import org.apache.hadoop.yarn.exceptions.SchedulerInvalidResoureRequestException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.security.YarnAuthorizationProvider;
@@ -101,8 +73,6 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEven
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeResourceUpdateEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
-
-
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplicationAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
@@ -129,7 +99,32 @@ import org.junit.Test;
 import org.mockito.Mockito;
 import org.xml.sax.SAXException;
 
-import com.google.common.collect.Sets;
+import javax.xml.parsers.ParserConfigurationException;
+import java.io.File;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import 

hadoop git commit: YARN-9071. Improved status update for reinitialized containers. Contributed by Chandni Singh

2018-12-05 Thread eyang
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 293c992e8 -> 7ef4ff190


YARN-9071.  Improved status update for reinitialized containers.
Contributed by Chandni Singh

(cherry picked from commit 1b790f4dd1f682423d5dbb8e70c6225cbddce989)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ef4ff19
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ef4ff19
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ef4ff19

Branch: refs/heads/branch-3.1
Commit: 7ef4ff19057bbb449385358ce0bc2c635ab2b5bb
Parents: 293c992
Author: Eric Yang 
Authored: Wed Dec 5 17:00:56 2018 -0500
Committer: Eric Yang 
Committed: Wed Dec 5 19:05:26 2018 -0500

--
 .../component/instance/ComponentInstance.java   | 76 +---
 .../instance/TestComponentInstance.java | 36 ++
 .../container/ContainerImpl.java| 23 --
 .../monitor/ContainerMetrics.java   |  8 ++-
 .../monitor/ContainerStopMonitoringEvent.java   | 12 
 .../monitor/ContainersMonitorImpl.java  |  6 +-
 .../monitor/TestContainerMetrics.java   |  6 +-
 7 files changed, 131 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ef4ff19/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
index 86b0e32..0145847 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
@@ -154,10 +154,14 @@ public class ComponentInstance implements EventHandler<ComponentInstanceEvent>,
   REINITIALIZED), START, new StartedAfterUpgradeTransition())
   .addTransition(CANCEL_UPGRADING, EnumSet.of(CANCEL_UPGRADING, INIT),
   STOP, new StoppedAfterCancelUpgradeTransition())
+
+  // FROM REINITIALIZED
   .addTransition(REINITIALIZED, CANCEL_UPGRADING, CANCEL_UPGRADE,
   new CancelledAfterReinitTransition())
   .addTransition(REINITIALIZED, READY, BECOME_READY,
new ContainerBecomeReadyTransition(true))
+  .addTransition(REINITIALIZED, REINITIALIZED, STOP,
+  new StoppedAfterUpgradeTransition())
   .installTopology();
 
   public ComponentInstance(Component component,
@@ -184,20 +188,7 @@ public class ComponentInstance implements EventHandler<ComponentInstanceEvent>,
 @Override public void transition(ComponentInstance compInstance,
 ComponentInstanceEvent event) {
   // Query container status for ip and host
-  boolean cancelOnSuccess = true;
-  if (compInstance.getCompSpec().getArtifact() != null && compInstance
-  .getCompSpec().getArtifact().getType() == Artifact.TypeEnum.DOCKER) {
-// A docker container might get a different IP if the container is
-// relaunched by the NM, so we need to keep checking the status.
-// This is a temporary fix until the NM provides a callback for
-// container relaunch (see YARN-8265).
-cancelOnSuccess = false;
-  }
-  compInstance.containerStatusFuture =
-  compInstance.scheduler.executorService.scheduleAtFixedRate(
-  new ContainerStatusRetriever(compInstance.scheduler,
-  event.getContainerId(), compInstance, cancelOnSuccess), 0, 1,
-  TimeUnit.SECONDS);
+  compInstance.initializeStatusRetriever(event);
   long containerStartTime = System.currentTimeMillis();
   try {
 ContainerTokenIdentifier containerTokenIdentifier = BuilderUtils
@@ -277,6 +268,7 @@ public class ComponentInstance implements EventHandler<ComponentInstanceEvent>,
 
   instance.upgradeInProgress.set(false);
   instance.setContainerState(ContainerState.RUNNING_BUT_UNREADY);
+  instance.initializeStatusRetriever(event);
 
   Component.UpgradeStatus status = instance.getState().equals(UPGRADING) ?
   instance.component.getUpgradeStatus() :
@@ -572,13 +564,9 @@ public class ComponentInstance implements EventHandler<ComponentInstanceEvent>,
   
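Both hunks above replace the inline polling setup with a call to a new
initializeStatusRetriever(event) helper whose body is not part of this
excerpt. A sketch of what such a helper could look like, reconstructed only
from the deleted lines (a guess at the consolidation, not the committed
method):

    // Sketch: consolidates the deleted inline logic into one helper on
    // ComponentInstance; names and types are taken from the diff above.
    private void initializeStatusRetriever(ComponentInstanceEvent event) {
      boolean cancelOnSuccess = true;
      if (getCompSpec().getArtifact() != null
          && getCompSpec().getArtifact().getType() == Artifact.TypeEnum.DOCKER) {
        // A docker container might get a different IP if relaunched by the
        // NM, so keep polling the status (temporary fix until YARN-8265).
        cancelOnSuccess = false;
      }
      containerStatusFuture = scheduler.executorService.scheduleAtFixedRate(
          new ContainerStatusRetriever(scheduler, event.getContainerId(),
              this, cancelOnSuccess), 0, 1, TimeUnit.SECONDS);
    }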

hadoop git commit: YARN-9019. Ratio calculation of ResourceCalculator implementations could return NaN. (Contributed by Szilard Nemeth)

2018-12-05 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/trunk b3c75c1f1 -> 912b1f9d6


YARN-9019. Ratio calculation of ResourceCalculator implementations could return 
NaN. (Contributed by Szilard Nemeth)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/912b1f9d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/912b1f9d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/912b1f9d

Branch: refs/heads/trunk
Commit: 912b1f9d64a61ef2663d95e2b4f286e6ee8d5ff9
Parents: b3c75c1
Author: Haibo Chen 
Authored: Wed Dec 5 15:15:30 2018 -0800
Committer: Haibo Chen 
Committed: Wed Dec 5 15:16:41 2018 -0800

--
 .../resource/DefaultResourceCalculator.java |  2 +-
 .../resource/DominantResourceCalculator.java|  4 +--
 .../yarn/util/resource/ResourceCalculator.java  | 18 ++
 .../util/resource/TestResourceCalculator.java   | 26 +++-
 4 files changed, 41 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/912b1f9d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
index ab6d7f5..9a3f703 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java
@@ -57,7 +57,7 @@ public class DefaultResourceCalculator extends ResourceCalculator {
 
   @Override
   public float ratio(Resource a, Resource b) {
-return (float)a.getMemorySize() / b.getMemorySize();
+return divideSafelyAsFloat(a.getMemorySize(), b.getMemorySize());
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/912b1f9d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
index 9aeb51c..29d7e7e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java
@@ -379,8 +379,8 @@ public class DominantResourceCalculator extends ResourceCalculator {
 for (int i = 0; i < maxLength; i++) {
   ResourceInformation aResourceInformation = a.getResourceInformation(i);
   ResourceInformation bResourceInformation = b.getResourceInformation(i);
-  float tmp = (float) aResourceInformation.getValue()
-  / (float) bResourceInformation.getValue();
+  final float tmp = divideSafelyAsFloat(aResourceInformation.getValue(),
+  bResourceInformation.getValue());
   ratio = ratio > tmp ? ratio : tmp;
 }
 return ratio;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/912b1f9d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
index 27394f7..09d5ec1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java
@@ -87,6 +87,24 @@ public abstract class ResourceCalculator {
 return (long) Math.ceil(a/b);
   }
 
+  /**
+   * Divides lhs by rhs.
+   * If both lhs and rhs are 0, then 0 is returned. This avoids the
+   * division by zero that would otherwise yield NaN.
+   * If rhs is zero but lhs is not, Float.POSITIVE_INFINITY will be
+   * returned as the result.
+   * @param lhs
+   * @param rhs
+   * @return
+   */
+  
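The diff is truncated before the body of the new method, but the Javadoc plus
the two call sites above (DefaultResourceCalculator.ratio and
DominantResourceCalculator's per-resource loop) pin down the behavior. A
minimal implementation consistent with them would be (a sketch, not
necessarily the committed code):

    // Sketch: 0/0 returns 0 instead of NaN; a nonzero lhs divided by a
    // zero rhs still yields Float.POSITIVE_INFINITY, as documented above.
    public static float divideSafelyAsFloat(long lhs, long rhs) {
      if (lhs == 0 && rhs == 0) {
        return 0;
      }
      return (float) lhs / (float) rhs;
    }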

hadoop git commit: YARN-8985. Improve debug log in FSParentQueue when assigning container. (Contributed by Wilfred Spiegelenburg)

2018-12-05 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/trunk 2bba329c1 -> b3c75c1f1


YARN-8985. Improve debug log in FSParentQueue when assigning container. 
(Contributed by  Wilfred Spiegelenburg)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b3c75c1f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b3c75c1f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b3c75c1f

Branch: refs/heads/trunk
Commit: b3c75c1f1d347d1a9e5bd54decf27babb6a309f6
Parents: 2bba329
Author: Haibo Chen 
Authored: Wed Dec 5 15:02:57 2018 -0800
Committer: Haibo Chen 
Committed: Wed Dec 5 15:02:57 2018 -0800

--
 .../server/resourcemanager/scheduler/fair/FSParentQueue.java  | 7 ++-
 1 file changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b3c75c1f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
index 9a52d37..56a490b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java
@@ -196,7 +196,8 @@ public class FSParentQueue extends FSQueue {
 // If this queue is over its limit, reject
 if (!assignContainerPreCheck(node)) {
   if (LOG.isDebugEnabled()) {
-LOG.debug("Assign container precheck on node " + node + " failed");
+LOG.debug("Assign container precheck for queue " + getName() +
+" on node " + node.getNodeName() + " failed");
   }
   return assigned;
 }
@@ -212,6 +213,10 @@ public class FSParentQueue extends FSQueue {
  TreeSet<FSQueue> sortedChildQueues = new TreeSet<>(policy.getComparator());
 readLock.lock();
 try {
+  if (LOG.isDebugEnabled()) {
+LOG.debug("Node " + node.getNodeName() + " offered to parent queue: " +
+getName() + " visiting " + childQueues.size() + " children");
+  }
   sortedChildQueues.addAll(childQueues);
   for (FSQueue child : sortedChildQueues) {
 assigned = child.assignContainer(node);





hadoop git commit: YARN-8994. Fix race condition between move app and queue cleanup in Fair Scheduler. (Contributed by Wilfred Spiegelenburg)

2018-12-05 Thread haibochen
Repository: hadoop
Updated Branches:
  refs/heads/trunk 1b790f4dd -> 2bba329c1


YARN-8994. Fix race condition between move app and queue cleanup in Fair 
Scheduler. (Contributed by Wilfred Spiegelenburg)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2bba329c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2bba329c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2bba329c

Branch: refs/heads/trunk
Commit: 2bba329c141dea17a1a9f7112f7155e11264ccfd
Parents: 1b790f4
Author: Haibo Chen 
Authored: Wed Dec 5 14:38:15 2018 -0800
Committer: Haibo Chen 
Committed: Wed Dec 5 14:38:15 2018 -0800

--
 .../scheduler/fair/FairScheduler.java   | 52 
 1 file changed, 31 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2bba329c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index e5d2a06..282367e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -1735,13 +1735,15 @@ public class FairScheduler extends
   String queueName) throws YarnException {
 writeLock.lock();
 try {
+  // app could have finished between pre check and now
   SchedulerApplication<FSAppAttempt> app = applications.get(appId);
   if (app == null) {
 throw new YarnException("App to be moved " + appId + " not found.");
   }
-  FSAppAttempt attempt = (FSAppAttempt) app.getCurrentAppAttempt();
-  // To serialize with FairScheduler#allocate, synchronize on app attempt
+  FSLeafQueue targetQueue = null;
 
+  // To serialize with FairScheduler#allocate, synchronize on app attempt
+  FSAppAttempt attempt = app.getCurrentAppAttempt();
   attempt.getWriteLock().lock();
   try {
 FSLeafQueue oldQueue = (FSLeafQueue) app.getQueue();
@@ -1753,7 +1755,9 @@ public class FairScheduler extends
   + " is stopped and can't be moved!");
 }
 String destQueueName = handleMoveToPlanQueue(queueName);
-FSLeafQueue targetQueue = queueMgr.getLeafQueue(destQueueName, false);
+// Prevent removal of the queue while the move is in progress by
+// registering the app as submitted to the queue.
+targetQueue = queueMgr.getLeafQueue(destQueueName, false, appId);
 if (targetQueue == null) {
   throw new YarnException("Target queue " + queueName
   + " not found or is not a leaf queue.");
@@ -1766,9 +1770,14 @@ public class FairScheduler extends
   verifyMoveDoesNotViolateConstraints(attempt, oldQueue, targetQueue);
 }
 
+// The move itself will clean up the app submit registration.
 executeMove(app, attempt, oldQueue, targetQueue);
 return targetQueue.getQueueName();
   } finally {
+// Cleanup the submit registration in case of move failure.
+if (targetQueue != null) {
+  targetQueue.removeAssignedApp(appId);
+}
 attempt.getWriteLock().unlock();
   }
 } finally {
@@ -1776,6 +1785,17 @@ public class FairScheduler extends
 }
   }
 
+  /**
+   * Perform pre-checks while moving the application. This should not check any
+   * application values that can change since the check is not part of an
+   * atomic action. During a move the scheduler can still assign containers and
+   * the app can still be updated until the real move is performed under
+   * proper locking.
+   *
+   * @param appId The ID of the app to be moved
+   * @param newQueue The name of the queue the app should move to
+   * @throws YarnException if the validation fails
+   */
   @Override
   public void preValidateMoveApplication(ApplicationId appId, String newQueue)
   throws YarnException {
@@ -1786,24 +1806,14 @@ public class FairScheduler extends
 throw new YarnException("App to be moved " + appId + " not found.");
   }
 
-  FSAppAttempt attempt = app.getCurrentAppAttempt();
-  
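The shape of the fix is visible across the hunks: the app is registered with
the target queue up front via queueMgr.getLeafQueue(destQueueName, false,
appId), which blocks removal of the queue, and the registration is always
cleared in the finally block. Stripped to its skeleton (names from the diff;
error handling abbreviated, not the full committed method):

    // Skeleton of the register-then-cleanup pattern from moveApplication.
    FSLeafQueue targetQueue = null;
    attempt.getWriteLock().lock();
    try {
      // Registering appId keeps the queue alive while the move runs.
      targetQueue = queueMgr.getLeafQueue(destQueueName, false, appId);
      if (targetQueue == null) {
        throw new YarnException("Target queue not found or not a leaf queue.");
      }
      executeMove(app, attempt, oldQueue, targetQueue);
      return targetQueue.getQueueName();
    } finally {
      // Presumably a no-op when the move itself already cleaned up the
      // registration; needed when the move failed part-way through.
      if (targetQueue != null) {
        targetQueue.removeAssignedApp(appId);
      }
      attempt.getWriteLock().unlock();
    }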

hadoop git commit: YARN-9071. Improved status update for reinitialized containers. Contributed by Chandni Singh

2018-12-05 Thread eyang
Repository: hadoop
Updated Branches:
  refs/heads/trunk 1dabb31cd -> 1b790f4dd


YARN-9071.  Improved status update for reinitialized containers.
Contributed by Chandni Singh


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1b790f4d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1b790f4d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1b790f4d

Branch: refs/heads/trunk
Commit: 1b790f4dd1f682423d5dbb8e70c6225cbddce989
Parents: 1dabb31
Author: Eric Yang 
Authored: Wed Dec 5 17:00:56 2018 -0500
Committer: Eric Yang 
Committed: Wed Dec 5 17:00:56 2018 -0500

--
 .../component/instance/ComponentInstance.java   | 76 +---
 .../instance/TestComponentInstance.java | 36 ++
 .../container/ContainerImpl.java| 23 --
 .../monitor/ContainerMetrics.java   |  8 ++-
 .../monitor/ContainerStopMonitoringEvent.java   | 12 
 .../monitor/ContainersMonitorImpl.java  |  6 +-
 .../monitor/TestContainerMetrics.java   |  6 +-
 7 files changed, 131 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b790f4d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
index 25aba77..ef844a5 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/component/instance/ComponentInstance.java
@@ -152,10 +152,14 @@ public class ComponentInstance implements EventHandler<ComponentInstanceEvent>,
   REINITIALIZED), START, new StartedAfterUpgradeTransition())
   .addTransition(CANCEL_UPGRADING, EnumSet.of(CANCEL_UPGRADING, INIT),
   STOP, new StoppedAfterCancelUpgradeTransition())
+
+  // FROM REINITIALIZED
   .addTransition(REINITIALIZED, CANCEL_UPGRADING, CANCEL_UPGRADE,
   new CancelledAfterReinitTransition())
   .addTransition(REINITIALIZED, READY, BECOME_READY,
new ContainerBecomeReadyTransition(true))
+  .addTransition(REINITIALIZED, REINITIALIZED, STOP,
+  new StoppedAfterUpgradeTransition())
   .installTopology();
 
   public ComponentInstance(Component component,
@@ -182,20 +186,7 @@ public class ComponentInstance implements EventHandler<ComponentInstanceEvent>,
 @Override public void transition(ComponentInstance compInstance,
 ComponentInstanceEvent event) {
   // Query container status for ip and host
-  boolean cancelOnSuccess = true;
-  if (compInstance.getCompSpec().getArtifact() != null && compInstance
-  .getCompSpec().getArtifact().getType() == Artifact.TypeEnum.DOCKER) {
-// A docker container might get a different IP if the container is
-// relaunched by the NM, so we need to keep checking the status.
-// This is a temporary fix until the NM provides a callback for
-// container relaunch (see YARN-8265).
-cancelOnSuccess = false;
-  }
-  compInstance.containerStatusFuture =
-  compInstance.scheduler.executorService.scheduleAtFixedRate(
-  new ContainerStatusRetriever(compInstance.scheduler,
-  event.getContainerId(), compInstance, cancelOnSuccess), 0, 1,
-  TimeUnit.SECONDS);
+  compInstance.initializeStatusRetriever(event);
   long containerStartTime = System.currentTimeMillis();
   try {
 ContainerTokenIdentifier containerTokenIdentifier = BuilderUtils
@@ -275,6 +266,7 @@ public class ComponentInstance implements EventHandler<ComponentInstanceEvent>,
 
   instance.upgradeInProgress.set(false);
   instance.setContainerState(ContainerState.RUNNING_BUT_UNREADY);
+  instance.initializeStatusRetriever(event);
 
   Component.UpgradeStatus status = instance.getState().equals(UPGRADING) ?
   instance.component.getUpgradeStatus() :
@@ -570,13 +562,9 @@ public class ComponentInstance implements EventHandler<ComponentInstanceEvent>,
   instance.setContainerState(ContainerState.UPGRADING);
   instance.component.decContainersReady(false);
 
- 

[50/50] [abbrv] hadoop git commit: Merge branch 'trunk' into HDFS-12943

2018-12-05 Thread shv
Merge branch 'trunk' into HDFS-12943


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/47d72601
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/47d72601
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/47d72601

Branch: refs/heads/HDFS-12943
Commit: 47d726015810d091f58387a0fc6bcc0ad30f4b02
Parents: b549707 1dabb31
Author: Konstantin V Shvachko 
Authored: Wed Dec 5 13:38:45 2018 -0800
Committer: Konstantin V Shvachko 
Committed: Wed Dec 5 13:38:45 2018 -0800

--
 .../resources/assemblies/hadoop-yarn-dist.xml   |  23 +-
 .../authentication/util/KerberosName.java   |   9 +-
 .../TestKerberosAuthenticationHandler.java  |   7 +-
 .../authentication/util/TestKerberosName.java   |  17 +-
 .../hadoop-common/src/main/conf/hadoop-env.sh   |  13 -
 .../hadoop/fs/FileSystemMultipartUploader.java  |  36 +-
 .../org/apache/hadoop/fs/MultipartUploader.java |  88 ++-
 .../hadoop/fs/MultipartUploaderFactory.java |   7 +
 .../apache/hadoop/fs/shell/CopyCommands.java|   7 +-
 .../java/org/apache/hadoop/security/KDiag.java  |  46 +-
 .../hadoop/security/LdapGroupsMapping.java  | 163 ++--
 .../src/main/resources/core-default.xml |  28 +-
 .../src/site/markdown/GroupsMapping.md  |  54 +-
 .../src/site/markdown/SecureMode.md |   6 -
 .../src/site/markdown/SingleCluster.md.vm   |   3 -
 .../src/site/markdown/filesystem/index.md   |   1 +
 .../markdown/filesystem/multipartuploader.md| 235 ++
 .../AbstractContractMultipartUploaderTest.java  | 565 +++---
 .../TestLocalFSContractMultipartUploader.java   |  10 +
 .../hadoop/fs/shell/TestCopyPreserveFlag.java   |  10 +
 .../org/apache/hadoop/security/TestKDiag.java   |  16 -
 .../hadoop/security/TestLdapGroupsMapping.java  |  80 +-
 .../security/TestLdapGroupsMappingBase.java |  76 +-
 .../TestLdapGroupsMappingWithFailover.java  | 142 
 .../TestLdapGroupsMappingWithOneQuery.java  |  16 +-
 .../TestLdapGroupsMappingWithPosixGroup.java|  10 +-
 .../security/TestUserGroupInformation.java  |  27 +-
 .../hdds/scm/storage/ChunkInputStream.java  |  11 +
 .../hdds/scm/storage/ChunkOutputStream.java |  21 +-
 .../org/apache/hadoop/hdds/HddsConfigKeys.java  |   8 +
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   |   8 +
 .../hdds/scm/container/ContainerInfo.java   |  29 +-
 .../scm/storage/ContainerProtocolCalls.java |  14 +-
 .../apache/hadoop/ozone/OzoneConfigKeys.java|  27 +-
 .../org/apache/hadoop/ozone/audit/DNAction.java |  44 +-
 .../apache/hadoop/ozone/common/Checksum.java| 249 ++
 .../hadoop/ozone/common/ChecksumData.java   | 190 +
 .../ozone/common/OzoneChecksumException.java|  66 ++
 .../container/common/helpers/ChunkInfo.java |  33 +-
 .../hadoop/utils/db/ByteArrayKeyValue.java  |  67 ++
 .../java/org/apache/hadoop/utils/db/Codec.java  |  38 +
 .../apache/hadoop/utils/db/CodecRegistry.java   |  70 ++
 .../org/apache/hadoop/utils/db/DBStore.java |  12 +-
 .../org/apache/hadoop/utils/db/RDBStore.java|  12 +-
 .../hadoop/utils/db/RDBStoreIterator.java   |  20 +-
 .../org/apache/hadoop/utils/db/RDBTable.java|   7 +-
 .../org/apache/hadoop/utils/db/StringCodec.java |  45 ++
 .../java/org/apache/hadoop/utils/db/Table.java  |  63 +-
 .../apache/hadoop/utils/db/TableIterator.java   |   6 +-
 .../org/apache/hadoop/utils/db/TypedTable.java  | 184 +
 .../main/proto/DatanodeContainerProtocol.proto  |  21 +-
 hadoop-hdds/common/src/main/proto/hdds.proto|  20 +-
 .../common/src/main/resources/ozone-default.xml |  54 ++
 .../apache/hadoop/ozone/audit/DummyAction.java  |  36 +-
 .../hadoop/ozone/common/TestChecksum.java   | 101 +++
 .../hadoop/utils/db/TestDBStoreBuilder.java |   4 +-
 .../apache/hadoop/utils/db/TestRDBStore.java|  31 +-
 .../hadoop/utils/db/TestRDBTableStore.java  |   4 +-
 .../hadoop/utils/db/TestTypedRDBTableStore.java | 235 ++
 .../container/common/impl/HddsDispatcher.java   |  10 +-
 .../common/interfaces/ContainerDispatcher.java  |   5 +-
 .../container/common/interfaces/Handler.java|   4 +-
 .../transport/server/GrpcXceiverService.java|   3 +-
 .../transport/server/XceiverServerGrpc.java |   2 +-
 .../server/ratis/ContainerStateMachine.java | 234 +++---
 .../server/ratis/DispatcherContext.java | 133 
 .../server/ratis/XceiverServerRatis.java|  14 +-
 .../container/keyvalue/KeyValueHandler.java |  91 ++-
 .../container/keyvalue/helpers/BlockUtils.java  |   8 +-
 .../container/keyvalue/helpers/ChunkUtils.java  |  54 +-
 .../keyvalue/helpers/SmallFileUtils.java|  10 +-
 .../keyvalue/impl/ChunkManagerImpl.java |  52 +-
 .../keyvalue/interfaces/ChunkManager.java   |  12 +-
 .../common/impl/TestHddsDispatcher.java |  16 +-
 .../keyvalue/TestChunkManagerImpl.java  |  

[07/50] [abbrv] hadoop git commit: YARN-9067. YARN Resource Manager is running OOM because of leak of Configuration Object. Contributed by Eric Yang.

2018-12-05 Thread shv
YARN-9067. YARN Resource Manager is running OOM because of leak of 
Configuration Object. Contributed by Eric Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/efc4d91c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/efc4d91c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/efc4d91c

Branch: refs/heads/HDFS-12943
Commit: efc4d91cbeab8a13f6d61cb0e56443adb2d77559
Parents: fe7dab8
Author: Weiwei Yang 
Authored: Thu Nov 29 09:34:14 2018 +0800
Committer: Weiwei Yang 
Committed: Thu Nov 29 09:34:14 2018 +0800

--
 .../hadoop/yarn/service/webapp/ApiServer.java   | 209 +++
 .../hadoop/yarn/service/ServiceClientTest.java  |   2 +-
 .../yarn/service/client/ServiceClient.java  |   1 +
 3 files changed, 126 insertions(+), 86 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/efc4d91c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
index db831ba..88aeefd 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-api/src/main/java/org/apache/hadoop/yarn/service/webapp/ApiServer.java
@@ -118,10 +118,13 @@ public class ApiServer {
   @Override
   public Void run() throws YarnException, IOException {
 ServiceClient sc = getServiceClient();
-sc.init(YARN_CONFIG);
-sc.start();
-sc.actionBuild(service);
-sc.close();
+try {
+  sc.init(YARN_CONFIG);
+  sc.start();
+  sc.actionBuild(service);
+} finally {
+  sc.close();
+}
 return null;
   }
 });
@@ -133,11 +136,14 @@ public class ApiServer {
   @Override
   public ApplicationId run() throws IOException, YarnException {
 ServiceClient sc = getServiceClient();
-sc.init(YARN_CONFIG);
-sc.start();
-ApplicationId applicationId = sc.actionCreate(service);
-sc.close();
-return applicationId;
+try {
+  sc.init(YARN_CONFIG);
+  sc.start();
+  ApplicationId applicationId = sc.actionCreate(service);
+  return applicationId;
+} finally {
+  sc.close();
+}
   }
 });
 serviceStatus.setDiagnostics("Application ID: " + applicationId);
@@ -245,29 +251,32 @@ public class ApiServer {
   public Integer run() throws Exception {
 int result = 0;
 ServiceClient sc = getServiceClient();
-sc.init(YARN_CONFIG);
-sc.start();
-Exception stopException = null;
 try {
-  result = sc.actionStop(appName, destroy);
-  if (result == EXIT_SUCCESS) {
-LOG.info("Successfully stopped service {}", appName);
-  }
-} catch (Exception e) {
-  LOG.info("Got exception stopping service", e);
-  stopException = e;
-}
-if (destroy) {
-  result = sc.actionDestroy(appName);
-  if (result == EXIT_SUCCESS) {
-LOG.info("Successfully deleted service {}", appName);
+  sc.init(YARN_CONFIG);
+  sc.start();
+  Exception stopException = null;
+  try {
+result = sc.actionStop(appName, destroy);
+if (result == EXIT_SUCCESS) {
+  LOG.info("Successfully stopped service {}", appName);
+}
+  } catch (Exception e) {
+LOG.info("Got exception stopping service", e);
+stopException = e;
   }
-} else {
-  if (stopException != null) {
-throw stopException;
+  if (destroy) {
+result = sc.actionDestroy(appName);
+if (result == EXIT_SUCCESS) {
+  LOG.info("Successfully deleted service {}", appName);
+}
+  } else {
+if (stopException != null) {
+  throw stopException;
+
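Every endpoint in ApiServer gets the same treatment. Condensed, the leak fix
is simply making ServiceClient.close() unconditional, so the client (and
whatever Configuration state it holds) is released even when the action
throws; a condensed form of the pattern from the hunks above:

    ServiceClient sc = getServiceClient();
    try {
      sc.init(YARN_CONFIG);
      sc.start();
      return sc.actionCreate(service);   // or actionBuild/actionStop/...
    } finally {
      sc.close();                        // previously skipped on exceptions
    }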

[31/50] [abbrv] hadoop git commit: HDFS-14075. Terminate the namenode when failed to start log segment. Contributed by Ayush Saxena.

2018-12-05 Thread shv
HDFS-14075. Terminate the namenode when failed to start log segment. 
Contributed by Ayush Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/042c8ef5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/042c8ef5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/042c8ef5

Branch: refs/heads/HDFS-12943
Commit: 042c8ef593ced1915a688e99aa9a6a52fdf66734
Parents: 8f3e12f
Author: Surendra Singh Lilhore 
Authored: Sun Dec 2 12:31:08 2018 +0530
Committer: Surendra Singh Lilhore 
Committed: Sun Dec 2 12:31:08 2018 +0530

--
 .../hadoop/hdfs/server/namenode/FSEditLog.java  | 11 +++--
 .../hadoop/hdfs/server/namenode/JournalSet.java | 10 -
 .../hadoop/hdfs/qjournal/TestNNWithQJM.java | 15 +++
 .../hdfs/server/namenode/TestEditLog.java   |  8 +++-
 .../namenode/TestEditLogJournalFailures.java| 44 
 5 files changed, 74 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/042c8ef5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index 547ad57..56aa927 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -1382,10 +1382,15 @@ public class FSEditLog implements LogsPurgeable {
 try {
   editLogStream = journalSet.startLogSegment(segmentTxId, layoutVersion);
 } catch (IOException ex) {
-  throw new IOException("Unable to start log segment " +
-  segmentTxId + ": too few journals successfully started.", ex);
+  final String msg = "Unable to start log segment " + segmentTxId
+  + ": too few journals successfully started.";
+  LOG.error(msg, ex);
+  synchronized (journalSetLock) {
+IOUtils.cleanupWithLogger(LOG, journalSet);
+  }
+  terminate(1, msg);
 }
-
+
 curSegmentTxId = segmentTxId;
 state = State.IN_SEGMENT;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/042c8ef5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java
index 7be7073..4ab0828 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableListMultimap;
@@ -76,7 +77,7 @@ public class JournalSet implements JournalManager {
* stream, then the stream will be aborted and set to null.
*/
   static class JournalAndStream implements CheckableNameNodeResource {
-private final JournalManager journal;
+private JournalManager journal;
 private boolean disabled = false;
 private EditLogOutputStream stream;
 private final boolean required;
@@ -146,7 +147,12 @@ public class JournalSet implements JournalManager {
 void setCurrentStreamForTests(EditLogOutputStream stream) {
   this.stream = stream;
 }
-
+
+@VisibleForTesting
+void setJournalForTests(JournalManager jm) {
+  this.journal = jm;
+}
+
 JournalManager getManager() {
   return journal;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/042c8ef5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java
index d713bc7..4483667 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java
+++ 
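The test diffs are truncated here. The new setJournalForTests hook above
hints at how the behavior can be exercised: inject a JournalManager that
fails on startLogSegment and assert that the NameNode terminates through
ExitUtil rather than continuing with no journals. A hypothetical sketch (not
the committed tests; method names like rollEditLog are assumptions about the
trigger):

    // Hypothetical test sketch built around the setJournalForTests hook.
    ExitUtil.disableSystemExit();   // turns System.exit into ExitException
    JournalManager badJournal = Mockito.mock(JournalManager.class);
    Mockito.when(badJournal.startLogSegment(Mockito.anyLong(), Mockito.anyInt()))
        .thenThrow(new IOException("injected journal failure"));
    jas.setJournalForTests(badJournal);  // jas: a JournalAndStream of the log
    try {
      fsEditLog.rollEditLog(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
      Assert.fail("expected termination when the segment could not start");
    } catch (ExitUtil.ExitException ee) {
      GenericTestUtils.assertExceptionContains("too few journals", ee);
    }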

[04/50] [abbrv] hadoop git commit: YARN-8974. Improve the assertion message in TestGPUResourceHandler. (Zhankun Tang via wangda)

2018-12-05 Thread shv
YARN-8974. Improve the assertion message in TestGPUResourceHandler. (Zhankun 
Tang via wangda)

Change-Id: I4eb58e9d251d5f54e7feffc4fbb813b4f5ae4b1b


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8ebeda98
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8ebeda98
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8ebeda98

Branch: refs/heads/HDFS-12943
Commit: 8ebeda98a9d3eac45598a33bae3d62e3ebb92cad
Parents: 9ed8756
Author: Wangda Tan 
Authored: Wed Nov 28 14:36:30 2018 -0800
Committer: Wangda Tan 
Committed: Wed Nov 28 14:36:30 2018 -0800

--
 .../linux/resources/gpu/TestGpuResourceHandler.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ebeda98/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/TestGpuResourceHandler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/TestGpuResourceHandler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/TestGpuResourceHandler.java
index 10e5cd1..18785e1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/TestGpuResourceHandler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/gpu/TestGpuResourceHandler.java
@@ -465,7 +465,7 @@ public class TestGpuResourceHandler {
   caughtException = true;
 }
 Assert.assertTrue(
-"Should fail since requested device Id is not in allowed list",
+"Should fail since requested device Id is already assigned",
 caughtException);
 
 // Make sure internal state not changed.





[09/50] [abbrv] hadoop git commit: HDFS-13713. Add specification of Multipart Upload API to FS specification, with contract tests.

2018-12-05 Thread shv
HDFS-13713. Add specification of Multipart Upload API to FS specification, with 
contract tests.

Contributed by Ewan Higgs and Steve Loughran.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c1d24f84
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c1d24f84
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c1d24f84

Branch: refs/heads/HDFS-12943
Commit: c1d24f848345f6d34a2ac2d570d49e9787a0df6a
Parents: b71cc7f
Author: Ewan Higgs 
Authored: Thu Nov 29 15:11:07 2018 +
Committer: Steve Loughran 
Committed: Thu Nov 29 15:12:17 2018 +

--
 .../hadoop/fs/FileSystemMultipartUploader.java  |  36 +-
 .../org/apache/hadoop/fs/MultipartUploader.java |  88 ++-
 .../hadoop/fs/MultipartUploaderFactory.java |   7 +
 .../src/site/markdown/filesystem/index.md   |   1 +
 .../markdown/filesystem/multipartuploader.md| 235 
 .../AbstractContractMultipartUploaderTest.java  | 565 +++
 .../TestLocalFSContractMultipartUploader.java   |  10 +
 .../hdfs/TestHDFSContractMultipartUploader.java |  15 +
 .../hadoop/fs/s3a/S3AMultipartUploader.java |  31 +-
 .../s3a/ITestS3AContractMultipartUploader.java  |  64 ++-
 .../fs/s3a/TestS3AMultipartUploaderSupport.java |   2 +-
 11 files changed, 876 insertions(+), 178 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1d24f84/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java
index 94c7861..b77c244 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystemMultipartUploader.java
@@ -19,21 +19,23 @@ package org.apache.hadoop.fs;
 import java.io.IOException;
 import java.io.InputStream;
 import java.nio.ByteBuffer;
+import java.util.ArrayList;
 import java.util.Comparator;
 import java.util.List;
+import java.util.Map;
+import java.util.UUID;
 import java.util.stream.Collectors;
 
 import com.google.common.base.Charsets;
-import com.google.common.base.Preconditions;
 
 import org.apache.commons.compress.utils.IOUtils;
-import org.apache.commons.lang3.tuple.Pair;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
 
 import static org.apache.hadoop.fs.Path.mergePaths;
+import static org.apache.hadoop.io.IOUtils.cleanupWithLogger;
 
 /**
  * A MultipartUploader that uses the basic FileSystem commands.
@@ -70,7 +72,8 @@ public class FileSystemMultipartUploader extends MultipartUploader {
   public PartHandle putPart(Path filePath, InputStream inputStream,
   int partNumber, UploadHandle uploadId, long lengthInBytes)
   throws IOException {
-
+checkPutArguments(filePath, inputStream, partNumber, uploadId,
+lengthInBytes);
 byte[] uploadIdByteArray = uploadId.toByteArray();
 checkUploadId(uploadIdByteArray);
 Path collectorPath = new Path(new String(uploadIdByteArray, 0,
@@ -82,16 +85,17 @@ public class FileSystemMultipartUploader extends MultipartUploader {
 fs.createFile(partPath).build()) {
   IOUtils.copy(inputStream, fsDataOutputStream, 4096);
 } finally {
-  org.apache.hadoop.io.IOUtils.cleanupWithLogger(LOG, inputStream);
+  cleanupWithLogger(LOG, inputStream);
 }
 return BBPartHandle.from(ByteBuffer.wrap(
 partPath.toString().getBytes(Charsets.UTF_8)));
   }
 
   private Path createCollectorPath(Path filePath) {
+String uuid = UUID.randomUUID().toString();
 return mergePaths(filePath.getParent(),
 mergePaths(new Path(filePath.getName().split("\\.")[0]),
-mergePaths(new Path("_multipart"),
+mergePaths(new Path("_multipart_" + uuid),
 new Path(Path.SEPARATOR))));
   }
 
@@ -110,21 +114,16 @@ public class FileSystemMultipartUploader extends MultipartUploader {
 
   @Override
   @SuppressWarnings("deprecation") // rename w/ OVERWRITE
-  public PathHandle complete(Path filePath,
-  List<Pair<Integer, PartHandle>> handles, UploadHandle multipartUploadId)
-  throws IOException {
+  public PathHandle complete(Path filePath, Map<Integer, PartHandle> handleMap,
+  UploadHandle multipartUploadId) throws IOException {
 
 checkUploadId(multipartUploadId.toByteArray());
 
-if (handles.isEmpty()) {
-  throw new IOException("Empty upload");
-}
-// 
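Together with putPart above, the new Map-based complete gives the full
client-side flow. A hedged usage sketch against the signatures shown in this
diff (initialize and the openPartStream/part1Length helpers are assumptions;
the committed interface may differ in detail):

    MultipartUploader uploader = MultipartUploaderFactory.get(fs, conf);
    Path dest = new Path("/data/big-file.bin");
    UploadHandle upload = uploader.initialize(dest);     // assumed API
    Map<Integer, PartHandle> parts = new HashMap<>();
    try (InputStream part1 = openPartStream(1)) {        // hypothetical helper
      parts.put(1, uploader.putPart(dest, part1, 1, upload, part1Length));
    }
    PathHandle result = uploader.complete(dest, parts, upload);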

[45/50] [abbrv] hadoop git commit: Revert "HADOOP-15852. Refactor QuotaUsage. Contributed by Beluga Behr."

2018-12-05 Thread shv
Revert "HADOOP-15852. Refactor QuotaUsage. Contributed by Beluga Behr."

This reverts commit fb9deed41d6b9f242474b474a5acde0c858e28f6.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aa89492f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aa89492f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aa89492f

Branch: refs/heads/HDFS-12943
Commit: aa89492f29fafa471f99ff225602752bdb9b5c8f
Parents: e89941f
Author: Giovanni Matteo Fumarola 
Authored: Tue Dec 4 12:57:28 2018 -0800
Committer: Giovanni Matteo Fumarola 
Committed: Tue Dec 4 12:57:28 2018 -0800

--
 .../java/org/apache/hadoop/fs/QuotaUsage.java   | 108 +--
 1 file changed, 53 insertions(+), 55 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa89492f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/QuotaUsage.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/QuotaUsage.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/QuotaUsage.java
index 4e42e5b..3472362 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/QuotaUsage.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/QuotaUsage.java
@@ -40,12 +40,14 @@ public class QuotaUsage {
   /** Builder class for QuotaUsage. */
   public static class Builder {
 public Builder() {
-  this.quota = -1L;
-  this.spaceQuota = -1L;
+  this.quota = -1;
+  this.spaceQuota = -1;
 
   typeConsumed = new long[StorageType.values().length];
   typeQuota = new long[StorageType.values().length];
-  Arrays.fill(typeQuota, -1L);
+  for (int i = 0; i < typeQuota.length; i++) {
+typeQuota[i] = -1;
+  }
 }
 
 public Builder fileAndDirectoryCount(long count) {
@@ -69,8 +71,9 @@ public class QuotaUsage {
 }
 
 public Builder typeConsumed(long[] typeConsumed) {
-  System.arraycopy(typeConsumed, 0, this.typeConsumed, 0,
-  typeConsumed.length);
+  for (int i = 0; i < typeConsumed.length; i++) {
+this.typeConsumed[i] = typeConsumed[i];
+  }
   return this;
 }
 
@@ -85,8 +88,9 @@ public class QuotaUsage {
 }
 
 public Builder typeQuota(long[] typeQuota) {
-  System.arraycopy(typeQuota, 0, this.typeQuota, 0,
-  typeQuota.length);
+  for (int i = 0; i < typeQuota.length; i++) {
+this.typeQuota[i] = typeQuota[i];
+  }
   return this;
 }
 
@@ -149,12 +153,22 @@ public class QuotaUsage {
 
   /** Return storage type quota. */
   public long getTypeQuota(StorageType type) {
-return (typeQuota != null) ? typeQuota[type.ordinal()] : -1L;
+return (typeQuota != null) ? typeQuota[type.ordinal()] : -1;
   }
 
   /** Return storage type consumed. */
   public long getTypeConsumed(StorageType type) {
-return (typeConsumed != null) ? typeConsumed[type.ordinal()] : 0L;
+return (typeConsumed != null) ? typeConsumed[type.ordinal()] : 0;
+  }
+
+  /** Return storage type quota. */
+  private long[] getTypesQuota() {
+return typeQuota;
+  }
+
+  /** Return storage type quota. */
+  private long[] getTypesConsumed() {
+return typeConsumed;
   }
 
   /** Return true if any storage type quota has been set. */
@@ -163,7 +177,7 @@ public class QuotaUsage {
   return false;
 }
 for (StorageType t : StorageType.getTypesSupportingQuota()) {
-  if (typeQuota[t.ordinal()] > 0L) {
+  if (typeQuota[t.ordinal()] > 0) {
 return true;
   }
 }
@@ -176,7 +190,7 @@ public class QuotaUsage {
   return false;
 }
 for (StorageType t : StorageType.getTypesSupportingQuota()) {
-  if (typeConsumed[t.ordinal()] > 0L) {
+  if (typeConsumed[t.ordinal()] > 0) {
 return true;
   }
 }
@@ -184,50 +198,33 @@ public class QuotaUsage {
   }
 
   @Override
-  public int hashCode() {
-final int prime = 31;
-int result = 1;
-result = prime * result
-+ (int) (fileAndDirectoryCount ^ (fileAndDirectoryCount >>> 32));
-result = prime * result + (int) (quota ^ (quota >>> 32));
-result = prime * result + (int) (spaceConsumed ^ (spaceConsumed >>> 32));
-result = prime * result + (int) (spaceQuota ^ (spaceQuota >>> 32));
-result = prime * result + Arrays.hashCode(typeConsumed);
-result = prime * result + Arrays.hashCode(typeQuota);
-return result;
+  public boolean equals(Object to) {
+return (this == to || (to instanceof QuotaUsage &&
+getFileAndDirectoryCount() ==
+((QuotaUsage) to).getFileAndDirectoryCount() &&
+getQuota() == ((QuotaUsage) to).getQuota() &&
+getSpaceConsumed() == ((QuotaUsage) 

[13/50] [abbrv] hadoop git commit: HADOOP-14927. ITestS3GuardTool failures in testDestroyNoBucket(). Contributed by Gabor Bota.

2018-12-05 Thread shv
HADOOP-14927. ITestS3GuardTool failures in testDestroyNoBucket(). Contributed 
by Gabor Bota.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7eb0d3a3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7eb0d3a3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7eb0d3a3

Branch: refs/heads/HDFS-12943
Commit: 7eb0d3a32435da110dc9e6004dba8c5c9b082c35
Parents: 184cced
Author: Sean Mackrory 
Authored: Wed Nov 28 16:57:12 2018 -0700
Committer: Sean Mackrory 
Committed: Thu Nov 29 09:36:39 2018 -0700

--
 .../hadoop/fs/s3a/s3guard/S3GuardTool.java  | 38 
 1 file changed, 24 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7eb0d3a3/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java
index 1316121..aea57a6 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/S3GuardTool.java
@@ -218,6 +218,27 @@ public abstract class S3GuardTool extends Configured implements Tool {
 format.addOptionWithValue(SECONDS_FLAG);
   }
 
+  protected void checkMetadataStoreUri(List<String> paths) throws IOException {
+// be sure that path is provided in params, so there's no IOoBE
+String s3Path = "";
+if(!paths.isEmpty()) {
+  s3Path = paths.get(0);
+}
+
+// Check if DynamoDB url is set from arguments.
+String metadataStoreUri = getCommandFormat().getOptValue(META_FLAG);
+if(metadataStoreUri == null || metadataStoreUri.isEmpty()) {
+  // If not set, check if filesystem is guarded by creating an
+  // S3AFileSystem and check if hasMetadataStore is true
+  try (S3AFileSystem s3AFileSystem = (S3AFileSystem)
+  S3AFileSystem.newInstance(toUri(s3Path), getConf())){
+Preconditions.checkState(s3AFileSystem.hasMetadataStore(),
+"The S3 bucket is unguarded. " + getName()
++ " can not be used on an unguarded bucket.");
+  }
+}
+  }
+
   /**
* Parse metadata store from command line option or HDFS configuration.
*
@@ -500,20 +521,7 @@ public abstract class S3GuardTool extends Configured implements Tool {
 public int run(String[] args, PrintStream out) throws Exception {
   List<String> paths = parseArgs(args);
   Map<String, String> options = new HashMap<>();
-  String s3Path = paths.get(0);
-
-  // Check if DynamoDB url is set from arguments.
-  String metadataStoreUri = getCommandFormat().getOptValue(META_FLAG);
-  if(metadataStoreUri == null || metadataStoreUri.isEmpty()) {
-// If not set, check if filesystem is guarded by creating an
-// S3AFileSystem and check if hasMetadataStore is true
-try (S3AFileSystem s3AFileSystem = (S3AFileSystem)
-S3AFileSystem.newInstance(toUri(s3Path), getConf())){
-  Preconditions.checkState(s3AFileSystem.hasMetadataStore(),
-  "The S3 bucket is unguarded. " + getName()
-  + " can not be used on an unguarded bucket.");
-}
-  }
+  checkMetadataStoreUri(paths);
 
   String readCap = getCommandFormat().getOptValue(READ_FLAG);
   if (StringUtils.isNotEmpty(readCap)) {
@@ -590,6 +598,8 @@ public abstract class S3GuardTool extends Configured implements Tool {
 throw e;
   }
 
+  checkMetadataStoreUri(paths);
+
   try {
 initMetadataStore(false);
   } catch (FileNotFoundException e) {





[36/50] [abbrv] hadoop git commit: HADOOP-15950. Failover for LdapGroupsMapping. Contributed by Lukas Majercak.

2018-12-05 Thread shv
HADOOP-15950. Failover for LdapGroupsMapping. Contributed by Lukas Majercak.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c9a3aa64
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c9a3aa64
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c9a3aa64

Branch: refs/heads/HDFS-12943
Commit: c9a3aa64dc95b097c51070f71a3b1a2ad126b2b9
Parents: ef3b03b
Author: Giovanni Matteo Fumarola 
Authored: Mon Dec 3 12:10:05 2018 -0800
Committer: Giovanni Matteo Fumarola 
Committed: Mon Dec 3 12:10:05 2018 -0800

--
 .../hadoop/security/LdapGroupsMapping.java  | 163 +--
 .../src/main/resources/core-default.xml |  28 +++-
 .../src/site/markdown/GroupsMapping.md  |  54 +-
 .../hadoop/security/TestLdapGroupsMapping.java  |  80 -
 .../security/TestLdapGroupsMappingBase.java |  76 -
 .../TestLdapGroupsMappingWithFailover.java  | 142 
 .../TestLdapGroupsMappingWithOneQuery.java  |  16 +-
 .../TestLdapGroupsMappingWithPosixGroup.java|  10 +-
 8 files changed, 450 insertions(+), 119 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c9a3aa64/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
index 6beaa9e..83eb5ad 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
@@ -25,6 +25,7 @@ import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Hashtable;
+import java.util.Iterator;
 import java.util.List;
 import java.util.HashSet;
 import java.util.Collection;
@@ -40,7 +41,10 @@ import javax.naming.directory.SearchControls;
 import javax.naming.directory.SearchResult;
 import javax.naming.ldap.LdapName;
 import javax.naming.ldap.Rdn;
+import javax.naming.spi.InitialContextFactory;
 
+import com.google.common.collect.Iterators;
+import com.sun.jndi.ldap.LdapCtxFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configurable;
@@ -83,7 +87,7 @@ public class LdapGroupsMapping
   public static final String LDAP_CONFIG_PREFIX = 
"hadoop.security.group.mapping.ldap";
 
   /*
-   * URL of the LDAP server
+   * URL of the LDAP server(s)
*/
   public static final String LDAP_URL_KEY = LDAP_CONFIG_PREFIX + ".url";
   public static final String LDAP_URL_DEFAULT = "";
@@ -232,6 +236,20 @@ public class LdapGroupsMapping
   LDAP_CONFIG_PREFIX + ".read.timeout.ms";
   public static final int READ_TIMEOUT_DEFAULT = 60 * 1000; // 60 seconds
 
+  public static final String LDAP_NUM_ATTEMPTS_KEY =
+  LDAP_CONFIG_PREFIX + ".num.attempts";
+  public static final int LDAP_NUM_ATTEMPTS_DEFAULT = 3;
+
+  public static final String LDAP_NUM_ATTEMPTS_BEFORE_FAILOVER_KEY =
+  LDAP_CONFIG_PREFIX + ".num.attempts.before.failover";
+  public static final int LDAP_NUM_ATTEMPTS_BEFORE_FAILOVER_DEFAULT =
+  LDAP_NUM_ATTEMPTS_DEFAULT;
+
+  public static final String LDAP_CTX_FACTORY_CLASS_KEY =
+  LDAP_CONFIG_PREFIX + ".ctx.factory.class";
+  public static final Class<? extends InitialContextFactory>
+  LDAP_CTX_FACTORY_CLASS_DEFAULT = LdapCtxFactory.class;
+
   private static final Logger LOG =
   LoggerFactory.getLogger(LdapGroupsMapping.class);
 
@@ -242,8 +260,10 @@ public class LdapGroupsMapping
 
   private DirContext ctx;
   private Configuration conf;
-  
-  private String ldapUrl;
+
+  private Iterator<String> ldapUrls;
+  private String currentLdapUrl;
+
   private boolean useSsl;
   private String keystore;
   private String keystorePass;
@@ -258,14 +278,15 @@ public class LdapGroupsMapping
   private String memberOfAttr;
   private String groupMemberAttr;
   private String groupNameAttr;
-  private int    groupHierarchyLevels;
+  private int groupHierarchyLevels;
   private String posixUidAttr;
   private String posixGidAttr;
   private boolean isPosix;
   private boolean useOneQuery;
+  private int numAttempts;
+  private int numAttemptsBeforeFailover;
+  private Class<? extends InitialContextFactory> ldapCxtFactoryClass;
 
-  public static final int RECONNECT_RETRY_COUNT = 3;
-  
   /**
* Returns list of groups for a user.
* 
@@ -279,20 +300,31 @@ public class LdapGroupsMapping
   @Override
  public synchronized List<String> getGroups(String user) {
 /*
- * Normal garbage collection takes care of 
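
A sketch of how the new retry/failover knobs compose on the client side
(host names are placeholders; the values are illustrative, not defaults):

Configuration conf = new Configuration();
// Several servers are listed, comma separated, in the existing .url key.
conf.set("hadoop.security.group.mapping.ldap.url",
    "ldap://ldap1.example.com,ldap://ldap2.example.com");
// Try each server three times before failing over, six attempts in total.
conf.setInt("hadoop.security.group.mapping.ldap.num.attempts", 6);
conf.setInt(
    "hadoop.security.group.mapping.ldap.num.attempts.before.failover", 3);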

[28/50] [abbrv] hadoop git commit: HDDS-748. Use strongly typed metadata Table implementation. Contributed by Elek Marton.

2018-12-05 Thread shv
HDDS-748. Use strongly typed metadata Table implementation. Contributed by Elek 
Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d15dc436
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d15dc436
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d15dc436

Branch: refs/heads/HDFS-12943
Commit: d15dc436598d646de67b553207ab6624741f56a5
Parents: 99e201d
Author: Bharat Viswanadham 
Authored: Sat Dec 1 16:52:23 2018 -0800
Committer: Bharat Viswanadham 
Committed: Sat Dec 1 16:52:23 2018 -0800

--
 .../hadoop/utils/db/ByteArrayKeyValue.java  |  67 ++
 .../java/org/apache/hadoop/utils/db/Codec.java  |  38 +++
 .../apache/hadoop/utils/db/CodecRegistry.java   |  70 ++
 .../org/apache/hadoop/utils/db/DBStore.java |  12 +-
 .../org/apache/hadoop/utils/db/RDBStore.java|  12 +-
 .../hadoop/utils/db/RDBStoreIterator.java   |  20 +-
 .../org/apache/hadoop/utils/db/RDBTable.java|   7 +-
 .../org/apache/hadoop/utils/db/StringCodec.java |  45 
 .../java/org/apache/hadoop/utils/db/Table.java  |  63 +
 .../apache/hadoop/utils/db/TableIterator.java   |   6 +-
 .../org/apache/hadoop/utils/db/TypedTable.java  | 184 +++
 .../hadoop/utils/db/TestDBStoreBuilder.java |   4 +-
 .../apache/hadoop/utils/db/TestRDBStore.java|  31 +--
 .../hadoop/utils/db/TestRDBTableStore.java  |   4 +-
 .../hadoop/utils/db/TestTypedRDBTableStore.java | 235 +++
 .../hadoop/ozone/om/OMMetadataManager.java  |  17 +-
 .../hadoop/ozone/om/TestOzoneManager.java   |   4 +-
 .../hadoop/ozone/om/OmMetadataManagerImpl.java  |  82 ---
 18 files changed, 765 insertions(+), 136 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d15dc436/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/ByteArrayKeyValue.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/ByteArrayKeyValue.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/ByteArrayKeyValue.java
new file mode 100644
index 000..ca5583c
--- /dev/null
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/ByteArrayKeyValue.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+package org.apache.hadoop.utils.db;
+
+import org.apache.hadoop.utils.db.Table.KeyValue;
+
+/**
+ * Key value for raw Table implementations.
+ */
+public final class ByteArrayKeyValue implements KeyValue<byte[], byte[]> {
+  private byte[] key;
+  private byte[] value;
+
+  private ByteArrayKeyValue(byte[] key, byte[] value) {
+this.key = key;
+this.value = value;
+  }
+
+  /**
+   * Create a KeyValue pair.
+   *
+   * @param key   - Key Bytes
+   * @param value - Value bytes
+   * @return KeyValue object.
+   */
+  public static ByteArrayKeyValue create(byte[] key, byte[] value) {
+return new ByteArrayKeyValue(key, value);
+  }
+
+  /**
+   * Return key.
+   *
+   * @return byte[]
+   */
+  public byte[] getKey() {
+byte[] result = new byte[key.length];
+System.arraycopy(key, 0, result, 0, key.length);
+return result;
+  }
+
+  /**
+   * Return value.
+   *
+   * @return byte[]
+   */
+  public byte[] getValue() {
+byte[] result = new byte[value.length];
+System.arraycopy(value, 0, result, 0, value.length);
+return result;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d15dc436/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/Codec.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/Codec.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/Codec.java
new file mode 100644
index 000..7f6f489
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/db/Codec.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * 
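
The registry pattern is easiest to see from a codec that is not in the patch;
a hypothetical LongCodec, assuming Codec exposes the same two conversion
methods that StringCodec implements:

import java.nio.ByteBuffer;

public class LongCodec implements Codec<Long> {
  @Override
  public byte[] toPersistedFormat(Long object) {
    // Fixed-width big-endian bytes preserve numeric ordering under RocksDB's
    // default lexicographic key comparator.
    return ByteBuffer.allocate(Long.BYTES).putLong(object).array();
  }

  @Override
  public Long fromPersistedFormat(byte[] rawData) {
    return ByteBuffer.wrap(rawData).getLong();
  }
}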

[23/50] [abbrv] hadoop git commit: HDDS-886. Unnecessary buffer copy in HddsDispatcher#dispatch. Contributed by Lokesh Jain.

2018-12-05 Thread shv
HDDS-886. Unnecessary buffer copy in HddsDispatcher#dispatch. Contributed by 
Lokesh Jain.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/62f82111
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/62f82111
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/62f82111

Branch: refs/heads/HDFS-12943
Commit: 62f821115be34f26e994790591c235710f0fc224
Parents: 7ccb640
Author: Yiqun Lin 
Authored: Fri Nov 30 22:57:29 2018 +0800
Committer: Yiqun Lin 
Committed: Fri Nov 30 22:57:29 2018 +0800

--
 .../apache/hadoop/ozone/container/common/impl/HddsDispatcher.java  | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/62f82111/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
index 24ba784..352cc86 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
@@ -134,9 +134,9 @@ public class HddsDispatcher implements ContainerDispatcher, 
Auditor {
   @Override
   public ContainerCommandResponseProto dispatch(
   ContainerCommandRequestProto msg) {
+Preconditions.checkNotNull(msg);
 LOG.trace("Command {}, trace ID: {} ", msg.getCmdType().toString(),
 msg.getTraceID());
-Preconditions.checkNotNull(msg.toString());
 
 AuditAction action = ContainerCommandRequestPBHelper.getAuditAction(
 msg.getCmdType());
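
The reordering is more than style; isolated, with msg standing in for any
request reference:

// Old form: msg.toString() dereferences msg before checkNotNull ever runs,
// so a null request surfaced as a bare NPE from toString(), and a non-null
// request paid for rendering the whole protobuf to text, then discarded it.
// Preconditions.checkNotNull(msg.toString());

// New form: the guard sees the reference itself and allocates nothing.
Preconditions.checkNotNull(msg);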





[43/50] [abbrv] hadoop git commit: HADOOP-15966. Hadoop Kerberos broken on macos as java.security.krb5.realm is reset.

2018-12-05 Thread shv
HADOOP-15966. Hadoop Kerberos broken on macos as java.security.krb5.realm is 
reset.

Contributed by Steve Loughran


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/db2d8b01
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/db2d8b01
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/db2d8b01

Branch: refs/heads/HDFS-12943
Commit: db2d8b01c65fdff3acf83b4c4f0a79fe48270487
Parents: 7274115
Author: Steve Loughran 
Authored: Tue Dec 4 15:35:43 2018 +
Committer: Steve Loughran 
Committed: Tue Dec 4 15:35:43 2018 +

--
 .../hadoop-common/src/main/conf/hadoop-env.sh  | 13 -
 1 file changed, 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/db2d8b01/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
--
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh 
b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
index 64d6bcb..e43cd95 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
+++ b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-env.sh
@@ -96,19 +96,6 @@
 # section as to why
 export HADOOP_OS_TYPE=${HADOOP_OS_TYPE:-$(uname -s)}
 
-
-# Under certain conditions, Java on OS X will throw SCDynamicStore errors
-# in the system logs.
-# See HADOOP-8719 for more information.  If one needs Kerberos
-# support on OS X, one will want to change/remove this extra bit.
-case ${HADOOP_OS_TYPE} in
-  Darwin*)
-export HADOOP_OPTS="${HADOOP_OPTS} -Djava.security.krb5.realm= "
-export HADOOP_OPTS="${HADOOP_OPTS} -Djava.security.krb5.kdc= "
-export HADOOP_OPTS="${HADOOP_OPTS} -Djava.security.krb5.conf= "
-  ;;
-esac
-
 # Extra Java runtime options for some Hadoop commands
 # and clients (i.e., hdfs dfs -blah).  These get appended to HADOOP_OPTS for
 # such commands.  In most cases, # this should be left empty and





[47/50] [abbrv] hadoop git commit: Revert "YARN-8870. [Submarine] Add submarine installation scripts. (Xun Liu via wangda)"

2018-12-05 Thread shv
http://git-wip-us.apache.org/repos/asf/hadoop/blob/228156cf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/installation/scripts/utils.sh
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/installation/scripts/utils.sh
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/installation/scripts/utils.sh
deleted file mode 100644
index 7b3c2a9..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/installation/scripts/utils.sh
+++ /dev/null
@@ -1,123 +0,0 @@
-#!/usr/bin/env bash
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-## @description  check install user
-## @audience public
-## @stabilitystable
-function check_install_user()
-{
-  if [[ $(id -u) -ne 0 ]];then
-echo "This script must be run with a ROOT user!"
-exit # don't call exit_install()
-  fi
-}
-
-## @description  exit install
-## @audience public
-## @stabilitystable
-function exit_install()
-{
-  echo "Exit the installation!" | tee -a $LOG
-  exit $1
-}
-
-## @description  Check if the IP address format is correct
-## @audience public
-## @stabilitystable
-function valid_ip()
-{
-  local ip=$1
-  local stat=1
-
-  if [[ $ip =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
-OIFS=$IFS
-IFS='.'
-ip=($ip)
-IFS=$OIFS
-
-if [[ ${ip[0]} -le 255 && ${ip[1]} -le 255 && ${ip[2]} -le 255 && ${ip[3]} -le 255 ]]; then
-  stat=$?
-fi
-  fi
-
-  return $stat
-}
-
-## @description  Check if the configuration file configuration is correct
-## @audience public
-## @stabilitystable
-function check_install_conf()
-{
-  echo "Check if the configuration file configuration is correct ..." | tee -a 
$LOG
-
-  # check etcd conf
-  hostCount=${#ETCD_HOSTS[@]}
-  if [[ $hostCount -lt 3 && hostCount -ne 0 ]]; then # <>2
-echo "Number of nodes = [$hostCount], must be configured to be greater 
than or equal to 3 servers! " | tee -a $LOG
-exit_install
-  fi
-  for ip in ${ETCD_HOSTS[@]}
-  do
-if ! valid_ip $ip; then
-  echo "]ETCD_HOSTS=[$ip], IP address format is incorrect! " | tee -a $LOG
-  exit_install
-fi
-  done
-  echo "Check if the configuration file configuration is correct [ Done ]" | 
tee -a $LOG
-}
-
-## @description  index by EtcdHosts list
-## @audience public
-## @stabilitystable
-function indexByEtcdHosts() {
-  index=0
-  while [ "$index" -lt "${#ETCD_HOSTS[@]}" ]; do
-if [ "${ETCD_HOSTS[$index]}" = "$1" ]; then
-  echo $index
-  return
-fi
-let "index++"
-  done
-  echo ""
-}
-
-## @description  get local IP
-## @audience public
-## @stabilitystable
-function getLocalIP()
-{
-  local _ip _myip _line _nl=$'\n'
-  while IFS=$': \t' read -a _line ;do
-  [ -z "${_line%inet}" ] &&
- _ip=${_line[${#_line[1]}>4?1:2]} &&
- [ "${_ip#127.0.0.1}" ] && _myip=$_ip
-done< <(LANG=C /sbin/ifconfig)
-  printf ${1+-v} $1 "%s${_nl:0:$[${#1}>0?0:1]}" $_myip
-}
-
-## @description  get ip list
-## @audience public
-## @stabilitystable
-function get_ip_list()
-{
-  array=$(ifconfig | grep inet | grep -v inet6 | grep -v 127 | sed 's/^[ 
\t]*//g' | cut -d ' ' -f2)
-
-  for ip in ${array[@]}
-  do
-LOCAL_HOST_IP_LIST+=(${ip})
-  done
-}





[29/50] [abbrv] hadoop git commit: HDDS-887. Add DispatcherContext info to Dispatcher from containerStateMachine. Contributed by Shashikant Banerjee.

2018-12-05 Thread shv
HDDS-887. Add DispatcherContext info to Dispatcher from containerStateMachine. 
Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5a3c7714
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5a3c7714
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5a3c7714

Branch: refs/heads/HDFS-12943
Commit: 5a3c7714c4d7822827ec365ea187fa8f43eb0e45
Parents: d15dc43
Author: Shashikant Banerjee 
Authored: Sun Dec 2 08:00:35 2018 +0530
Committer: Shashikant Banerjee 
Committed: Sun Dec 2 08:00:35 2018 +0530

--
 .../main/proto/DatanodeContainerProtocol.proto  |   8 --
 .../container/common/impl/HddsDispatcher.java   |   8 +-
 .../common/interfaces/ContainerDispatcher.java  |   5 +-
 .../container/common/interfaces/Handler.java|   4 +-
 .../transport/server/GrpcXceiverService.java|   3 +-
 .../transport/server/XceiverServerGrpc.java |   2 +-
 .../server/ratis/ContainerStateMachine.java | 120 +++--
 .../server/ratis/DispatcherContext.java | 133 +++
 .../container/keyvalue/KeyValueHandler.java |  75 +++
 .../container/keyvalue/helpers/BlockUtils.java  |   8 +-
 .../keyvalue/helpers/SmallFileUtils.java|  10 +-
 .../keyvalue/impl/ChunkManagerImpl.java |   4 +-
 .../keyvalue/interfaces/ChunkManager.java   |   5 +-
 .../common/impl/TestHddsDispatcher.java |  14 +-
 .../keyvalue/TestChunkManagerImpl.java  |  17 +--
 .../container/keyvalue/TestKeyValueHandler.java |  48 +++
 .../common/impl/TestContainerPersistence.java   |  20 +--
 .../transport/server/ratis/TestCSMMetrics.java  |   3 +-
 .../container/server/TestContainerServer.java   |   6 +-
 .../genesis/BenchMarkDatanodeDispatcher.java|  16 +--
 20 files changed, 321 insertions(+), 188 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a3c7714/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
--
diff --git a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto 
b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
index 5237af8..661d910 100644
--- a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
@@ -373,17 +373,10 @@ enum ChecksumType {
 MD5 = 5;
 }
 
-enum Stage {
-WRITE_DATA = 1;
-COMMIT_DATA = 2;
-COMBINED = 3;
-}
-
 message  WriteChunkRequestProto  {
   required DatanodeBlockID blockID = 1;
   required ChunkInfo chunkData = 2;
   optional bytes data = 3;
-  optional Stage stage = 4 [default = COMBINED];
 }
 
 message  WriteChunkResponseProto {
@@ -392,7 +385,6 @@ message  WriteChunkResponseProto {
 message  ReadChunkRequestProto  {
   required DatanodeBlockID blockID = 1;
   required ChunkInfo chunkData = 2;
-  optional bool readFromTmpFile = 3 [default = false];
 }
 
 message  ReadChunkResponseProto {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5a3c7714/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
index 352cc86..c5c51a3 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
@@ -47,6 +47,8 @@ import 
org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.common.interfaces.Handler;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
+import org.apache.hadoop.ozone.container.common.transport.server.ratis
+.DispatcherContext;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
@@ -133,7 +135,7 @@ public class HddsDispatcher implements ContainerDispatcher, 
Auditor {
 
   @Override
   public ContainerCommandResponseProto dispatch(
-  ContainerCommandRequestProto msg) {
+  ContainerCommandRequestProto msg, DispatcherContext dispatcherContext) {
 Preconditions.checkNotNull(msg);
 LOG.trace("Command {}, trace ID: {} ", msg.getCmdType().toString(),
 msg.getTraceID());
@@ -194,7 +196,7 @@ public class HddsDispatcher implements 

[34/50] [abbrv] hadoop git commit: HDFS-13818. Extend OIV to detect FSImage corruption. Contributed by Adam Antal.

2018-12-05 Thread shv
HDFS-13818. Extend OIV to detect FSImage corruption. Contributed by Adam Antal.

Signed-off-by: Wei-Chiu Chuang 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fb10803d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fb10803d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fb10803d

Branch: refs/heads/HDFS-12943
Commit: fb10803dfa67394650072bdea327296f8ad2a744
Parents: dd5e7c6
Author: Adam Antal 
Authored: Mon Dec 3 10:33:51 2018 -0800
Committer: Wei-Chiu Chuang 
Committed: Mon Dec 3 10:34:39 2018 -0800

--
 .../OfflineImageViewerPB.java   |  27 +-
 .../offlineImageViewer/PBImageCorruption.java   | 107 ++
 .../PBImageCorruptionDetector.java  | 344 +++
 .../PBImageDelimitedTextWriter.java |  38 +-
 .../offlineImageViewer/PBImageTextWriter.java   | 220 ++--
 .../src/site/markdown/HdfsImageViewer.md|  36 +-
 .../TestOfflineImageViewer.java | 219 +++-
 .../TestPBImageCorruption.java  |  55 +++
 .../test/resources/testMultipleCorruption.csv   |  22 ++
 .../resources/testMultipleFileCorruption.csv|  21 ++
 .../test/resources/testSingleFileCorruption.csv |  18 +
 .../resources/testSingleFolderCorruption.csv|  18 +
 12 files changed, 1052 insertions(+), 73 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb10803d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java
index 782ace8..33b9364 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/OfflineImageViewerPB.java
@@ -79,6 +79,12 @@ public class OfflineImageViewerPB {
   + "to both inodes and inodes-under-construction, separated by a\n"
   + "delimiter. The default delimiter is \\t, though this may be\n"
   + "changed via the -delimiter argument.\n"
+  + "  * DetectCorruption: Detect potential corruption of the image by\n"
+  + "selectively loading parts of it and actively searching for\n"
+  + "inconsistencies. Outputs a summary of the found corruptions\n"
+  + "in a delimited format.\n"
+  + "Note that the check is not exhaustive, and only catches\n"
+  + "missing nodes during the namespace reconstruction.\n"
   + "\n"
   + "Required command line arguments:\n"
  + "-i,--inputFile FSImage or XML file to process.\n"
@@ -91,12 +97,15 @@ public class OfflineImageViewerPB {
  + "   will also create an .md5 file.\n"
  + "-p,--processor Select which type of processor to apply\n"
  + "   against image file. (XML|FileDistribution|\n"
-  + "   ReverseXML|Web|Delimited)\n"
+  + "   ReverseXML|Web|Delimited|DetectCorruption)\n"
  + "   The default is Web.\n"
-  + "-delimiter Delimiting string to use with Delimited processor.\n"
-  + "-t,--temp  Use temporary dir to cache intermediate result to generate\n"
-  + "   Delimited outputs. If not set, Delimited processor constructs\n"
-  + "   the namespace in memory before outputting text.\n"
+  + "-delimiter Delimiting string to use with Delimited or\n"
+  + "   DetectCorruption processor.\n"
+  + "-t,--temp  Use temporary dir to cache intermediate\n"
+  + "   result to generate DetectCorruption or\n"
+  + "   Delimited outputs. If not set, the processor\n"
+  + "   constructs the namespace in memory\n"
+  + "   before outputting text.\n"
  + "-h,--help  Display usage information and exit\n";
 
   /**
@@ -172,7 +181,7 @@ public class OfflineImageViewerPB {
 String processor = cmd.getOptionValue("p", "Web");
 String outputFile = cmd.getOptionValue("o", "-");
 String delimiter = cmd.getOptionValue("delimiter",
-PBImageDelimitedTextWriter.DEFAULT_DELIMITER);
+PBImageTextWriter.DEFAULT_DELIMITER);
 String tempPath = cmd.getOptionValue("t", "");
 
 Configuration conf = new Configuration();
@@ -219,6 +228,12 @@ public 
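
A sketch of driving the new processor (paths are hypothetical; the same flags
apply on the command line through "hdfs oiv"):

String[] args = new String[] {
    "-p", "DetectCorruption",
    "-i", "/data/fsimage_0000000000000000042",
    "-o", "/tmp/corruption-report.tsv",
    "-t", "/tmp/oiv-scratch"   // temp dir for images too large for memory
};
// run() parses options exactly like the hdfs oiv shell entry point does.
int status = OfflineImageViewerPB.run(args);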

[49/50] [abbrv] hadoop git commit: YARN-9057. Removed third party class bundle from CSI jar file. Contributed by Weiwei Yang

2018-12-05 Thread shv
YARN-9057.  Removed third party class bundle from CSI jar file.
Contributed by Weiwei Yang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1dabb31c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1dabb31c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1dabb31c

Branch: refs/heads/HDFS-12943
Commit: 1dabb31cdf907cbee418c469368a59393fd52844
Parents: 228156c
Author: Eric Yang 
Authored: Wed Dec 5 15:56:10 2018 -0500
Committer: Eric Yang 
Committed: Wed Dec 5 15:56:10 2018 -0500

--
 .../resources/assemblies/hadoop-yarn-dist.xml   |  4 +
 .../hadoop-yarn/hadoop-yarn-csi/pom.xml | 78 
 2 files changed, 19 insertions(+), 63 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1dabb31c/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
--
diff --git 
a/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml 
b/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
index 4055acb..a5c3c0e 100644
--- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
+++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
@@ -220,6 +220,10 @@
      <directory>hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/target/lib</directory>
      <outputDirectory>share/hadoop/${hadoop.component}/timelineservice/lib</outputDirectory>
    </fileSet>
+    <fileSet>
+      <directory>hadoop-yarn/hadoop-yarn-csi/target/lib</directory>
+      <outputDirectory>share/hadoop/${hadoop.component}/csi/lib</outputDirectory>
+    </fileSet>
   
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1dabb31c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/pom.xml
index 27d8452..1a19f0e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/pom.xml
@@ -86,14 +86,17 @@
    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-common</artifactId>
+      <scope>provided</scope>
    </dependency>
    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-yarn-common</artifactId>
+      <scope>provided</scope>
    </dependency>
    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-yarn-api</artifactId>
+      <scope>provided</scope>
    </dependency>
    <dependency>
      <groupId>javax.annotation</groupId>
@@ -114,6 +117,18 @@
      <plugin>
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-dependency-plugin</artifactId>
+        <executions>
+          <execution>
+            <phase>package</phase>
+            <goals>
+              <goal>copy-dependencies</goal>
+            </goals>
+            <configuration>
+              <includeScope>runtime</includeScope>
+              <outputDirectory>${project.build.directory}/lib</outputDirectory>
+            </configuration>
+          </execution>
+        </executions>
      </plugin>
      <plugin>
        <groupId>org.apache.maven.plugins</groupId>
@@ -148,69 +163,6 @@
          </execution>
        </executions>
      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-shade-plugin</artifactId>
-        <version>${maven-shade-plugin.version}</version>
-        <configuration>
-          <createDependencyReducedPom>false</createDependencyReducedPom>
-        </configuration>
-        <executions>
-          <execution>
-            <phase>package</phase>
-            <goals>
-              <goal>shade</goal>
-            </goals>
-            <configuration>
-              <relocations>
-                <relocation>
-                  <pattern>io.grpc</pattern>
-                  <shadedPattern>csi.io.grpc</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>com.google</pattern>
-                  <shadedPattern>csi.com.google</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>io.netty</pattern>
-                  <shadedPattern>csi.io.netty</shadedPattern>
-                </relocation>
-              </relocations>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-antrun-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>unpack</id>
-            <phase>package</phase>
-            <configuration>
-              <target>
-                <!-- ant target contents lost in the mail archive rendering -->
-              </target>
-            </configuration>
-            <goals>
-              <goal>run</goal>

[26/50] [abbrv] hadoop git commit: HDFS-14106. Refactor NamenodeFsck#copyBlock. Contributed by Beluga Behr.

2018-12-05 Thread shv
HDFS-14106. Refactor NamenodeFsck#copyBlock. Contributed by Beluga Behr.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b09cfad4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b09cfad4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b09cfad4

Branch: refs/heads/HDFS-12943
Commit: b09cfad43268765f0b35af115b82ddb8ac87a3a4
Parents: 6d7b44c
Author: Giovanni Matteo Fumarola 
Authored: Fri Nov 30 10:47:59 2018 -0800
Committer: Giovanni Matteo Fumarola 
Committed: Fri Nov 30 10:47:59 2018 -0800

--
 .../hdfs/server/namenode/NamenodeFsck.java  | 37 +++-
 1 file changed, 21 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b09cfad4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index f54b407..b4c0a93 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -1091,28 +1091,33 @@ public class NamenodeFsck implements 
DataEncryptionKeyFactory {
 deadNodes.add(chosenNode);
   }
 }
-byte[] buf = new byte[1024];
-int cnt = 0;
-boolean success = true;
-long bytesRead = 0;
+
+long bytesRead = 0L;
 try {
-  while ((cnt = blockReader.read(buf, 0, buf.length)) > 0) {
-fos.write(buf, 0, cnt);
-bytesRead += cnt;
-  }
-  if ( bytesRead != block.getNumBytes() ) {
-throw new IOException("Recorded block size is " + block.getNumBytes() +
-  ", but datanode returned " +bytesRead+" bytes");
-  }
+  bytesRead = copyBlock(blockReader, fos);
 } catch (Exception e) {
-  LOG.error("Error reading block", e);
-  success = false;
+  throw new Exception("Could not copy block data for " + lblock.getBlock(),
+  e);
 } finally {
   blockReader.close();
 }
-if (!success) {
-  throw new Exception("Could not copy block data for " + 
lblock.getBlock());
+
+if (bytesRead != block.getNumBytes()) {
+  throw new IOException("Recorded block size is " + block.getNumBytes()
+  + ", but datanode returned " + bytesRead + " bytes");
+}
+  }
+
+  private long copyBlock(BlockReader blockReader, OutputStream os)
+  throws IOException {
+final byte[] buf = new byte[8192];
+int cnt = 0;
+long bytesRead = 0L;
+while ((cnt = blockReader.read(buf, 0, buf.length)) > 0) {
+  os.write(buf, 0, cnt);
+  bytesRead += cnt;
+}
+return bytesRead;
+  }
 
   @Override





[33/50] [abbrv] hadoop git commit: HDFS-12946. Add a tool to check rack configuration against EC policies. Contributed by Kitti Nanasi.

2018-12-05 Thread shv
HDFS-12946. Add a tool to check rack configuration against EC policies. 
Contributed by Kitti Nanasi.

Signed-off-by: Wei-Chiu Chuang 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dd5e7c6b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dd5e7c6b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dd5e7c6b

Branch: refs/heads/HDFS-12943
Commit: dd5e7c6b7239a93f2391beaa11181e442a387db4
Parents: 3044b78
Author: Kitti Nanasi 
Authored: Mon Dec 3 09:59:56 2018 -0800
Committer: Wei-Chiu Chuang 
Committed: Mon Dec 3 10:01:09 2018 -0800

--
 .../federation/metrics/NamenodeBeanMetrics.java |   5 +
 .../server/blockmanagement/DatanodeManager.java |   7 +
 .../hdfs/server/common/ECTopologyVerifier.java  | 124 +++
 .../namenode/ECTopologyVerifierResult.java  |  45 ++
 .../namenode/ErasureCodingPolicyManager.java|   9 ++
 .../hdfs/server/namenode/FSNamesystem.java  |  19 +++
 .../hdfs/server/namenode/NameNodeMXBean.java|   7 +
 .../org/apache/hadoop/hdfs/tools/ECAdmin.java   |  51 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  35 +
 .../hdfs/TestErasureCodingMultipleRacks.java|  18 +--
 .../server/namenode/TestNameNodeMXBean.java |  19 +++
 .../apache/hadoop/hdfs/tools/TestECAdmin.java   | 157 +++
 12 files changed, 479 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd5e7c6b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java
index e8ebf0d..0ca5f73 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/metrics/NamenodeBeanMetrics.java
@@ -706,4 +706,9 @@ public class NamenodeBeanMetrics
   public int getNumEncryptionZones() {
 return 0;
   }
+
+  @Override
+  public String getVerifyECWithTopologyResult() {
+return null;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd5e7c6b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 430c0d4..8a3f57b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -1253,6 +1253,13 @@ public class DatanodeManager {
 return getDatanodeListForReport(DatanodeReportType.DEAD).size();
   }
 
+  /** @return the number of datanodes. */
+  public int getNumOfDataNodes() {
+synchronized (this) {
+  return datanodeMap.size();
+}
+  }
+
   /** @return list of datanodes where decommissioning is in progress. */
  public List<DatanodeDescriptor> getDecommissioningNodes() {
 // There is no need to take namesystem reader lock as

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd5e7c6b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/ECTopologyVerifier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/ECTopologyVerifier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/ECTopologyVerifier.java
new file mode 100644
index 000..3591b2d
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/ECTopologyVerifier.java
@@ -0,0 +1,124 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership. The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in 
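
A sketch of invoking the check programmatically; the command-line equivalent
is "hdfs ec -verifyClusterSetup", and the constructor form below follows
ECAdmin's Tool pattern (treat it as an assumption, not part of this diff):

Configuration conf = new HdfsConfiguration();
// Reports whether every enabled EC policy is satisfiable on the current
// rack/datanode topology; a non-zero return code flags a mismatch.
int rc = ToolRunner.run(new ECAdmin(conf), new String[]{"-verifyClusterSetup"});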

[08/50] [abbrv] hadoop git commit: HDDS-642. Add chill mode exit condition for pipeline availability. Contributed by Yiqun Lin.

2018-12-05 Thread shv
HDDS-642. Add chill mode exit condition for pipeline availability. Contributed 
by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b71cc7f3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b71cc7f3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b71cc7f3

Branch: refs/heads/HDFS-12943
Commit: b71cc7f33edbbf6a98d1efb330f1c748b5dd6e75
Parents: efc4d91
Author: Ajay Kumar 
Authored: Wed Nov 28 17:45:46 2018 -0800
Committer: Ajay Kumar 
Committed: Wed Nov 28 17:47:57 2018 -0800

--
 .../org/apache/hadoop/hdds/HddsConfigKeys.java  |   5 +
 .../common/src/main/resources/ozone-default.xml |   9 ++
 .../scm/chillmode/PipelineChillModeRule.java| 108 +++
 .../hdds/scm/chillmode/SCMChillModeManager.java |  19 +++-
 .../scm/server/StorageContainerManager.java |   5 +-
 .../scm/chillmode/TestSCMChillModeManager.java  |  81 --
 6 files changed, 213 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b71cc7f3/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
index 2d28a5b..f16503e 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
@@ -87,6 +87,11 @@ public final class HddsConfigKeys {
   "hdds.scm.chillmode.min.datanode";
   public static final int HDDS_SCM_CHILLMODE_MIN_DATANODE_DEFAULT = 1;
 
+  public static final String HDDS_SCM_CHILLMODE_PIPELINE_AVAILABILITY_CHECK =
+  "hdds.scm.chillmode.pipeline-availability.check";
+  public static final boolean
+  HDDS_SCM_CHILLMODE_PIPELINE_AVAILABILITY_CHECK_DEFAULT = false;
+
   // % of containers which should have at least one reported replica
   // before SCM comes out of chill mode.
   public static final String HDDS_SCM_CHILLMODE_THRESHOLD_PCT =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b71cc7f3/hadoop-hdds/common/src/main/resources/ozone-default.xml
--
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml 
b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 9f3d7e1..aa22b2b 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -1232,6 +1232,15 @@
    </description>
  </property>

+  <property>
+    <name>hdds.scm.chillmode.pipeline-availability.check</name>
+    <value>false</value>
+    <tag>HDDS,SCM,OPERATION</tag>
+    <description>
+      Boolean value to enable pipeline availability check during SCM chill mode.
+    </description>
+  </property>
+
  <property>
    <name>hdds.container.action.max.limit</name>
    <value>20</value>
    <tag>DATANODE</tag>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b71cc7f3/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/chillmode/PipelineChillModeRule.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/chillmode/PipelineChillModeRule.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/chillmode/PipelineChillModeRule.java
new file mode 100644
index 000..f9a6e59
--- /dev/null
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/chillmode/PipelineChillModeRule.java
@@ -0,0 +1,108 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.chillmode;
+
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport;
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
+import 
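
The rule stays dormant unless the flag is set; a minimal opt-in sketch:

OzoneConfiguration conf = new OzoneConfiguration();
// Defaults to false (see the ozone-default.xml hunk above). When true, SCM
// also waits on pipeline reports before it will leave chill mode.
conf.setBoolean(
    HddsConfigKeys.HDDS_SCM_CHILLMODE_PIPELINE_AVAILABILITY_CHECK, true);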

[14/50] [abbrv] hadoop git commit: HDDS-850. ReadStateMachineData hits OverlappingFileLockException in ContainerStateMachine. Contributed by Shashikant Banerjee.

2018-12-05 Thread shv
HDDS-850. ReadStateMachineData hits OverlappingFileLockException in 
ContainerStateMachine. Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5e102f9a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5e102f9a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5e102f9a

Branch: refs/heads/HDFS-12943
Commit: 5e102f9aa54d3057ef5f0755d45428f22a24990b
Parents: 7eb0d3a
Author: Shashikant Banerjee 
Authored: Thu Nov 29 22:20:08 2018 +0530
Committer: Shashikant Banerjee 
Committed: Thu Nov 29 22:20:08 2018 +0530

--
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   |   8 ++
 .../apache/hadoop/ozone/OzoneConfigKeys.java|   9 ++
 .../main/proto/DatanodeContainerProtocol.proto  |   1 +
 .../common/src/main/resources/ozone-default.xml |   8 ++
 .../server/ratis/ContainerStateMachine.java | 134 +++
 .../server/ratis/XceiverServerRatis.java|  14 +-
 .../container/keyvalue/KeyValueHandler.java |   7 +-
 .../keyvalue/impl/ChunkManagerImpl.java |  11 +-
 .../keyvalue/interfaces/ChunkManager.java   |   5 +-
 .../keyvalue/TestChunkManagerImpl.java  |   6 +-
 .../common/impl/TestContainerPersistence.java   |  11 +-
 11 files changed, 143 insertions(+), 71 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e102f9a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index 6733b8e..062b101 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -93,6 +93,14 @@ public final class ScmConfigKeys {
   public static final String DFS_CONTAINER_RATIS_LOG_QUEUE_SIZE =
   "dfs.container.ratis.log.queue.size";
   public static final int DFS_CONTAINER_RATIS_LOG_QUEUE_SIZE_DEFAULT = 128;
+
+  // expiry interval stateMachineData cache entry inside containerStateMachine
+  public static final String
+  DFS_CONTAINER_RATIS_STATEMACHINEDATA_CACHE_EXPIRY_INTERVAL =
+  "dfs.container.ratis.statemachine.cache.expiry.interval";
+  public static final String
+  DFS_CONTAINER_RATIS_STATEMACHINEDATA_CACHE_EXPIRY_INTERVAL_DEFAULT =
+  "10s";
   public static final String DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_KEY =
   "dfs.ratis.client.request.timeout.duration";
   public static final TimeDuration

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e102f9a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index 879f773..df233f7 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -249,6 +249,15 @@ public final class OzoneConfigKeys {
   DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT =
   ScmConfigKeys.DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT;
 
+  public static final String
+  DFS_CONTAINER_RATIS_STATEMACHINEDATA_CACHE_EXPIRY_INTERVAL =
+  ScmConfigKeys.
+  DFS_CONTAINER_RATIS_STATEMACHINEDATA_CACHE_EXPIRY_INTERVAL;
+  public static final String
+  DFS_CONTAINER_RATIS_STATEMACHINEDATA_CACHE_EXPIRY_INTERVAL_DEFAULT =
+  ScmConfigKeys.
+  DFS_CONTAINER_RATIS_STATEMACHINEDATA_CACHE_EXPIRY_INTERVAL_DEFAULT;
+
   public static final String DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR =
   "dfs.container.ratis.datanode.storage.dir";
   public static final String DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_KEY =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e102f9a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
--
diff --git a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto 
b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
index 3695b6b..5237af8 100644
--- a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
@@ -392,6 +392,7 @@ message  WriteChunkResponseProto {
 message  ReadChunkRequestProto  {
   required DatanodeBlockID blockID = 1;
   required ChunkInfo chunkData = 2;
+  optional bool readFromTmpFile = 3 [default = false];
 }
 
 message  
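
A sketch of tuning the new cache expiry (the value is illustrative; the
default above is "10s"):

OzoneConfiguration conf = new OzoneConfiguration();
// The cache lets readStateMachineData answer from memory while a chunk file
// is still being written, avoiding the file-lock clash; entries expire
// after this interval.
conf.set(OzoneConfigKeys
    .DFS_CONTAINER_RATIS_STATEMACHINEDATA_CACHE_EXPIRY_INTERVAL, "30s");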

[18/50] [abbrv] hadoop git commit: HDFS-14112. Avoid recursive call to external authorizer for getContentSummary.

2018-12-05 Thread shv
HDFS-14112. Avoid recursive call to external authorizer for getContentSummary.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0081b02e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0081b02e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0081b02e

Branch: refs/heads/HDFS-12943
Commit: 0081b02e35306cb757c63d0f11a536941d73a139
Parents: ae5fbdd
Author: Tsz Wo Nicholas Sze 
Authored: Thu Nov 29 13:55:21 2018 -0800
Committer: Tsz Wo Nicholas Sze 
Committed: Thu Nov 29 13:55:21 2018 -0800

--
 .../main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java   |  4 
 .../hdfs/server/namenode/FSDirStatAndListingOp.java   |  5 +
 .../apache/hadoop/hdfs/server/namenode/FSDirectory.java   |  7 +++
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml   | 10 ++
 4 files changed, 26 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0081b02e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 3628b2b..5899c92 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -284,6 +284,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT;
   public static final String  DFS_PERMISSIONS_ENABLED_KEY =
   HdfsClientConfigKeys.DeprecatedKeys.DFS_PERMISSIONS_ENABLED_KEY;
+  public static final String  DFS_PERMISSIONS_CONTENT_SUMMARY_SUBACCESS_KEY
+  = "dfs.permissions.ContentSummary.subAccess";
+  public static final boolean DFS_PERMISSIONS_CONTENT_SUMMARY_SUBACCESS_DEFAULT
+  = false;
   public static final boolean DFS_PERMISSIONS_ENABLED_DEFAULT = true;
   public static final String  DFS_PERMISSIONS_SUPERUSERGROUP_KEY =
   HdfsClientConfigKeys.DeprecatedKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0081b02e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index 01de236..052e522 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -128,6 +128,11 @@ class FSDirStatAndListingOp {
   static ContentSummary getContentSummary(
   FSDirectory fsd, FSPermissionChecker pc, String src) throws IOException {
 final INodesInPath iip = fsd.resolvePath(pc, src, DirOp.READ_LINK);
+if (fsd.isPermissionEnabled() && 
fsd.isPermissionContentSummarySubAccess()) {
+  fsd.checkPermission(pc, iip, false, null, null, null,
+  FsAction.READ_EXECUTE);
+  pc = null;
+}
 // getContentSummaryInt() call will check access (if enabled) when
 // traversing all sub directories.
 return getContentSummaryInt(fsd, pc, iip);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0081b02e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 712a327..45f859c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -175,6 +175,7 @@ public class FSDirectory implements Closeable {
   private final ReentrantReadWriteLock dirLock;
 
   private final boolean isPermissionEnabled;
+  private final boolean isPermissionContentSummarySubAccess;
   /**
* Support for ACLs is controlled by a configuration flag. If the
* configuration flag is false, then the NameNode will reject all
@@ -274,6 +275,9 @@ public class FSDirectory implements Closeable {
 this.isPermissionEnabled = conf.getBoolean(
   

[02/50] [abbrv] hadoop git commit: YARN-8882. [YARN-8851] Add a shared device mapping manager (scheduler) for device plugins. (Zhankun Tang via wangda)

2018-12-05 Thread shv
YARN-8882. [YARN-8851] Add a shared device mapping manager (scheduler) for 
device plugins. (Zhankun Tang via wangda)

Change-Id: I9435136642c3d556971a357bf687f69df90bb45e


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/579ef4be
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/579ef4be
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/579ef4be

Branch: refs/heads/HDFS-12943
Commit: 579ef4be063745c5211127eca83a393ceddc8b79
Parents: 9de8e8d
Author: Wangda Tan 
Authored: Wed Nov 28 14:09:52 2018 -0800
Committer: Wangda Tan 
Committed: Wed Nov 28 14:09:52 2018 -0800

--
 .../resourceplugin/ResourcePluginManager.java   |  14 +-
 .../deviceframework/DeviceMappingManager.java   | 324 
 .../deviceframework/DevicePluginAdapter.java|  20 +-
 .../DeviceResourceHandlerImpl.java  | 145 +++
 .../TestDeviceMappingManager.java   | 366 +
 .../TestDevicePluginAdapter.java| 388 ++-
 6 files changed, 1245 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/579ef4be/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/ResourcePluginManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/ResourcePluginManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/ResourcePluginManager.java
index 9741b12..6dfe817 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/ResourcePluginManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/resourceplugin/ResourcePluginManager.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
 import org.apache.hadoop.yarn.server.nodemanager.Context;
 import org.apache.hadoop.yarn.server.nodemanager.api.deviceplugin.DevicePlugin;
 import 
org.apache.hadoop.yarn.server.nodemanager.api.deviceplugin.DeviceRegisterRequest;
+import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.deviceframework.DeviceMappingManager;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.deviceframework.DevicePluginAdapter;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.fpga.FpgaResourcePlugin;
 import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.resourceplugin.gpu.GpuResourcePlugin;
@@ -52,12 +53,13 @@ import static 
org.apache.hadoop.yarn.api.records.ResourceInformation.GPU_URI;
 public class ResourcePluginManager {
   private static final Logger LOG =
   LoggerFactory.getLogger(ResourcePluginManager.class);
-  private static final Set<String> SUPPORTED_RESOURCE_PLUGINS = ImmutableSet.of(
-  GPU_URI, FPGA_URI);
+  private static final Set<String> SUPPORTED_RESOURCE_PLUGINS =
+  ImmutableSet.of(GPU_URI, FPGA_URI);
 
  private Map<String, ResourcePlugin> configuredPlugins =
   Collections.emptyMap();
 
+  private DeviceMappingManager deviceMappingManager = null;
 
   public synchronized void initialize(Context context)
   throws YarnException, ClassNotFoundException {
@@ -123,7 +125,7 @@ public class ResourcePluginManager {
   throws YarnRuntimeException, ClassNotFoundException {
 LOG.info("The pluggable device framework enabled," +
 "trying to load the vendor plugins");
-
+deviceMappingManager = new DeviceMappingManager(context);
 String[] pluginClassNames = configuration.getStrings(
 YarnConfiguration.NM_PLUGGABLE_DEVICE_FRAMEWORK_DEVICE_CLASSES);
 if (null == pluginClassNames) {
@@ -174,7 +176,7 @@ public class ResourcePluginManager {
   resourceName,
   pluginClassName);
   DevicePluginAdapter pluginAdapter = new DevicePluginAdapter(
-  resourceName, dpInstance);
+  resourceName, dpInstance, deviceMappingManager);
   LOG.info("Adapter of {} created. Initializing..", pluginClassName);
   try {
 pluginAdapter.initialize(context);
@@ -235,6 +237,10 @@ public class ResourcePluginManager {
 return true;
   }
 
+  public DeviceMappingManager getDeviceMappingManager() {
+return deviceMappingManager;
+  }
+
   public synchronized void cleanup() throws YarnException {
  
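
A sketch of pointing the framework at a vendor plugin; the plugin class is a
placeholder, and the enable key is spelled out literally because its constant
does not appear in this excerpt:

Configuration conf = new YarnConfiguration();
conf.setBoolean("yarn.nodemanager.pluggable-device-framework.enabled", true);
conf.setStrings(
    YarnConfiguration.NM_PLUGGABLE_DEVICE_FRAMEWORK_DEVICE_CLASSES,
    "com.example.FooDevicePlugin");
// On NM start-up, ResourcePluginManager instantiates the listed classes,
// wraps each in a DevicePluginAdapter, and routes scheduling of the devices
// they report through the shared DeviceMappingManager added here.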

[12/50] [abbrv] hadoop git commit: HDDS-808. Simplify OMAction and DNAction classes used for AuditLogging. Contributed by Dinesh Chitlangia.

2018-12-05 Thread shv
HDDS-808. Simplify OMAction and DNAction classes used for AuditLogging. 
Contributed by Dinesh Chitlangia.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/184cced5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/184cced5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/184cced5

Branch: refs/heads/HDFS-12943
Commit: 184cced513c5599d7b33c9124692fbcd2e6d338e
Parents: 07142f5
Author: Ajay Kumar 
Authored: Thu Nov 29 08:35:02 2018 -0800
Committer: Ajay Kumar 
Committed: Thu Nov 29 08:35:20 2018 -0800

--
 .../org/apache/hadoop/ozone/audit/DNAction.java | 44 +++-
 .../apache/hadoop/ozone/audit/DummyAction.java  | 36 ++---
 .../org/apache/hadoop/ozone/audit/OMAction.java | 54 +---
 3 files changed, 58 insertions(+), 76 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/184cced5/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/DNAction.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/DNAction.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/DNAction.java
index ce34c46..1c87f2b 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/DNAction.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/DNAction.java
@@ -21,34 +21,28 @@ package org.apache.hadoop.ozone.audit;
  */
 public enum DNAction implements AuditAction {
 
-  CREATE_CONTAINER("CREATE_CONTAINER"),
-  READ_CONTAINER("READ_CONTAINER"),
-  UPDATE_CONTAINER("UPDATE_CONTAINER"),
-  DELETE_CONTAINER("DELETE_CONTAINER"),
-  LIST_CONTAINER("LIST_CONTAINER"),
-  PUT_BLOCK("PUT_BLOCK"),
-  GET_BLOCK("GET_BLOCK"),
-  DELETE_BLOCK("DELETE_BLOCK"),
-  LIST_BLOCK("LIST_BLOCK"),
-  READ_CHUNK("READ_CHUNK"),
-  DELETE_CHUNK("DELETE_CHUNK"),
-  WRITE_CHUNK("WRITE_CHUNK"),
-  LIST_CHUNK("LIST_CHUNK"),
-  COMPACT_CHUNK("COMPACT_CHUNK"),
-  PUT_SMALL_FILE("PUT_SMALL_FILE"),
-  GET_SMALL_FILE("GET_SMALL_FILE"),
-  CLOSE_CONTAINER("CLOSE_CONTAINER"),
-  GET_COMMITTED_BLOCK_LENGTH("GET_COMMITTED_BLOCK_LENGTH");
-
-  private String action;
-
-  DNAction(String action) {
-this.action = action;
-  }
+  CREATE_CONTAINER,
+  READ_CONTAINER,
+  UPDATE_CONTAINER,
+  DELETE_CONTAINER,
+  LIST_CONTAINER,
+  PUT_BLOCK,
+  GET_BLOCK,
+  DELETE_BLOCK,
+  LIST_BLOCK,
+  READ_CHUNK,
+  DELETE_CHUNK,
+  WRITE_CHUNK,
+  LIST_CHUNK,
+  COMPACT_CHUNK,
+  PUT_SMALL_FILE,
+  GET_SMALL_FILE,
+  CLOSE_CONTAINER,
+  GET_COMMITTED_BLOCK_LENGTH;
 
   @Override
   public String getAction() {
-return this.action;
+return this.toString();
   }
 
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/184cced5/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyAction.java
--
diff --git 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyAction.java
 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyAction.java
index 76cd39a..d2da3e6 100644
--- 
a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyAction.java
+++ 
b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyAction.java
@@ -22,30 +22,24 @@ package org.apache.hadoop.ozone.audit;
  */
 public enum DummyAction implements AuditAction {
 
-  CREATE_VOLUME("CREATE_VOLUME"),
-  CREATE_BUCKET("CREATE_BUCKET"),
-  CREATE_KEY("CREATE_KEY"),
-  READ_VOLUME("READ_VOLUME"),
-  READ_BUCKET("READ_BUCKET"),
-  READ_KEY("READ_BUCKET"),
-  UPDATE_VOLUME("UPDATE_VOLUME"),
-  UPDATE_BUCKET("UPDATE_BUCKET"),
-  UPDATE_KEY("UPDATE_KEY"),
-  DELETE_VOLUME("DELETE_VOLUME"),
-  DELETE_BUCKET("DELETE_BUCKET"),
-  DELETE_KEY("DELETE_KEY"),
-  SET_OWNER("SET_OWNER"),
-  SET_QUOTA("SET_QUOTA");
-
-  private final String action;
-
-  DummyAction(String action) {
-this.action = action;
-  }
+  CREATE_VOLUME,
+  CREATE_BUCKET,
+  CREATE_KEY,
+  READ_VOLUME,
+  READ_BUCKET,
+  READ_KEY,
+  UPDATE_VOLUME,
+  UPDATE_BUCKET,
+  UPDATE_KEY,
+  DELETE_VOLUME,
+  DELETE_BUCKET,
+  DELETE_KEY,
+  SET_OWNER,
+  SET_QUOTA;
 
   @Override
   public String getAction() {
-return this.action;
+return this.toString();
   }
 
 }
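
A note on the pattern: Enum.toString() defaults to name(), so deriving the audit
string from the constant itself removes a whole class of copy-paste bugs -- the
removed DummyAction lines above even contain one (READ_KEY was mapped to
"READ_BUCKET"). A minimal, self-contained sketch of the simplified shape
(illustrative names, not the project's classes):

    // Sketch: the enum constant's own name doubles as the audit string.
    interface AuditAction {
      String getAction();
    }

    enum Action implements AuditAction {
      CREATE_VOLUME,
      DELETE_VOLUME;

      @Override
      public String getAction() {
        // Enum.toString() defaults to name(), e.g. "CREATE_VOLUME",
        // so there is no per-constant string field to drift out of sync.
        return this.toString();
      }
    }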

http://git-wip-us.apache.org/repos/asf/hadoop/blob/184cced5/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java
--
diff --git 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java 
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java
index 1d4d646..8794014 100644
--- 
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/audit/OMAction.java
+++ 

[20/50] [abbrv] hadoop git commit: HDFS-13870. WebHDFS: Document ALLOWSNAPSHOT and DISALLOWSNAPSHOT API doc. Contributed by Siyao Meng.

2018-12-05 Thread shv
HDFS-13870. WebHDFS: Document ALLOWSNAPSHOT and DISALLOWSNAPSHOT API doc. 
Contributed by Siyao Meng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0e36e935
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0e36e935
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0e36e935

Branch: refs/heads/HDFS-12943
Commit: 0e36e935d909862401890d0a5410204504f48b31
Parents: bad1203
Author: Yiqun Lin 
Authored: Fri Nov 30 11:31:34 2018 +0800
Committer: Yiqun Lin 
Committed: Fri Nov 30 11:31:34 2018 +0800

--
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md| 24 
 1 file changed, 24 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e36e935/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
index 383eda0..8661659 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
@@ -64,6 +64,8 @@ The HTTP REST API supports the complete 
[FileSystem](../../api/org/apache/hadoop
 * [`SETTIMES`](#Set_Access_or_Modification_Time) (see 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).setTimes)
 * [`RENEWDELEGATIONTOKEN`](#Renew_Delegation_Token) (see 
[DelegationTokenAuthenticator](../../api/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.html).renewDelegationToken)
 * [`CANCELDELEGATIONTOKEN`](#Cancel_Delegation_Token) (see 
[DelegationTokenAuthenticator](../../api/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.html).cancelDelegationToken)
+* [`ALLOWSNAPSHOT`](#Allow_Snapshot)
+* [`DISALLOWSNAPSHOT`](#Disallow_Snapshot)
 * [`CREATESNAPSHOT`](#Create_Snapshot) (see 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).createSnapshot)
 * [`RENAMESNAPSHOT`](#Rename_Snapshot) (see 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).renameSnapshot)
 * [`SETXATTR`](#Set_XAttr) (see 
[FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).setXAttr)
@@ -1302,6 +1304,28 @@ See also: 
[HDFSErasureCoding](./HDFSErasureCoding.html#Administrative_commands).
 Snapshot Operations
 ---
 
+### Allow Snapshot
+
+* Submit a HTTP PUT request.
+
+curl -i -X PUT 
"http://:/webhdfs/v1/?op=ALLOWSNAPSHOT"
+
+The client receives a response with zero content length on success:
+
+HTTP/1.1 200 OK
+Content-Length: 0
+
+### Disallow Snapshot
+
+* Submit a HTTP PUT request.
+
+curl -i -X PUT 
"http://:/webhdfs/v1/?op=DISALLOWSNAPSHOT"
+
+The client receives a response with zero content length on success:
+
+HTTP/1.1 200 OK
+Content-Length: 0
+
 ### Create Snapshot
 
 * Submit a HTTP PUT request.


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
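
For reference, the two documented operations mirror the snapshot-admin calls on
DistributedFileSystem; a minimal sketch of the Java-side equivalent (the
NameNode URI and path below are placeholders):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class SnapshotAdmin {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // hdfs://nn-host:8020 is a placeholder NameNode address.
        try (FileSystem fs =
            FileSystem.get(URI.create("hdfs://nn-host:8020/"), conf)) {
          DistributedFileSystem dfs = (DistributedFileSystem) fs;
          Path dir = new Path("/data/snapshottable");  // placeholder path
          dfs.allowSnapshot(dir);     // same effect as op=ALLOWSNAPSHOT
          dfs.disallowSnapshot(dir);  // same effect as op=DISALLOWSNAPSHOT
        }
      }
    }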



[32/50] [abbrv] hadoop git commit: HDDS-848. Create SCM metrics related to container state. Contributed by Bharat Viswanadham.

2018-12-05 Thread shv
HDDS-848. Create SCM metrics related to container state. Contributed by Bharat 
Viswanadham.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3044b78b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3044b78b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3044b78b

Branch: refs/heads/HDFS-12943
Commit: 3044b78bd0191883d5f9daf2601a58a268beed06
Parents: 042c8ef
Author: Yiqun Lin 
Authored: Mon Dec 3 17:16:34 2018 +0800
Committer: Yiqun Lin 
Committed: Mon Dec 3 17:16:34 2018 +0800

--
 .../hadoop/hdds/scm/server/SCMMXBean.java   |  5 ++
 .../scm/server/StorageContainerManager.java | 11 +++
 .../apache/hadoop/ozone/scm/TestSCMMXBean.java  | 81 ++--
 3 files changed, 91 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3044b78b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java
index 4093918..dc09ceb 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMMXBean.java
@@ -59,4 +59,9 @@ public interface SCMMXBean extends ServiceRuntimeInfo {
* @return String
*/
   double getChillModeCurrentContainerThreshold();
+
+  /**
+   * Returns the container count in all states.
+   */
+  Map<String, Integer> getContainerStateCount();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3044b78b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index 2d27984..a0d5e1d 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -30,6 +30,7 @@ import com.google.protobuf.BlockingService;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.block.BlockManager;
@@ -925,6 +926,16 @@ public final class StorageContainerManager extends 
ServiceRuntimeInfoImpl
 return scmChillModeManager.getCurrentContainerThreshold();
   }
 
+  @Override
+  public Map<String, Integer> getContainerStateCount() {
+Map<String, Integer> nodeStateCount = new HashMap<>();
+for (HddsProtos.LifeCycleState state: HddsProtos.LifeCycleState.values()) {
+  nodeStateCount.put(state.toString(), containerManager.getContainers(
+  state).size());
+}
+return nodeStateCount;
+  }
+
   /**
* Startup options.
*/
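
Since SCMMXBean is an MXBean, the new Map<String, Integer> attribute surfaces
over JMX as TabularData; a hedged sketch of reading it in-process (the
ObjectName below is an assumption, not quoted from this patch):

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;
    import javax.management.openmbean.CompositeData;
    import javax.management.openmbean.TabularData;

    public class ContainerStateProbe {
      public static void main(String[] args) throws Exception {
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        // Assumed MXBean name; the name registered by a real SCM may differ.
        ObjectName scm = new ObjectName(
            "Hadoop:service=StorageContainerManager,"
                + "name=StorageContainerManagerInfo");
        // The MXBean framework maps Map<String, Integer> to TabularData.
        TabularData table =
            (TabularData) mbs.getAttribute(scm, "ContainerStateCount");
        for (Object row : table.values()) {
          CompositeData cd = (CompositeData) row;
          System.out.println(cd.get("key") + " = " + cd.get("value"));
        }
      }
    }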

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3044b78b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java
index 3136df2..eabf5e0 100644
--- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java
@@ -20,6 +20,10 @@ package org.apache.hadoop.ozone.scm;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.ContainerManager;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -31,8 +35,11 @@ import javax.management.MBeanServer;
 import javax.management.ObjectName;
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.Map;
 import java.util.Iterator;
+import 

[16/50] [abbrv] hadoop git commit: HDFS-14095. EC: Track Erasure Coding commands in DFS statistics. Contributed by Ayush Saxena.

2018-12-05 Thread shv
HDFS-14095. EC: Track Erasure Coding commands in DFS statistics. Contributed by 
Ayush Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f5347368
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f5347368
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f5347368

Branch: refs/heads/HDFS-12943
Commit: f534736867eed962899615ca1b7eb68bcf591d17
Parents: d0edd37
Author: Brahma Reddy Battula 
Authored: Fri Nov 30 00:18:27 2018 +0530
Committer: Brahma Reddy Battula 
Committed: Fri Nov 30 00:18:27 2018 +0530

--
 .../hadoop/hdfs/DFSOpsCountStatistics.java  |  9 +++
 .../hadoop/hdfs/DistributedFileSystem.java  | 18 ++
 .../hadoop/hdfs/TestDistributedFileSystem.java  | 63 +++-
 3 files changed, 89 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5347368/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java
index 3dcf13b..b9852ba 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java
@@ -41,6 +41,7 @@ public class DFSOpsCountStatistics extends StorageStatistics {
 
   /** This is for counting distributed file system operations. */
   public enum OpType {
+ADD_EC_POLICY("op_add_ec_policy"),
 ALLOW_SNAPSHOT("op_allow_snapshot"),
 APPEND(CommonStatisticNames.OP_APPEND),
 CONCAT("op_concat"),
@@ -51,10 +52,15 @@ public class DFSOpsCountStatistics extends 
StorageStatistics {
 CREATE_SYM_LINK("op_create_symlink"),
 DELETE(CommonStatisticNames.OP_DELETE),
 DELETE_SNAPSHOT("op_delete_snapshot"),
+DISABLE_EC_POLICY("op_disable_ec_policy"),
 DISALLOW_SNAPSHOT("op_disallow_snapshot"),
+ENABLE_EC_POLICY("op_enable_ec_policy"),
 EXISTS(CommonStatisticNames.OP_EXISTS),
 GET_BYTES_WITH_FUTURE_GS("op_get_bytes_with_future_generation_stamps"),
 GET_CONTENT_SUMMARY(CommonStatisticNames.OP_GET_CONTENT_SUMMARY),
+GET_EC_CODECS("op_get_ec_codecs"),
+GET_EC_POLICY("op_get_ec_policy"),
+GET_EC_POLICIES("op_get_ec_policies"),
 GET_FILE_BLOCK_LOCATIONS("op_get_file_block_locations"),
 GET_FILE_CHECKSUM(CommonStatisticNames.OP_GET_FILE_CHECKSUM),
 GET_FILE_LINK_STATUS("op_get_file_link_status"),
@@ -76,11 +82,13 @@ public class DFSOpsCountStatistics extends 
StorageStatistics {
 REMOVE_ACL(CommonStatisticNames.OP_REMOVE_ACL),
 REMOVE_ACL_ENTRIES(CommonStatisticNames.OP_REMOVE_ACL_ENTRIES),
 REMOVE_DEFAULT_ACL(CommonStatisticNames.OP_REMOVE_DEFAULT_ACL),
+REMOVE_EC_POLICY("op_remove_ec_policy"),
 REMOVE_XATTR("op_remove_xattr"),
 RENAME(CommonStatisticNames.OP_RENAME),
 RENAME_SNAPSHOT("op_rename_snapshot"),
 RESOLVE_LINK("op_resolve_link"),
 SET_ACL(CommonStatisticNames.OP_SET_ACL),
+SET_EC_POLICY("op_set_ec_policy"),
 SET_OWNER(CommonStatisticNames.OP_SET_OWNER),
 SET_PERMISSION(CommonStatisticNames.OP_SET_PERMISSION),
 SET_REPLICATION("op_set_replication"),
@@ -90,6 +98,7 @@ public class DFSOpsCountStatistics extends StorageStatistics {
 GET_SNAPSHOT_DIFF("op_get_snapshot_diff"),
 GET_SNAPSHOTTABLE_DIRECTORY_LIST("op_get_snapshottable_directory_list"),
 TRUNCATE(CommonStatisticNames.OP_TRUNCATE),
+UNSET_EC_POLICY("op_unset_ec_policy"),
 UNSET_STORAGE_POLICY("op_unset_storage_policy");
 
private static final Map<String, OpType> SYMBOL_MAP =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5347368/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index ca1546c..7dd02bd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -2845,6 +2845,8 @@ public class DistributedFileSystem extends FileSystem
*/
   public void setErasureCodingPolicy(final Path path,
   final String ecPolicyName) throws IOException {
+statistics.incrementWriteOps(1);
+

[39/50] [abbrv] hadoop git commit: HDDS-894. Content-length should be set for ozone s3 ranged download. Contributed by Elek Marton.

2018-12-05 Thread shv
HDDS-894. Content-length should be set for ozone s3 ranged download. 
Contributed by Elek Marton.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/de425550
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/de425550
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/de425550

Branch: refs/heads/HDFS-12943
Commit: de4255509adbd15fbbf9ade245ae6bb6db8b36b7
Parents: fb9deed
Author: Bharat Viswanadham 
Authored: Mon Dec 3 15:17:44 2018 -0800
Committer: Bharat Viswanadham 
Committed: Mon Dec 3 15:17:44 2018 -0800

--
 .../apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java  | 11 ---
 .../apache/hadoop/ozone/s3/endpoint/TestObjectGet.java   |  2 ++
 2 files changed, 10 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/de425550/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
--
diff --git 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
index c504387..fdcadfc 100644
--- 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
+++ 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
@@ -56,6 +56,7 @@ import org.apache.hadoop.ozone.web.utils.OzoneUtils;
 import org.apache.hadoop.util.Time;
 
 import com.google.common.annotations.VisibleForTesting;
+import static javax.ws.rs.core.HttpHeaders.CONTENT_LENGTH;
 import static javax.ws.rs.core.HttpHeaders.LAST_MODIFIED;
 import org.apache.commons.io.IOUtils;
 import static org.apache.hadoop.ozone.s3.util.S3Consts.ACCEPT_RANGE_HEADER;
@@ -204,10 +205,12 @@ public class ObjectEndpoint extends EndpointBase {
 IOUtils.copy(key, dest);
   }
 };
-responseBuilder = Response.ok(output);
+responseBuilder = Response
+.ok(output)
+.header(CONTENT_LENGTH, keyDetails.getDataSize());
 
   } else {
-LOG.info("range Header provided value is {}", rangeHeader);
+LOG.debug("range Header provided value is {}", rangeHeader);
 OzoneInputStream key = bucket.readKey(keyPath);
 
 long startOffset = rangeHeader.getStartOffset();
@@ -229,7 +232,9 @@ public class ObjectEndpoint extends EndpointBase {
 copyLength);
   }
 };
-responseBuilder = Response.ok(output);
+responseBuilder = Response
+.ok(output)
+.header(CONTENT_LENGTH, copyLength);
 
 String contentRangeVal = RANGE_HEADER_SUPPORTED_UNIT + " " +
 rangeHeader.getStartOffset() + "-" + rangeHeader.getEndOffset() +

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de425550/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java
--
diff --git 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java
 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java
index f9df9aa..2455322 100644
--- 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java
+++ 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java
@@ -80,6 +80,8 @@ public class TestObjectGet {
 IOUtils.toString(ozoneInputStream, Charset.forName("UTF-8"));
 
 Assert.assertEquals(CONTENT, keyContent);
+Assert.assertEquals("" + keyContent.length(),
+response.getHeaderString("Content-Length"));
 
 DateTimeFormatter.RFC_1123_DATE_TIME
 .parse(response.getHeaderString("Last-Modified"));
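
The core of the fix for the ranged case is that HTTP byte ranges are inclusive
on both ends, so the advertised length must be end - start + 1; a tiny worked
example (illustrative names):

    public class RangeLength {
      // A request for bytes=2-5 spans endOffset - startOffset + 1 = 4 bytes.
      static long rangeContentLength(long startOffset, long endOffset) {
        return endOffset - startOffset + 1;
      }

      public static void main(String[] args) {
        System.out.println(rangeContentLength(2, 5));  // 4
        System.out.println(rangeContentLength(0, 0));  // 1; naive end - start
                                                       // would wrongly give 0
      }
    }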


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[27/50] [abbrv] hadoop git commit: HDFS-14105. Replace TreeSet in NamenodeFsck with HashSet. Contributed by Beluga Behr.

2018-12-05 Thread shv
HDFS-14105. Replace TreeSet in NamenodeFsck with HashSet. Contributed by Beluga 
Behr.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/99e201df
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/99e201df
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/99e201df

Branch: refs/heads/HDFS-12943
Commit: 99e201dfe2295be830efcc80be34706802da30be
Parents: b09cfad
Author: Giovanni Matteo Fumarola 
Authored: Fri Nov 30 11:07:11 2018 -0800
Committer: Giovanni Matteo Fumarola 
Committed: Fri Nov 30 11:07:11 2018 -0800

--
 .../hadoop/hdfs/server/namenode/NamenodeFsck.java   | 12 ++--
 1 file changed, 6 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/99e201df/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index b4c0a93..095a6ff 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -30,10 +30,11 @@ import java.util.Arrays;
 import java.util.Collection;
 import java.util.Date;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
-import java.util.TreeSet;
+import java.util.Set;
 import java.util.concurrent.ThreadLocalRandom;
 
 import org.apache.commons.io.IOUtils;
@@ -1021,10 +1022,10 @@ public class NamenodeFsck implements 
DataEncryptionKeyFactory {
* around.
*/
   private void copyBlock(final DFSClient dfs, LocatedBlock lblock,
- OutputStream fos) throws Exception {
+  OutputStream fos) throws Exception {
 int failures = 0;
 InetSocketAddress targetAddr = null;
-TreeSet<DatanodeInfo> deadNodes = new TreeSet<DatanodeInfo>();
+Set<DatanodeInfo> deadNodes = new HashSet<DatanodeInfo>();
 BlockReader blockReader = null;
 ExtendedBlock block = lblock.getBlock();
 
@@ -1132,9 +1133,8 @@ public class NamenodeFsck implements 
DataEncryptionKeyFactory {
* That's the local one, if available.
*/
   private DatanodeInfo bestNode(DFSClient dfs, DatanodeInfo[] nodes,
-TreeSet<DatanodeInfo> deadNodes) throws IOException {
-if ((nodes == null) ||
-(nodes.length - deadNodes.size() < 1)) {
+  Set<DatanodeInfo> deadNodes) throws IOException {
+if ((nodes == null) || (nodes.length - deadNodes.size() < 1)) {
   throw new IOException("No live nodes contain current block");
 }
 DatanodeInfo chosenNode;


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
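
The swap is safe because deadNodes is only added to and membership-tested --
never iterated in order -- so a TreeSet's O(log n) ordered operations buy
nothing over a HashSet's expected O(1) ones. A toy illustration (String stands
in for DatanodeInfo):

    import java.util.HashSet;
    import java.util.Set;

    public class DeadNodeSetDemo {
      public static void main(String[] args) {
        Set<String> deadNodes = new HashSet<>();
        deadNodes.add("dn-1");
        // Membership is all NamenodeFsck needs: expected O(1) here, versus
        // O(log n) comparisons in a TreeSet, which would also require the
        // element type to be Comparable or come with a Comparator.
        System.out.println(deadNodes.contains("dn-1"));  // true
        System.out.println(deadNodes.contains("dn-2"));  // false
      }
    }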



[30/50] [abbrv] hadoop git commit: HDDS-882. Provide a config to optionally turn on/off the sync flag during chunk writes. Contributed by Shashikant Banerjee.

2018-12-05 Thread shv
HDDS-882. Provide a config to optionally turn on/off the sync flag during chunk 
writes. Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8f3e12ff
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8f3e12ff
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8f3e12ff

Branch: refs/heads/HDFS-12943
Commit: 8f3e12ff07f5a8490af23f3ca231f97b381682e5
Parents: 5a3c771
Author: Shashikant Banerjee 
Authored: Sun Dec 2 08:06:24 2018 +0530
Committer: Shashikant Banerjee 
Committed: Sun Dec 2 08:06:24 2018 +0530

--
 .../java/org/apache/hadoop/ozone/OzoneConfigKeys.java|  3 +++
 hadoop-hdds/common/src/main/resources/ozone-default.xml  |  8 
 .../hadoop/ozone/container/keyvalue/KeyValueHandler.java |  7 ++-
 .../ozone/container/keyvalue/helpers/ChunkUtils.java | 11 ---
 .../ozone/container/keyvalue/impl/ChunkManagerImpl.java  | 10 --
 .../ozone/container/keyvalue/TestChunkManagerImpl.java   |  2 +-
 .../container/common/impl/TestContainerPersistence.java  |  2 +-
 7 files changed, 35 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f3e12ff/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index df233f7..496861c 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -52,6 +52,9 @@ public final class OzoneConfigKeys {
   public static final boolean DFS_CONTAINER_IPC_RANDOM_PORT_DEFAULT =
   false;
 
+  public static final String DFS_CONTAINER_CHUNK_WRITE_SYNC_KEY =
+  "dfs.container.chunk.write.sync";
+  public static final boolean DFS_CONTAINER_CHUNK_WRITE_SYNC_DEFAULT = true;
   /**
* Ratis Port where containers listen to.
*/

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f3e12ff/hadoop-hdds/common/src/main/resources/ozone-default.xml
--
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml 
b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 0545805..edce616 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -53,6 +53,14 @@
 
   
   
+dfs.container.chunk.write.sync
+true
+OZONE, CONTAINER, MANAGEMENT
+Determines whether the chunk writes in the container happen as
+  sync I/O or buffered I/O operation.
+
+  
+  
 dfs.container.ratis.statemachinedata.sync.timeout
 10s
 OZONE, DEBUG, CONTAINER, RATIS

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f3e12ff/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index b4cfcd0..5130253 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -49,6 +49,7 @@ import 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.common.helpers
 .StorageContainerException;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
@@ -111,13 +112,17 @@ public class KeyValueHandler extends Handler {
   private final VolumeChoosingPolicy volumeChoosingPolicy;
   private final long maxContainerSize;
   private final AutoCloseableLock handlerLock;
+  private final boolean doSyncWrite;
 
   public KeyValueHandler(Configuration config, StateContext context,
   ContainerSet contSet, VolumeSet volSet, ContainerMetrics metrics) {
 super(config, context, contSet, volSet, metrics);
 containerType = ContainerType.KeyValueContainer;
 blockManager = new BlockManagerImpl(config);
-chunkManager = new ChunkManagerImpl();
+doSyncWrite =
+
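
The KeyValueHandler hunk is cut off above, but the configuration read it
introduces follows directly from the new key and default; a minimal sketch of
consuming the flag (sketch only, not the patch's exact lines):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ozone.OzoneConfigKeys;

    public class SyncFlagDemo {
      public static void main(String[] args) {
        Configuration config = new Configuration();
        // true by default per ozone-default.xml above; set the key to false
        // to get buffered rather than sync chunk writes.
        boolean doSyncWrite = config.getBoolean(
            OzoneConfigKeys.DFS_CONTAINER_CHUNK_WRITE_SYNC_KEY,
            OzoneConfigKeys.DFS_CONTAINER_CHUNK_WRITE_SYNC_DEFAULT);
        System.out.println("chunk writes sync: " + doSyncWrite);
      }
    }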

[48/50] [abbrv] hadoop git commit: Revert "YARN-8870. [Submarine] Add submarine installation scripts. (Xun Liu via wangda)"

2018-12-05 Thread shv
Revert "YARN-8870. [Submarine] Add submarine installation scripts. (Xun Liu via 
wangda)"

This reverts commit 46d6e0016610ced51a76189daeb3ad0e3dbbf94c.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/228156cf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/228156cf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/228156cf

Branch: refs/heads/HDFS-12943
Commit: 228156cfd1b474988bc4fedfbf7edddc87db41e3
Parents: 9287ab3
Author: Wangda Tan 
Authored: Tue Dec 4 14:13:06 2018 -0800
Committer: Wangda Tan 
Committed: Tue Dec 4 14:13:06 2018 -0800

--
 .../resources/assemblies/hadoop-yarn-dist.xml   |   8 -
 .../installation/install.conf   |  74 
 .../installation/install.sh | 116 -
 .../package/calico/calico-node.service  |  50 ---
 .../installation/package/calico/calicoctl.cfg   |  22 -
 .../installation/package/docker/daemon.json |  23 -
 .../installation/package/docker/docker.service  |  35 --
 .../installation/package/etcd/etcd.service  |  40 --
 .../package/hadoop/container-executor.cfg   |  41 --
 .../installation/package/submarine/submarine.sh |  25 --
 .../installation/scripts/calico.sh  | 224 --
 .../installation/scripts/docker.sh  | 166 ---
 .../installation/scripts/download-server.sh |  42 --
 .../installation/scripts/environment.sh | 213 -
 .../installation/scripts/etcd.sh| 152 ---
 .../installation/scripts/hadoop.sh  | 117 -
 .../installation/scripts/menu.sh| 444 ---
 .../installation/scripts/nvidia-docker.sh   |  99 -
 .../installation/scripts/nvidia.sh  | 120 -
 .../installation/scripts/submarine.sh   |  38 --
 .../installation/scripts/utils.sh   | 123 -
 21 files changed, 2172 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/228156cf/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
--
diff --git 
a/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml 
b/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
index 69ab66a..4055acb 100644
--- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
+++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
@@ -66,14 +66,6 @@
   0755
 
 
-  
hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/installation
-  /share/hadoop/yarn/submarine-installer
-  
-**/*
-  
-  0755
-
-
   hadoop-yarn/conf
   etc/hadoop
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/228156cf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/installation/install.conf
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/installation/install.conf
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/installation/install.conf
deleted file mode 100644
index 82dcf61..000
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/installation/install.conf
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/usr/bin/env bash
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# install config #
-
-# DNS
-LOCAL_DNS_HOST="172.17.0.9"   # /etc/resolv.conf
-YARN_DNS_HOST="10.196.69.173" # yarn dns server ip address
-
-# etcd hosts list
-ETCD_HOSTS=(10.196.69.173 10.196.69.174 10.196.69.175)
-
-# docker registry ip:port
-DOCKER_REGISTRY="10.120.196.232:5000"
-
-# Start the http download service on the specified server,
-# Will download all the dependencies in the http server,
-# Run the install script on other servers.
-# Automatically download dependencies from http,
-# Solve the problem that all servers are slow to download online.
-# At the same time, you can also manually 

[10/50] [abbrv] hadoop git commit: YARN-8948. PlacementRule interface should be for all YarnSchedulers. Contributed by Bibin A Chundatt.

2018-12-05 Thread shv
YARN-8948. PlacementRule interface should be for all YarnSchedulers. 
Contributed by Bibin A Chundatt.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a68d766e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a68d766e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a68d766e

Branch: refs/heads/HDFS-12943
Commit: a68d766e876631d7ee2e1a6504d4120ba628d178
Parents: c1d24f8
Author: bibinchundatt 
Authored: Thu Nov 29 21:43:34 2018 +0530
Committer: bibinchundatt 
Committed: Thu Nov 29 21:43:34 2018 +0530

--
 .../placement/AppNameMappingPlacementRule.java  | 12 ++--
 .../server/resourcemanager/placement/PlacementRule.java |  4 ++--
 .../placement/UserGroupMappingPlacementRule.java| 11 ++-
 3 files changed, 22 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a68d766e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/AppNameMappingPlacementRule.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/AppNameMappingPlacementRule.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/AppNameMappingPlacementRule.java
index 2debade..7a46962 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/AppNameMappingPlacementRule.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/AppNameMappingPlacementRule.java
@@ -20,11 +20,12 @@ package 
org.apache.hadoop.yarn.server.resourcemanager.placement;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerContext;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerQueueManager;
@@ -61,8 +62,15 @@ public class AppNameMappingPlacementRule extends 
PlacementRule {
   }
 
   @Override
-  public boolean initialize(CapacitySchedulerContext schedulerContext)
+  public boolean initialize(ResourceScheduler scheduler)
   throws IOException {
+if (!(scheduler instanceof CapacityScheduler)) {
+  throw new IOException(
+  "AppNameMappingPlacementRule can be configured only for "
+  + "CapacityScheduler");
+}
+CapacitySchedulerContext schedulerContext =
+(CapacitySchedulerContext) scheduler;
 CapacitySchedulerConfiguration conf = schedulerContext.getConfiguration();
 boolean overrideWithQueueMappings = conf.getOverrideWithQueueMappings();
 LOG.info(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a68d766e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/PlacementRule.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/PlacementRule.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/PlacementRule.java
index 21ab32a..0f3d43c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/PlacementRule.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/placement/PlacementRule.java
@@ -22,7 +22,7 @@ import java.io.IOException;
 
 import 

[42/50] [abbrv] hadoop git commit: HDDS-890. Handle OverlappingFileLockException during writeStateMachineData in ContainerStateMachine. Contributed by Shashikant Banerjee.

2018-12-05 Thread shv
HDDS-890. Handle OverlappingFileLockException during writeStateMachineData in 
ContainerStateMachine. Contributed by Shashikant Banerjee.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7274115d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7274115d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7274115d

Branch: refs/heads/HDFS-12943
Commit: 7274115d57fdfef48fca1afa7be7ed2634dd31fa
Parents: ff31313
Author: Mukul Kumar Singh 
Authored: Tue Dec 4 20:12:35 2018 +0530
Committer: Mukul Kumar Singh 
Committed: Tue Dec 4 20:12:35 2018 +0530

--
 .../container/keyvalue/KeyValueHandler.java | 36 ++
 .../keyvalue/impl/ChunkManagerImpl.java | 33 +---
 .../keyvalue/interfaces/ChunkManager.java   |  8 ++--
 .../keyvalue/TestChunkManagerImpl.java  | 40 
 .../common/impl/TestContainerPersistence.java   | 38 +++
 5 files changed, 93 insertions(+), 62 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7274115d/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index 5130253..01964ba 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -545,10 +545,12 @@ public class KeyValueHandler extends Handler {
   .getChunkData());
   Preconditions.checkNotNull(chunkInfo);
 
-  boolean isReadFromTmpFile = dispatcherContext == null ? false :
-  dispatcherContext.isReadFromTmpFile();
+  if (dispatcherContext == null) {
+dispatcherContext = new DispatcherContext.Builder().build();
+  }
+
   data = chunkManager
-  .readChunk(kvContainer, blockID, chunkInfo, isReadFromTmpFile);
+  .readChunk(kvContainer, blockID, chunkInfo, dispatcherContext);
   metrics.incContainerBytesStats(Type.ReadChunk, data.length);
 } catch (StorageContainerException ex) {
   return ContainerUtils.logAndReturnError(LOG, ex, request);
@@ -619,15 +621,17 @@ public class KeyValueHandler extends Handler {
   Preconditions.checkNotNull(chunkInfo);
 
   ByteBuffer data = null;
-  WriteChunkStage stage =
-  dispatcherContext == null ? WriteChunkStage.COMBINED :
-  dispatcherContext.getStage();
+  if (dispatcherContext == null) {
+dispatcherContext = new DispatcherContext.Builder().build();
+  }
+  WriteChunkStage stage = dispatcherContext.getStage();
   if (stage == WriteChunkStage.WRITE_DATA ||
   stage == WriteChunkStage.COMBINED) {
 data = request.getWriteChunk().getData().asReadOnlyByteBuffer();
   }
 
-  chunkManager.writeChunk(kvContainer, blockID, chunkInfo, data, stage);
+  chunkManager
+  .writeChunk(kvContainer, blockID, chunkInfo, data, 
dispatcherContext);
 
   // We should increment stats after writeChunk
   if (stage == WriteChunkStage.WRITE_DATA||
@@ -677,19 +681,19 @@ public class KeyValueHandler extends Handler {
   putSmallFileReq.getChunkInfo());
   Preconditions.checkNotNull(chunkInfo);
   ByteBuffer data = putSmallFileReq.getData().asReadOnlyByteBuffer();
-  WriteChunkStage stage =
-  dispatcherContext == null ? WriteChunkStage.COMBINED :
-  dispatcherContext.getStage();
+  if (dispatcherContext == null) {
+dispatcherContext = new DispatcherContext.Builder().build();
+  }
+
   // chunks will be committed as a part of handling putSmallFile
   // here. There is no need to maintain this info in openContainerBlockMap.
-  chunkManager.writeChunk(kvContainer, blockID, chunkInfo, data, stage);
+  chunkManager
+  .writeChunk(kvContainer, blockID, chunkInfo, data, 
dispatcherContext);
 
   List<ContainerProtos.ChunkInfo> chunks = new LinkedList<>();
   chunks.add(chunkInfo.getProtoBufMessage());
   blockData.setChunks(chunks);
-  long bcsId =
-  dispatcherContext == null ? 0 : dispatcherContext.getLogIndex();
-  blockData.setBlockCommitSequenceId(bcsId);
+  blockData.setBlockCommitSequenceId(dispatcherContext.getLogIndex());
 
   blockManager.putBlock(kvContainer, blockData);
   metrics.incContainerBytesStats(Type.PutSmallFile, data.capacity());
@@ -728,11 +732,13 @@ public class KeyValueHandler extends Handler {
 
   

[24/50] [abbrv] hadoop git commit: HADOOP-15922. Revert patch 004.

2018-12-05 Thread shv
HADOOP-15922.  Revert patch 004.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/38ea3814
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/38ea3814
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/38ea3814

Branch: refs/heads/HDFS-12943
Commit: 38ea3814bd0641d895e5d3b7415c6308e7f8491e
Parents: 62f8211
Author: Eric Yang 
Authored: Fri Nov 30 12:49:46 2018 -0500
Committer: Eric Yang 
Committed: Fri Nov 30 12:49:46 2018 -0500

--
 .../DelegationTokenAuthenticationFilter.java|  8 +--
 .../hadoop/crypto/key/kms/server/TestKMS.java   | 64 
 2 files changed, 1 insertion(+), 71 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/38ea3814/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java
index 6b3fc1d..5275526 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java
@@ -51,7 +51,6 @@ import javax.servlet.http.HttpServletRequestWrapper;
 import javax.servlet.http.HttpServletResponse;
 
 import java.io.IOException;
-import java.net.URLDecoder;
 import java.nio.charset.Charset;
 import java.security.Principal;
 import java.util.Enumeration;
@@ -231,12 +230,7 @@ public class DelegationTokenAuthenticationFilter
   for (NameValuePair nv : list) {
 if (DelegationTokenAuthenticatedURL.DO_AS.
 equalsIgnoreCase(nv.getName())) {
-  String doAsUser = nv.getValue();
-  try {
-doAsUser = URLDecoder.decode(nv.getValue(), UTF8_CHARSET.name());
-  } finally {
-return doAsUser;
-  }
+  return nv.getValue();
 }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38ea3814/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
--
diff --git 
a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
 
b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
index 064ae83..af59877 100644
--- 
a/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
+++ 
b/hadoop-common-project/hadoop-kms/src/test/java/org/apache/hadoop/crypto/key/kms/server/TestKMS.java
@@ -2115,70 +2115,6 @@ public class TestKMS {
 });
   }
 
-  @Test
-  public void testGetDelegationTokenByProxyUser() throws Exception {
-Configuration conf = new Configuration();
-conf.set("hadoop.security.authentication", "kerberos");
-UserGroupInformation.setConfiguration(conf);
-final File testDir = getTestDir();
-
-conf = createBaseKMSConf(testDir, conf);
-conf.set("hadoop.kms.authentication.type", "kerberos");
-conf.set("hadoop.kms.authentication.kerberos.keytab",
-keytab.getAbsolutePath());
-conf.set("hadoop.kms.authentication.kerberos.principal", "HTTP/localhost");
-conf.set("hadoop.kms.authentication.kerberos.name.rules", "DEFAULT");
-conf.set("hadoop.kms.proxyuser.client.users", "foo/localhost");
-conf.set("hadoop.kms.proxyuser.client.hosts", "localhost");
-conf.set(KeyAuthorizationKeyProvider.KEY_ACL + "kcc.ALL",
-"foo/localhost");
-
-writeConf(testDir, conf);
-
-runServer(null, null, testDir, new KMSCallable<Void>() {
-  @Override
-  public Void call() throws Exception {
-final Configuration conf = new Configuration();
-final URI uri = createKMSUri(getKMSUrl());
-
-// proxyuser client using kerberos credentials
-UserGroupInformation proxyUgi = UserGroupInformation.
-loginUserFromKeytabAndReturnUGI("client/host", 
keytab.getAbsolutePath());
-UserGroupInformation foo = UserGroupInformation.createProxyUser(
-"foo/localhost", proxyUgi);
-final Credentials credentials = new Credentials();
-foo.doAs(new PrivilegedExceptionAction<Void>() {
-  @Override
-  public Void run() throws Exception {
-final KeyProvider kp = createProvider(uri, conf);
-KeyProviderDelegationTokenExtension 

[37/50] [abbrv] hadoop git commit: HDFS-14119. Improve GreedyPlanner Parameter Logging. Contributed by Beluga Behr.

2018-12-05 Thread shv
HDFS-14119. Improve GreedyPlanner Parameter Logging. Contributed by Beluga Behr.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/69489ff2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/69489ff2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/69489ff2

Branch: refs/heads/HDFS-12943
Commit: 69489ff2d18a26be9c56d632672079e134f1cd99
Parents: c9a3aa6
Author: Giovanni Matteo Fumarola 
Authored: Mon Dec 3 12:55:52 2018 -0800
Committer: Giovanni Matteo Fumarola 
Committed: Mon Dec 3 12:55:52 2018 -0800

--
 .../diskbalancer/planner/GreedyPlanner.java | 24 
 1 file changed, 10 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/69489ff2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/GreedyPlanner.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/GreedyPlanner.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/GreedyPlanner.java
index 568c1e6..3f97345 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/GreedyPlanner.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/planner/GreedyPlanner.java
@@ -64,7 +64,7 @@ public class GreedyPlanner implements Planner {
*/
   @Override
   public NodePlan plan(DiskBalancerDataNode node) throws Exception {
-long startTime = Time.monotonicNow();
+final long startTime = Time.monotonicNow();
 NodePlan plan = new NodePlan(node.getDataNodeName(),
 node.getDataNodePort());
 LOG.info("Starting plan for Node : {}:{}",
@@ -75,12 +75,10 @@ public class GreedyPlanner implements Planner {
   }
 }
 
-long endTime = Time.monotonicNow();
-String message = String
-.format("Compute Plan for Node : %s:%d took %d ms ",
-node.getDataNodeName(), node.getDataNodePort(),
-endTime - startTime);
-LOG.info(message);
+final long endTime = Time.monotonicNow();
+LOG.info("Compute Plan for Node : {}:{} took {} ms",
+node.getDataNodeName(), node.getDataNodePort(), endTime - startTime);
+
 return plan;
   }
 
@@ -117,21 +115,19 @@ public class GreedyPlanner implements Planner {
 
   applyStep(nextStep, currentSet, lowVolume, highVolume);
   if (nextStep != null) {
-LOG.debug("Step : {} ",  nextStep.toString());
+LOG.debug("Step : {} ", nextStep);
 plan.addStep(nextStep);
   }
 }
 
-String message = String
-.format("Disk Volume set %s Type : %s plan completed.",
-currentSet.getSetID(),
-currentSet.getVolumes().get(0).getStorageType());
+LOG.info("Disk Volume set {} - Type : {} plan completed.",
+currentSet.getSetID(),
+currentSet.getVolumes().get(0).getStorageType());
 
 plan.setNodeName(node.getDataNodeName());
 plan.setNodeUUID(node.getDataNodeUUID());
 plan.setTimeStamp(Time.now());
 plan.setPort(node.getDataNodePort());
-LOG.info(message);
   }
 
   /**
@@ -207,7 +203,7 @@ public class GreedyPlanner implements Planner {
   // Create a new step
   nextStep = new MoveStep(highVolume, currentSet.getIdealUsed(), lowVolume,
   bytesToMove, currentSet.getSetID());
-  LOG.debug(nextStep.toString());
+  LOG.debug("Next Step: {}", nextStep);
 }
 return nextStep;
   }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org
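
The change is worthwhile because String.format builds the message eagerly even
when the level is disabled, while SLF4J's {} placeholders defer formatting
until the logger decides to emit. A minimal side-by-side:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    public class LoggingDemo {
      private static final Logger LOG =
          LoggerFactory.getLogger(LoggingDemo.class);

      public static void main(String[] args) {
        String node = "dn-1";
        long elapsedMs = 42;
        // Eager: the String is built even if INFO is disabled.
        LOG.info(String.format(
            "Compute Plan for Node : %s took %d ms", node, elapsedMs));
        // Deferred: arguments are formatted only if INFO is enabled.
        LOG.info("Compute Plan for Node : {} took {} ms", node, elapsedMs);
      }
    }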



[40/50] [abbrv] hadoop git commit: HADOOP-15968. ABFS: add try catch for UGI failure when initializing ABFS.

2018-12-05 Thread shv
HADOOP-15968. ABFS: add try catch for UGI failure when initializing ABFS.

Contributed by Da Zhou.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a8bbd818
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a8bbd818
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a8bbd818

Branch: refs/heads/HDFS-12943
Commit: a8bbd818d5bc4762324bcdb7cf1fdd5c2f93891b
Parents: de42555
Author: Da Zhou 
Authored: Tue Dec 4 13:39:10 2018 +
Committer: Steve Loughran 
Committed: Tue Dec 4 13:39:10 2018 +

--
 .../apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java |  7 ++-
 .../hadoop/fs/azurebfs/AzureBlobFileSystemStore.java   | 13 +
 2 files changed, 15 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a8bbd818/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java
index 38b0c77..b4277c2 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystem.java
@@ -121,7 +121,12 @@ public class AzureBlobFileSystem extends FileSystem {
 }
 
 if (!abfsConfiguration.getSkipUserGroupMetadataDuringInitialization()) {
-  this.primaryUserGroup = userGroupInformation.getPrimaryGroupName();
+  try {
+this.primaryUserGroup = userGroupInformation.getPrimaryGroupName();
+  } catch (IOException ex) {
+LOG.error("Failed to get primary group for {}, using user name as 
primary group name", user);
+this.primaryUserGroup = this.user;
+  }
 } else {
   //Provide a default group name
   this.primaryUserGroup = this.user;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a8bbd818/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
index fc684ac..5f1692f 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/AzureBlobFileSystemStore.java
@@ -102,8 +102,8 @@ public class AzureBlobFileSystemStore {
   private AbfsClient client;
   private URI uri;
   private final UserGroupInformation userGroupInformation;
-  private final String userName;
-  private final String primaryUserGroup;
+  private String userName;
+  private String primaryUserGroup;
   private static final String DATE_TIME_PATTERN = "E, dd MMM  HH:mm:ss 
'GMT'";
   private static final String XMS_PROPERTIES_ENCODING = "ISO-8859-1";
   private static final int LIST_MAX_RESULTS = 5000;
@@ -134,10 +134,15 @@ public class AzureBlobFileSystemStore {
 this.userName = userGroupInformation.getShortUserName();
 
 if (!abfsConfiguration.getSkipUserGroupMetadataDuringInitialization()) {
-  primaryUserGroup = userGroupInformation.getPrimaryGroupName();
+  try {
+this.primaryUserGroup = userGroupInformation.getPrimaryGroupName();
+  } catch (IOException ex) {
+LOG.error("Failed to get primary group for {}, using user name as 
primary group name", userName);
+this.primaryUserGroup = userName;
+  }
 } else {
   //Provide a default group name
-  primaryUserGroup = userName;
+  this.primaryUserGroup = userName;
 }
 
 this.azureAtomicRenameDirSet = new HashSet<>(Arrays.asList(


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[41/50] [abbrv] hadoop git commit: HADOOP-15970. Upgrade plexus-utils from 2.0.5 to 3.1.0.

2018-12-05 Thread shv
HADOOP-15970. Upgrade plexus-utils from 2.0.5 to 3.1.0.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ff31313d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ff31313d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ff31313d

Branch: refs/heads/HDFS-12943
Commit: ff31313d83ae26be5e29eaad7d9ce6184bae83e2
Parents: a8bbd81
Author: Akira Ajisaka 
Authored: Tue Dec 4 16:13:50 2018 +0900
Committer: Akira Ajisaka 
Committed: Tue Dec 4 22:45:29 2018 +0900

--
 hadoop-project/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff31313d/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index a205d2b..5fd3a56 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -716,7 +716,7 @@
   
 org.codehaus.plexus
 plexus-utils
-2.0.5
+3.1.0
   
   
 org.codehaus.plexus


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[19/50] [abbrv] hadoop git commit: YARN-9010. Fix the incorrect trailing slash deletion in constructor method of CGroupsHandlerImpl. (Zhankun Tang via wangda)

2018-12-05 Thread shv
YARN-9010. Fix the incorrect trailing slash deletion in constructor method of 
CGroupsHandlerImpl. (Zhankun Tang via wangda)

Change-Id: Iaecc66d57781cc10f19ead4647e47fc9556676da


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bad12031
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bad12031
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bad12031

Branch: refs/heads/HDFS-12943
Commit: bad12031f603347a701249a1e3ef5d879a5f1c8f
Parents: 0081b02
Author: Wangda Tan 
Authored: Thu Nov 29 14:56:07 2018 -0800
Committer: Wangda Tan 
Committed: Thu Nov 29 14:56:07 2018 -0800

--
 .../linux/resources/CGroupsHandlerImpl.java |  3 +-
 .../linux/resources/TestCGroupsHandlerImpl.java | 38 
 2 files changed, 40 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bad12031/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
index 050d0a8..1b2c780 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsHandlerImpl.java
@@ -87,9 +87,10 @@ class CGroupsHandlerImpl implements CGroupsHandler {
   CGroupsHandlerImpl(Configuration conf, PrivilegedOperationExecutor
   privilegedOperationExecutor, String mtab)
   throws ResourceHandlerException {
+    // Remove leading and trailing slash(es)
 this.cGroupPrefix = conf.get(YarnConfiguration.
 NM_LINUX_CONTAINER_CGROUPS_HIERARCHY, "/hadoop-yarn")
-.replaceAll("^/", "").replaceAll("$/", "");
+.replaceAll("^/+", "").replaceAll("/+$", "");
 this.enableCGroupMount = conf.getBoolean(YarnConfiguration.
 NM_LINUX_CONTAINER_CGROUPS_MOUNT, false);
 this.cGroupMountPath = conf.get(YarnConfiguration.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bad12031/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsHandlerImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsHandlerImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsHandlerImpl.java
index ea6fb52..70badaf 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsHandlerImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/TestCGroupsHandlerImpl.java
@@ -598,4 +598,42 @@ public class TestCGroupsHandlerImpl {
   FileUtils.deleteQuietly(cpu);
 }
   }
+
+  // Remove leading and trailing slashes
+  @Test
+  public void testCgroupsHierarchySetting() throws ResourceHandlerException {
+YarnConfiguration conf = new YarnConfiguration();
+conf.set(YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_MOUNT_PATH, tmpPath);
+conf.set(YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_HIERARCHY,
+"/hadoop-yarn");
+CGroupsHandlerImpl cGroupsHandler = new CGroupsHandlerImpl(conf, null);
+String expectedRelativePath = "hadoop-yarn/c1";
+Assert.assertEquals(expectedRelativePath,
+cGroupsHandler.getRelativePathForCGroup("c1"));
+
+conf.set(YarnConfiguration.NM_LINUX_CONTAINER_CGROUPS_HIERARCHY,
+"hadoop-yarn");
+cGroupsHandler = new CGroupsHandlerImpl(conf, null);
+Assert.assertEquals(expectedRelativePath,
+cGroupsHandler.getRelativePathForCGroup("c1"));
+
+
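
A self-contained illustration of the regex change (a hypothetical demo class, not part of the patch). In the old expression, "$/" can never match: "$" anchors at the end of the input and no character can follow it, so trailing slashes were never removed.

public class SlashTrimDemo {
  public static void main(String[] args) {
    String hierarchy = "//hadoop-yarn/";
    // Old (buggy) expression: strips a single leading slash, never a trailing one.
    String old = hierarchy.replaceAll("^/", "").replaceAll("$/", "");
    // Fixed expression: strips runs of leading and trailing slashes.
    String fixed = hierarchy.replaceAll("^/+", "").replaceAll("/+$", "");
    System.out.println(old);   // prints "/hadoop-yarn/"
    System.out.println(fixed); // prints "hadoop-yarn"
  }
}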

[35/50] [abbrv] hadoop git commit: HDDS-885. Fix test failures due to ChecksumData. Contributed by Hanisha Koneru.

2018-12-05 Thread shv
HDDS-885. Fix test failures due to ChecksumData. Contributed by Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ef3b03b7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ef3b03b7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ef3b03b7

Branch: refs/heads/HDFS-12943
Commit: ef3b03b75abd5ca5b96dab7985aa8fe17578fa07
Parents: fb10803
Author: Bharat Viswanadham 
Authored: Mon Dec 3 10:52:49 2018 -0800
Committer: Bharat Viswanadham 
Committed: Mon Dec 3 10:52:49 2018 -0800

--
 .../hdds/scm/storage/ContainerProtocolCalls.java  | 14 +++---
 .../org/apache/hadoop/ozone/OzoneConfigKeys.java  |  5 +++--
 .../org/apache/hadoop/ozone/common/Checksum.java  | 14 --
 .../ozone/container/common/helpers/ChunkInfo.java |  9 -
 .../common/src/main/resources/ozone-default.xml   | 18 ++
 .../container/common/impl/TestHddsDispatcher.java |  2 ++
 .../apache/hadoop/ozone/client/rpc/RpcClient.java |  5 +++--
 .../common/TestBlockDeletingService.java  |  4 ++--
 .../container/common/helpers/TestBlockData.java   |  7 ++-
 .../web/storage/DistributedStorageHandler.java|  5 +++--
 10 files changed, 68 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef3b03b7/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
index 1f20d00..8af3973 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
@@ -21,6 +21,8 @@ package org.apache.hadoop.hdds.scm.storage;
 import org.apache.hadoop.hdds.scm.XceiverClientAsyncReply;
 import org.apache.hadoop.hdds.scm.container.common.helpers
 .BlockNotCommittedException;
+import org.apache.hadoop.ozone.common.Checksum;
+import org.apache.hadoop.ozone.common.ChecksumData;
 import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
 import org.apache.hadoop.hdds.scm.XceiverClientSpi;
 import org.apache.hadoop.hdds.scm.container.common.helpers
@@ -305,10 +307,16 @@ public final class ContainerProtocolCalls  {
 KeyValue keyValue =
 KeyValue.newBuilder().setKey("OverWriteRequested").setValue("true")
 .build();
+Checksum checksum = new Checksum();
+ChecksumData checksumData = checksum.computeChecksum(data);
 ChunkInfo chunk =
-ChunkInfo.newBuilder().setChunkName(blockID.getLocalID()
-+ "_chunk").setOffset(0).setLen(data.length).
-addMetadata(keyValue).build();
+ChunkInfo.newBuilder()
+.setChunkName(blockID.getLocalID() + "_chunk")
+.setOffset(0)
+.setLen(data.length)
+.addMetadata(keyValue)
+.setChecksumData(checksumData.getProtoBufMessage())
+.build();
 
 PutSmallFileRequestProto putSmallFileRequest =
 PutSmallFileRequestProto.newBuilder().setChunkInfo(chunk)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef3b03b7/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index 496861c..a331f48 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -342,8 +342,9 @@ public final class OzoneConfigKeys {
   public static final String OZONE_CLIENT_CHECKSUM_TYPE_DEFAULT = "SHA256";
   public static final String OZONE_CLIENT_BYTES_PER_CHECKSUM =
   "ozone.client.bytes.per.checksum";
-  public static final int OZONE_CLIENT_BYTES_PER_CHECKSUM_DEFAULT =
-  1024 * 1024; // 1 MB
+  public static final String OZONE_CLIENT_BYTES_PER_CHECKSUM_DEFAULT = "1MB";
+  public static final int OZONE_CLIENT_BYTES_PER_CHECKSUM_DEFAULT_BYTES =
+  1024 * 1024;
   public static final int OZONE_CLIENT_BYTES_PER_CHECKSUM_MIN_SIZE = 256 * 1024;
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef3b03b7/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Checksum.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Checksum.java 
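
For context on what a per-chunk checksum buys here, a minimal sketch using plain java.util.zip.CRC32 (a hypothetical helper, not the Ozone Checksum/ChecksumData API): one checksum value is computed per bytes-per-checksum slice of the chunk, which is what ozone.client.bytes.per.checksum (default "1MB" above) controls.

import java.util.ArrayList;
import java.util.List;
import java.util.zip.CRC32;

public class PerBlockChecksumDemo {
  // Compute one CRC32 value per bytesPerChecksum-sized slice of data.
  static List<Long> checksums(byte[] data, int bytesPerChecksum) {
    List<Long> out = new ArrayList<>();
    for (int off = 0; off < data.length; off += bytesPerChecksum) {
      int len = Math.min(bytesPerChecksum, data.length - off);
      CRC32 crc = new CRC32();
      crc.update(data, off, len);
      out.add(crc.getValue());
    }
    return out;
  }

  public static void main(String[] args) {
    byte[] chunk = new byte[3 * 1024 * 1024]; // a 3 MB chunk
    // 1 MB per checksum => 3 checksum values accompany the chunk.
    System.out.println(checksums(chunk, 1024 * 1024).size()); // prints 3
  }
}

Smaller slice sizes detect corruption at finer granularity but store more checksum bytes per chunk, which is why a minimum of 256 KB is enforced above.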

[38/50] [abbrv] hadoop git commit: HADOOP-15852. Refactor QuotaUsage. Contributed by Beluga Behr.

2018-12-05 Thread shv
HADOOP-15852. Refactor QuotaUsage. Contributed by Beluga Behr.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fb9deed4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fb9deed4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fb9deed4

Branch: refs/heads/HDFS-12943
Commit: fb9deed41d6b9f242474b474a5acde0c858e28f6
Parents: 69489ff
Author: Giovanni Matteo Fumarola 
Authored: Mon Dec 3 13:25:02 2018 -0800
Committer: Giovanni Matteo Fumarola 
Committed: Mon Dec 3 13:25:02 2018 -0800

--
 .../java/org/apache/hadoop/fs/QuotaUsage.java   | 108 ++-
 1 file changed, 55 insertions(+), 53 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb9deed4/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/QuotaUsage.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/QuotaUsage.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/QuotaUsage.java
index 3472362..4e42e5b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/QuotaUsage.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/QuotaUsage.java
@@ -40,14 +40,12 @@ public class QuotaUsage {
   /** Builder class for QuotaUsage. */
   public static class Builder {
 public Builder() {
-  this.quota = -1;
-  this.spaceQuota = -1;
+  this.quota = -1L;
+  this.spaceQuota = -1L;
 
   typeConsumed = new long[StorageType.values().length];
   typeQuota = new long[StorageType.values().length];
-  for (int i = 0; i < typeQuota.length; i++) {
-typeQuota[i] = -1;
-  }
+  Arrays.fill(typeQuota, -1L);
 }
 
 public Builder fileAndDirectoryCount(long count) {
@@ -71,9 +69,8 @@ public class QuotaUsage {
 }
 
 public Builder typeConsumed(long[] typeConsumed) {
-  for (int i = 0; i < typeConsumed.length; i++) {
-this.typeConsumed[i] = typeConsumed[i];
-  }
+  System.arraycopy(typeConsumed, 0, this.typeConsumed, 0,
+  typeConsumed.length);
   return this;
 }
 
@@ -88,9 +85,8 @@ public class QuotaUsage {
 }
 
 public Builder typeQuota(long[] typeQuota) {
-  for (int i = 0; i < typeQuota.length; i++) {
-this.typeQuota[i] = typeQuota[i];
-  }
+  System.arraycopy(typeQuota, 0, this.typeQuota, 0,
+  typeQuota.length);
   return this;
 }
 
@@ -153,22 +149,12 @@ public class QuotaUsage {
 
   /** Return storage type quota. */
   public long getTypeQuota(StorageType type) {
-return (typeQuota != null) ? typeQuota[type.ordinal()] : -1;
+return (typeQuota != null) ? typeQuota[type.ordinal()] : -1L;
   }
 
   /** Return storage type consumed. */
   public long getTypeConsumed(StorageType type) {
-return (typeConsumed != null) ? typeConsumed[type.ordinal()] : 0;
-  }
-
-  /** Return storage type quota. */
-  private long[] getTypesQuota() {
-return typeQuota;
-  }
-
-  /** Return storage type quota. */
-  private long[] getTypesConsumed() {
-return typeConsumed;
+return (typeConsumed != null) ? typeConsumed[type.ordinal()] : 0L;
   }
 
   /** Return true if any storage type quota has been set. */
@@ -177,7 +163,7 @@ public class QuotaUsage {
   return false;
 }
 for (StorageType t : StorageType.getTypesSupportingQuota()) {
-  if (typeQuota[t.ordinal()] > 0) {
+  if (typeQuota[t.ordinal()] > 0L) {
 return true;
   }
 }
@@ -190,7 +176,7 @@ public class QuotaUsage {
   return false;
 }
 for (StorageType t : StorageType.getTypesSupportingQuota()) {
-  if (typeConsumed[t.ordinal()] > 0) {
+  if (typeConsumed[t.ordinal()] > 0L) {
 return true;
   }
 }
@@ -198,33 +184,50 @@ public class QuotaUsage {
   }
 
   @Override
-  public boolean equals(Object to) {
-return (this == to || (to instanceof QuotaUsage &&
-getFileAndDirectoryCount() ==
-((QuotaUsage) to).getFileAndDirectoryCount() &&
-getQuota() == ((QuotaUsage) to).getQuota() &&
-getSpaceConsumed() == ((QuotaUsage) to).getSpaceConsumed() &&
-getSpaceQuota() == ((QuotaUsage) to).getSpaceQuota() &&
-Arrays.equals(getTypesQuota(), ((QuotaUsage) to).getTypesQuota()) &&
-Arrays.equals(getTypesConsumed(),
-((QuotaUsage) to).getTypesConsumed(;
+  public int hashCode() {
+final int prime = 31;
+int result = 1;
+result = prime * result
++ (int) (fileAndDirectoryCount ^ (fileAndDirectoryCount >>> 32));
+result = prime * result + (int) (quota ^ (quota >>> 32));
+result = prime * result + (int) (spaceConsumed ^ (spaceConsumed >>> 32));
+result = prime * result + 
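
As a compact sketch of the equals/hashCode contract this refactor completes (a hypothetical class, not the Hadoop one): a class that overrides equals must derive hashCode from the same fields, and Long.hashCode is equivalent to the manual (int) (v ^ (v >>> 32)) folding used above.

import java.util.Arrays;

class UsageSketch {
  long quota = -1L;
  long[] typeQuota = new long[3];

  UsageSketch() {
    Arrays.fill(typeQuota, -1L); // replaces the hand-written fill loop
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (!(o instanceof UsageSketch)) {
      return false;
    }
    UsageSketch other = (UsageSketch) o;
    return quota == other.quota && Arrays.equals(typeQuota, other.typeQuota);
  }

  @Override
  public int hashCode() {
    // Built from the same fields as equals, with array-aware semantics.
    return 31 * Long.hashCode(quota) + Arrays.hashCode(typeQuota);
  }
}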

[11/50] [abbrv] hadoop git commit: YARN-9069. Fix SchedulerInfo#getSchedulerType for custom schedulers. Contributed by Bilwa S T.

2018-12-05 Thread shv
YARN-9069. Fix SchedulerInfo#getSchedulerType for custom schedulers. 
Contributed by Bilwa S T.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/07142f54
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/07142f54
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/07142f54

Branch: refs/heads/HDFS-12943
Commit: 07142f54a8c7f70857e99c041f3a2a5189c809b5
Parents: a68d766
Author: bibinchundatt 
Authored: Thu Nov 29 22:02:59 2018 +0530
Committer: bibinchundatt 
Committed: Thu Nov 29 22:02:59 2018 +0530

--
 .../yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java  | 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/07142f54/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java
index 163f707..ede0d15 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/SchedulerInfo.java
@@ -54,6 +54,8 @@ public class SchedulerInfo {
   this.schedulerName = "Fair Scheduler";
 } else if (rs instanceof FifoScheduler) {
   this.schedulerName = "Fifo Scheduler";
+} else {
+  this.schedulerName = rs.getClass().getSimpleName();
 }
     this.minAllocResource = new ResourceInfo(rs.getMinimumResourceCapability());
     this.maxAllocResource = new ResourceInfo(rs.getMaximumResourceCapability());
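
A minimal sketch of the new fallback (hypothetical scheduler class): any scheduler other than the built-in ones now reports its simple class name instead of leaving schedulerName unset in the REST response.

class MyCustomScheduler {
}

public class SchedulerNameDemo {
  public static void main(String[] args) {
    Object rs = new MyCustomScheduler();
    // The real code reaches this branch only after the instanceof checks
    // for the Capacity, Fair and Fifo schedulers all fail.
    System.out.println(rs.getClass().getSimpleName()); // prints "MyCustomScheduler"
  }
}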





[46/50] [abbrv] hadoop git commit: HADOOP-15974. Upgrade Curator version to 2.13.0 to fix ZK tests. Contributed by Akira Ajisaka

2018-12-05 Thread shv
HADOOP-15974. Upgrade Curator version to 2.13.0 to fix ZK tests. Contributed by 
Akira Ajisaka


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9287ab36
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9287ab36
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9287ab36

Branch: refs/heads/HDFS-12943
Commit: 9287ab364292ce917fc120532681131821ac53ef
Parents: aa89492
Author: Jason Lowe 
Authored: Tue Dec 4 15:44:03 2018 -0600
Committer: Jason Lowe 
Committed: Tue Dec 4 15:44:03 2018 -0600

--
 hadoop-project/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9287ab36/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 5fd3a56..2b1fc09 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -87,7 +87,7 @@
     <protoc.path>${env.HADOOP_PROTOC_PATH}</protoc.path>
 
     <zookeeper.version>3.4.13</zookeeper.version>
-    <curator.version>2.12.0</curator.version>
+    <curator.version>2.13.0</curator.version>
     <findbugs.version>3.0.0</findbugs.version>
     <spotbugs.version>3.1.0-RC1</spotbugs.version>
     <dnsjava.version>2.1.7</dnsjava.version>





[17/50] [abbrv] hadoop git commit: HDDS-877. Ensure correct surefire version for Ozone test. Contributed by Xiaoyu Yao.

2018-12-05 Thread shv
HDDS-877. Ensure correct surefire version for Ozone test. Contributed by Xiaoyu 
Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ae5fbdd9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ae5fbdd9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ae5fbdd9

Branch: refs/heads/HDFS-12943
Commit: ae5fbdd9ed6ef09b588637f2eadd7a04e8382289
Parents: f534736
Author: Xiaoyu Yao 
Authored: Thu Nov 29 11:37:36 2018 -0800
Committer: Xiaoyu Yao 
Committed: Thu Nov 29 11:37:36 2018 -0800

--
 hadoop-hdds/pom.xml  | 1 +
 hadoop-ozone/pom.xml | 3 +--
 2 files changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae5fbdd9/hadoop-hdds/pom.xml
--
diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml
index 869ecbf..5537b3a 100644
--- a/hadoop-hdds/pom.xml
+++ b/hadoop-hdds/pom.xml
@@ -53,6 +53,7 @@
 0.5.1
 1.5.0.Final
 
+    <maven-surefire-plugin.version>3.0.0-M1</maven-surefire-plugin.version>
 
   
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae5fbdd9/hadoop-ozone/pom.xml
--
diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml
index 39c65d5..4c13bd6 100644
--- a/hadoop-ozone/pom.xml
+++ b/hadoop-ozone/pom.xml
@@ -37,8 +37,7 @@
 1.60
 Badlands
 ${ozone.version}
-
-
+    <maven-surefire-plugin.version>3.0.0-M1</maven-surefire-plugin.version>
   
   
 common





[25/50] [abbrv] hadoop git commit: YARN-9036. Escape newlines in health report in YARN UI. Contributed by Keqiu Hu

2018-12-05 Thread shv
YARN-9036. Escape newlines in health report in YARN UI. Contributed by Keqiu Hu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6d7b44c4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6d7b44c4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6d7b44c4

Branch: refs/heads/HDFS-12943
Commit: 6d7b44c48923c3e8a9c4d38f6803664dd5d61c2a
Parents: 38ea381
Author: Jonathan Hung 
Authored: Thu Nov 29 17:02:07 2018 -0800
Committer: Jonathan Hung 
Committed: Fri Nov 30 10:13:52 2018 -0800

--
 .../hadoop/yarn/server/resourcemanager/webapp/NodesPage.java | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d7b44c4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
index 09b1c0f..cb92baf 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.yarn.server.resourcemanager.webapp;
 
 import com.google.inject.Inject;
+import org.apache.commons.text.StringEscapeUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.NodeState;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -203,8 +204,9 @@ class NodesPage extends RmView {
 nodeTableData.length() - 1);
   }
   nodeTableData.append("]");
+      String nodeTableDataEscaped =
+          StringEscapeUtils.escapeJava(nodeTableData.toString());
   html.script().$type("text/javascript")
-  .__("var nodeTableData=" + nodeTableData).__();
+  .__("var nodeTableData=" + nodeTableDataEscaped).__();
   tbody.__().__();
 }
   }
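
A minimal standalone demonstration of the escaping (hypothetical demo class; it assumes commons-text on the classpath, which the patch already imports):

import org.apache.commons.text.StringEscapeUtils;

public class EscapeDemo {
  public static void main(String[] args) {
    // A health report containing a raw newline would terminate the generated
    // "var nodeTableData=..." JavaScript string literal mid-value.
    String report = "disk ok\nnetwork degraded";
    System.out.println(StringEscapeUtils.escapeJava(report));
    // prints: disk ok\nnetwork degraded
    // (the newline is now the two characters '\' and 'n', safe in a JS string)
  }
}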





[03/50] [abbrv] hadoop git commit: YARN-9061. Improve the GPU/FPGA module log message of container-executor. (Zhankun Tang via wangda)

2018-12-05 Thread shv
YARN-9061. Improve the GPU/FPGA module log message of container-executor. 
(Zhankun Tang via wangda)

Change-Id: Iece9b47438357077a53984a820d4d6423f480518


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9ed87567
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9ed87567
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9ed87567

Branch: refs/heads/HDFS-12943
Commit: 9ed87567ad0f1c26a263ce6d8fba56d066260c5c
Parents: 579ef4b
Author: Wangda Tan 
Authored: Wed Nov 28 14:31:31 2018 -0800
Committer: Wangda Tan 
Committed: Wed Nov 28 14:31:31 2018 -0800

--
 .../native/container-executor/impl/modules/fpga/fpga-module.c   | 5 +++--
 .../native/container-executor/impl/modules/gpu/gpu-module.c | 5 +++--
 2 files changed, 6 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9ed87567/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/fpga/fpga-module.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/fpga/fpga-module.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/fpga/fpga-module.c
index c1a2f83..e947d7c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/fpga/fpga-module.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/fpga/fpga-module.c
@@ -141,7 +141,7 @@ void reload_fpga_configuration() {
 /*
  * Format of FPGA request commandline:
  *
- * c-e fpga --excluded_fpgas 0,1,3 --container_id container_x_y
+ * c-e --module-fpga --excluded_fpgas 0,1,3 --container_id container_x_y
  */
 int handle_fpga_request(update_cgroups_parameters_function func,
 const char* module_name, int module_argc, char** module_argv) {
@@ -213,7 +213,8 @@ int handle_fpga_request(update_cgroups_parameters_function func,
 
   if (!minor_devices) {
  // Minor devices is null, skip following call.
- fprintf(ERRORFILE, "is not specified, skip cgroups call.\n");
+ fprintf(ERRORFILE,
+ "--excluded-fpgas is not specified, skip cgroups call.\n");
  goto cleanup;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9ed87567/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/gpu/gpu-module.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/gpu/gpu-module.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/gpu/gpu-module.c
index 1a1b164..7522338 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/gpu/gpu-module.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/modules/gpu/gpu-module.c
@@ -141,7 +141,7 @@ void reload_gpu_configuration() {
 /*
  * Format of GPU request commandline:
  *
- * c-e gpu --excluded_gpus 0,1,3 --container_id container_x_y
+ * c-e --module-gpu --excluded_gpus 0,1,3 --container_id container_x_y
  */
 int handle_gpu_request(update_cgroups_parameters_func func,
 const char* module_name, int module_argc, char** module_argv) {
@@ -213,7 +213,8 @@ int handle_gpu_request(update_cgroups_parameters_func func,
 
   if (!minor_devices) {
  // Minor devices is null, skip following call.
- fprintf(ERRORFILE, "is not specified, skip cgroups call.\n");
+ fprintf(ERRORFILE,
+ "--excluded_gpus is not specified, skip cgroups call.\n");
  goto cleanup;
   }
 





[05/50] [abbrv] hadoop git commit: YARN-8975. [Submarine] Use predefined Charset object StandardCharsets.UTF_8 instead of String UTF-8. (Zhankun Tang via wangda)

2018-12-05 Thread shv
YARN-8975. [Submarine] Use predefined Charset object StandardCharsets.UTF_8 
instead of String UTF-8. (Zhankun Tang via wangda)

Change-Id: If6c7904aa17895e543cfca245264249eb7328bdc


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/89764392
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/89764392
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/89764392

Branch: refs/heads/HDFS-12943
Commit: 897643928c534062d45d00a36b95fd99b4f6
Parents: 8ebeda9
Author: Wangda Tan 
Authored: Wed Nov 28 14:39:06 2018 -0800
Committer: Wangda Tan 
Committed: Wed Nov 28 14:39:06 2018 -0800

--
 .../submarine/runtimes/yarnservice/YarnServiceJobSubmitter.java  | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/89764392/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/yarnservice/YarnServiceJobSubmitter.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/yarnservice/YarnServiceJobSubmitter.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/yarnservice/YarnServiceJobSubmitter.java
index b58ad77..2e84c96 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/yarnservice/YarnServiceJobSubmitter.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/runtimes/yarnservice/YarnServiceJobSubmitter.java
@@ -49,6 +49,7 @@ import java.io.IOException;
 import java.io.OutputStreamWriter;
 import java.io.PrintWriter;
 import java.io.Writer;
+import java.nio.charset.StandardCharsets;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -218,7 +219,8 @@ public class YarnServiceJobSubmitter implements JobSubmitter {
   private String generateCommandLaunchScript(RunJobParameters parameters,
   TaskType taskType, Component comp) throws IOException {
 File file = File.createTempFile(taskType.name() + "-launch-script", ".sh");
-Writer w = new OutputStreamWriter(new FileOutputStream(file), "UTF-8");
+Writer w = new OutputStreamWriter(new FileOutputStream(file),
+StandardCharsets.UTF_8);
 PrintWriter pw = new PrintWriter(w);
 
 try {
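
A minimal sketch of the idiom (hypothetical file name, not the Submarine code): the Charset overload of OutputStreamWriter cannot throw the checked UnsupportedEncodingException that the "UTF-8" String overload forces callers to handle, and it rules out charset-name typos at runtime.

import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.io.Writer;
import java.nio.charset.StandardCharsets;

public class CharsetWriterDemo {
  public static void main(String[] args) throws IOException {
    try (Writer w = new OutputStreamWriter(
            new FileOutputStream("demo-launch-script.sh"), StandardCharsets.UTF_8);
         PrintWriter pw = new PrintWriter(w)) {
      pw.println("#!/bin/bash"); // always written as UTF-8 bytes
    }
  }
}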





[22/50] [abbrv] hadoop git commit: HADOOP-15957. WASB: Add asterisk wildcard support for PageBlobDirSet.

2018-12-05 Thread shv
HADOOP-15957. WASB: Add asterisk wildcard support for PageBlobDirSet.

Contributed by Da Zhou.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ccb640a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ccb640a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ccb640a

Branch: refs/heads/HDFS-12943
Commit: 7ccb640a66bb5bb9f657a2db85bcc8ae0ded4892
Parents: c9bfca2
Author: Da Zhou 
Authored: Fri Nov 30 10:12:41 2018 +
Committer: Steve Loughran 
Committed: Fri Nov 30 10:12:41 2018 +

--
 .../fs/azure/AzureNativeFileSystemStore.java|  62 ++-
 .../azure/ITestNativeAzureFileSystemLive.java   |   4 +-
 .../fs/azure/TestKeyPageBlobDirectories.java| 170 +++
 3 files changed, 231 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ccb640a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
index d2f9ca6..7bf3420 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
@@ -44,6 +44,7 @@ import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.azure.StorageInterface.CloudBlobContainerWrapper;
 import org.apache.hadoop.fs.azure.StorageInterface.CloudBlobDirectoryWrapper;
 import org.apache.hadoop.fs.azure.StorageInterface.CloudBlobWrapper;
@@ -241,6 +242,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
   private static final String HTTP_SCHEME = "http";
   private static final String HTTPS_SCHEME = "https";
   private static final String WASB_AUTHORITY_DELIMITER = "@";
+  private static final char ASTERISK_SYMBOL = '*';
   private static final String AZURE_ROOT_CONTAINER = "$root";
 
   private static final int DEFAULT_CONCURRENT_WRITES = 8;
@@ -1169,7 +1171,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
 for (String currentDir : rawDirs) {
   String myDir;
   try {
-myDir = verifyAndConvertToStandardFormat(currentDir);
+myDir = verifyAndConvertToStandardFormat(currentDir.trim());
   } catch (URISyntaxException ex) {
 throw new AzureException(String.format(
 "The directory %s specified in the configuration entry %s is not"
@@ -1214,7 +1216,12 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
   public boolean isKeyForDirectorySet(String key, Set<String> dirSet) {
     String defaultFS = FileSystem.getDefaultUri(sessionConfiguration).toString();
 for (String dir : dirSet) {
-  if (dir.isEmpty() || key.startsWith(dir + "/")) {
+  if (dir.isEmpty()) {
+// dir is root
+return true;
+  }
+
+  if (matchAsteriskPattern(key, dir)) {
 return true;
   }
 
@@ -1227,7 +1234,8 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
   // Concatenate the default file system prefix with the relative
   // page blob directory path.
   //
-  if (key.startsWith(trim(defaultFS, "/") + "/" + dir + "/")){
+  String dirWithPrefix = trim(defaultFS, "/") + "/" + dir;
+  if (matchAsteriskPattern(key, dirWithPrefix)) {
 return true;
   }
 }
@@ -1238,6 +1246,54 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
 return false;
   }
 
+  private boolean matchAsteriskPattern(String pathName, String pattern) {
+if (pathName == null || pathName.length() == 0) {
+  return false;
+}
+
+int pathIndex = 0;
+int patternIndex = 0;
+
+while (pathIndex < pathName.length() && patternIndex < pattern.length()) {
+  char charToMatch = pattern.charAt(patternIndex);
+
+  // normal char:
+  if (charToMatch != ASTERISK_SYMBOL) {
+if (charToMatch != pathName.charAt(pathIndex)) {
+  return false;
+}
+pathIndex++;
+patternIndex++;
+continue;
+  }
+
+  // ASTERISK_SYMBOL
+  // 1. * is used in path name: *a/b,a*/b, a/*b, a/b*
+      if (patternIndex > 0
+          && pattern.charAt(patternIndex - 1) != Path.SEPARATOR_CHAR
+          || patternIndex + 1 < pattern.length() && 
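
One plausible reading of the same asterisk semantics (a wildcard that matches within a single path segment) can be sketched with a regex translation. This is an illustrative alternative, not the WASB implementation:

import java.util.regex.Pattern;

public class WildcardDemo {
  // Translate a dir pattern such as "data/pageblob*" into a regex where '*'
  // matches any run of characters that does not cross a '/' boundary.
  static boolean matches(String key, String dirPattern) {
    String regex =
        "\\Q" + dirPattern.replace("*", "\\E[^/]*\\Q") + "\\E" + "(/.*)?";
    return Pattern.matches(regex, key);
  }

  public static void main(String[] args) {
    System.out.println(matches("data/pageblob1/file", "data/pageblob*")); // true
    System.out.println(matches("data/other/file", "data/pageblob*"));     // false
  }
}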

[15/50] [abbrv] hadoop git commit: HADOOP-15959. Revert "HADOOP-12751. While using kerberos Hadoop incorrectly assumes names with '@' to be non-simple"

2018-12-05 Thread shv
HADOOP-15959. Revert "HADOOP-12751. While using kerberos Hadoop incorrectly 
assumes names with '@' to be non-simple"

This reverts commit 829a2e4d271f05afb209ddc834cd4a0e85492eda.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d0edd372
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d0edd372
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d0edd372

Branch: refs/heads/HDFS-12943
Commit: d0edd37269bb40290b409d583bcf3b70897c13e0
Parents: 5e102f9
Author: Steve Loughran 
Authored: Thu Nov 29 17:52:11 2018 +
Committer: Steve Loughran 
Committed: Thu Nov 29 17:52:11 2018 +

--
 .../authentication/util/KerberosName.java   |  9 ++--
 .../TestKerberosAuthenticationHandler.java  |  7 ++-
 .../authentication/util/TestKerberosName.java   | 17 ++--
 .../java/org/apache/hadoop/security/KDiag.java  | 46 +---
 .../src/site/markdown/SecureMode.md |  6 ---
 .../org/apache/hadoop/security/TestKDiag.java   | 16 ---
 .../security/TestUserGroupInformation.java  | 27 
 7 files changed, 33 insertions(+), 95 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0edd372/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java
 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java
index 4e7ee3c..287bb13 100644
--- 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java
+++ 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java
@@ -324,8 +324,8 @@ public class KerberosName {
 }
   }
   if (result != null && nonSimplePattern.matcher(result).find()) {
-LOG.info("Non-simple name {} after auth_to_local rule {}",
-result, this);
+throw new NoMatchingRule("Non-simple name " + result +
+ " after auth_to_local rule " + this);
   }
   if (toLowerCase && result != null) {
 result = result.toLowerCase(Locale.ENGLISH);
@@ -378,7 +378,7 @@ public class KerberosName {
   /**
* Get the translation of the principal name into an operating system
* user name.
-   * @return the user name
+   * @return the short name
* @throws IOException throws if something is wrong with the rules
*/
   public String getShortName() throws IOException {
@@ -398,8 +398,7 @@ public class KerberosName {
 return result;
   }
 }
-LOG.info("No auth_to_local rules applied to {}", this);
-return toString();
+throw new NoMatchingRule("No rules applied to " + toString());
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0edd372/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java
 
b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java
index e672391..8b4bc15 100644
--- 
a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java
+++ 
b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java
@@ -108,7 +108,12 @@ public class TestKerberosAuthenticationHandler
 kn = new KerberosName("bar@BAR");
 Assert.assertEquals("bar", kn.getShortName());
 kn = new KerberosName("bar@FOO");
-Assert.assertEquals("bar@FOO", kn.getShortName());
+try {
+  kn.getShortName();
+  Assert.fail();
+}
+catch (Exception ex) {  
+}
   }
 
   @Test(timeout=6)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0edd372/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosName.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosName.java
 
b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosName.java
index c584fce..2db0df4 100644
--- 
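
A minimal sketch of the restored behavior (it assumes the local krb5 default realm is BAR; rule evaluation is environment-dependent, so both lookups are guarded):

import java.io.IOException;
import org.apache.hadoop.security.authentication.util.KerberosName;

public class AuthToLocalDemo {
  public static void main(String[] args) {
    KerberosName.setRules("DEFAULT");
    printShortName("bar@BAR"); // default realm: maps to "bar"
    printShortName("bar@FOO"); // foreign realm: no rule matches, throws again after this revert
  }

  static void printShortName(String principal) {
    try {
      System.out.println(new KerberosName(principal).getShortName());
    } catch (IOException noMatchingRule) {
      System.out.println(principal + " -> no auth_to_local rule matched");
    }
  }
}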

[06/50] [abbrv] hadoop git commit: YARN-8989. [YARN-8851] Move DockerCommandPlugin volume related APIs' invocation from DockerLinuxContainerRuntime#prepareContainer to #launchContainer. (Zhankun Tang via wangda)

2018-12-05 Thread shv
YARN-8989. [YARN-8851] Move DockerCommandPlugin volume related APIs' invocation 
from DockerLinuxContainerRuntime#prepareContainer to #launchContainer. (Zhankun 
Tang via wangda)

Change-Id: Ia6d532c687168448416dfdf46f0ac34bff20e6ca


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fe7dab8e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fe7dab8e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fe7dab8e

Branch: refs/heads/HDFS-12943
Commit: fe7dab8ef55f08cf18c2d62c782c1ab8930a5a15
Parents: 8976439
Author: Wangda Tan 
Authored: Wed Nov 28 14:55:16 2018 -0800
Committer: Wangda Tan 
Committed: Wed Nov 28 15:03:06 2018 -0800

--
 .../runtime/DockerLinuxContainerRuntime.java| 44 
 .../runtime/TestDockerContainerRuntime.java | 15 ---
 2 files changed, 24 insertions(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fe7dab8e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 15ff0ff..225bc19 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
@@ -456,32 +456,6 @@ public class DockerLinuxContainerRuntime implements 
LinuxContainerRuntime {
   @Override
   public void prepareContainer(ContainerRuntimeContext ctx)
   throws ContainerExecutionException {
-Container container = ctx.getContainer();
-
-// Create volumes when needed.
-if (nmContext != null
-&& nmContext.getResourcePluginManager().getNameToPlugins() != null) {
-  for (ResourcePlugin plugin : nmContext.getResourcePluginManager()
-  .getNameToPlugins().values()) {
-DockerCommandPlugin dockerCommandPlugin =
-plugin.getDockerCommandPluginInstance();
-if (dockerCommandPlugin != null) {
-  DockerVolumeCommand dockerVolumeCommand =
-  dockerCommandPlugin.getCreateDockerVolumeCommand(
-  ctx.getContainer());
-  if (dockerVolumeCommand != null) {
-runDockerVolumeCommand(dockerVolumeCommand, container);
-
-// After volume created, run inspect to make sure volume properly
-// created.
-if (dockerVolumeCommand.getSubCommand().equals(
-DockerVolumeCommand.VOLUME_CREATE_SUB_COMMAND)) {
-  checkDockerVolumeCreated(dockerVolumeCommand, container);
-}
-  }
-}
-  }
-}
   }
 
   private void checkDockerVolumeCreated(
@@ -1034,14 +1008,30 @@ public class DockerLinuxContainerRuntime implements 
LinuxContainerRuntime {
   }
 }
 
-// use plugins to update docker run command.
+// use plugins to create volume and update docker run command.
 if (nmContext != null
 && nmContext.getResourcePluginManager().getNameToPlugins() != null) {
   for (ResourcePlugin plugin : nmContext.getResourcePluginManager()
   .getNameToPlugins().values()) {
 DockerCommandPlugin dockerCommandPlugin =
 plugin.getDockerCommandPluginInstance();
+
 if (dockerCommandPlugin != null) {
+  // Create volumes when needed.
+  DockerVolumeCommand dockerVolumeCommand =
+  dockerCommandPlugin.getCreateDockerVolumeCommand(
+  ctx.getContainer());
+  if (dockerVolumeCommand != null) {
+runDockerVolumeCommand(dockerVolumeCommand, container);
+
+// After volume created, run inspect to make sure volume properly
+// created.
+if (dockerVolumeCommand.getSubCommand().equals(
+DockerVolumeCommand.VOLUME_CREATE_SUB_COMMAND)) {
+  checkDockerVolumeCreated(dockerVolumeCommand, container);
+}
+  }
+  // Update cmd
   dockerCommandPlugin.updateDockerRunCommand(runCommand, container);
 }
   }
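
A minimal sketch of the create-then-inspect idiom the runtime now applies at launch time (a hypothetical helper driving the docker CLI directly; it assumes a local Docker daemon and is not the DockerCommandPlugin API):

import java.io.IOException;

public class VolumeCreateDemo {
  // After "docker volume create", run "docker volume inspect" and treat a
  // non-zero exit code as "volume was not actually created".
  static void createAndVerify(String volume)
      throws IOException, InterruptedException {
    run("docker", "volume", "create", volume);
    if (run("docker", "volume", "inspect", volume) != 0) {
      throw new IOException("Volume " + volume + " missing after create");
    }
  }

  static int run(String... cmd) throws IOException, InterruptedException {
    return new ProcessBuilder(cmd).inheritIO().start().waitFor();
  }

  public static void main(String[] args) throws Exception {
    createAndVerify("demo-vol");
  }
}

Deferring this work to launchContainer means volumes are created only when a container actually starts, rather than during the earlier prepare phase.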


[01/50] [abbrv] hadoop git commit: YARN-9030. Log aggregation changes to handle filesystems which do not support setting permissions. (Suma Shivaprasad via wangda)

2018-12-05 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-12943 b5497070a -> 47d726015


YARN-9030. Log aggregation changes to handle filesystems which do not support 
setting permissions. (Suma Shivaprasad via wangda)

Change-Id: I80f1e8196b8624e24d74494719fdedfd7061dced


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9de8e8d0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9de8e8d0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9de8e8d0

Branch: refs/heads/HDFS-12943
Commit: 9de8e8d0496a2628b63cc841b1fdee80e2912f7a
Parents: 4d8de7a
Author: Wangda Tan 
Authored: Wed Nov 21 17:28:37 2018 -0800
Committer: Wangda Tan 
Committed: Wed Nov 28 13:36:21 2018 -0800

--
 .../LogAggregationFileController.java   | 74 +++-
 1 file changed, 56 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9de8e8d0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
index fe65288..e37308d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/filecontroller/LogAggregationFileController.java
@@ -109,6 +109,8 @@ public abstract class LogAggregationFileController {
   protected int retentionSize;
   protected String fileControllerName;
 
+  protected boolean fsSupportsChmod = true;
+
   public LogAggregationFileController() {}
 
   /**
@@ -250,7 +252,6 @@ public abstract class LogAggregationFileController {
* Verify and create the remote log directory.
*/
   public void verifyAndCreateRemoteLogDir() {
-boolean logPermError = true;
 // Checking the existence of the TLD
 FileSystem remoteFS = null;
 try {
@@ -264,14 +265,12 @@ public abstract class LogAggregationFileController {
 try {
   FsPermission perms =
   remoteFS.getFileStatus(remoteRootLogDir).getPermission();
-  if (!perms.equals(TLDIR_PERMISSIONS) && logPermError) {
+  if (!perms.equals(TLDIR_PERMISSIONS)) {
 LOG.warn("Remote Root Log Dir [" + remoteRootLogDir
 + "] already exist, but with incorrect permissions. "
 + "Expected: [" + TLDIR_PERMISSIONS + "], Found: [" + perms
 + "]." + " The cluster may have problems with multiple users.");
-logPermError = false;
-  } else {
-logPermError = true;
+
   }
 } catch (FileNotFoundException e) {
   remoteExists = false;
@@ -280,15 +279,26 @@ public abstract class LogAggregationFileController {
   "Failed to check permissions for dir ["
   + remoteRootLogDir + "]", e);
 }
+
+Path qualified =
+remoteRootLogDir.makeQualified(remoteFS.getUri(),
+remoteFS.getWorkingDirectory());
 if (!remoteExists) {
   LOG.warn("Remote Root Log Dir [" + remoteRootLogDir
   + "] does not exist. Attempting to create it.");
   try {
-Path qualified =
-remoteRootLogDir.makeQualified(remoteFS.getUri(),
-remoteFS.getWorkingDirectory());
 remoteFS.mkdirs(qualified, new FsPermission(TLDIR_PERMISSIONS));
-remoteFS.setPermission(qualified, new FsPermission(TLDIR_PERMISSIONS));
+
+// Not possible to query FileSystem API to check if it supports
+// chmod, chown etc. Hence resorting to catching exceptions here.
+// Remove when FS APi is ready
+        try {
+          remoteFS.setPermission(qualified, new FsPermission(TLDIR_PERMISSIONS));
+        } catch (UnsupportedOperationException use) {
+          LOG.info("Unable to set permissions for configured filesystem {}"
+              + " since it does not support this", remoteFS.getScheme());
+          fsSupportsChmod = false;
+        }
 
 UserGroupInformation loginUser = UserGroupInformation.getLoginUser();
 String primaryGroupName = null;
@@ -301,13 +311,31 @@ public abstract class LogAggregationFileController {
 }
 // set owner on the remote directory only if the primary group exists
 if (primaryGroupName != null) {
-  remoteFS.setOwner(qualified,
-  loginUser.getShortUserName(), primaryGroupName);
+  
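
A minimal sketch of the probe pattern the patch adopts (hypothetical demo class): since FileSystem exposes no capability query for permission support here, attempt setPermission once and remember the outcome, exactly as the fsSupportsChmod flag above does.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class ChmodProbeDemo {
  static boolean supportsChmod(FileSystem fs, Path dir) throws IOException {
    try {
      fs.setPermission(dir, new FsPermission((short) 01777));
      return true;
    } catch (UnsupportedOperationException use) {
      return false; // e.g. some object stores
    }
  }

  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    Path dir = new Path("/tmp/chmod-probe");
    fs.mkdirs(dir);
    System.out.println("chmod supported: " + supportsChmod(fs, dir)); // true on the local FS
  }
}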

hadoop git commit: YARN-9057. Removed third party class bundle from CSI jar file. Contributed by Weiwei Yang

2018-12-05 Thread eyang
Repository: hadoop
Updated Branches:
  refs/heads/trunk 228156cfd -> 1dabb31cd


YARN-9057.  Removed third party class bundle from CSI jar file.
Contributed by Weiwei Yang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1dabb31c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1dabb31c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1dabb31c

Branch: refs/heads/trunk
Commit: 1dabb31cdf907cbee418c469368a59393fd52844
Parents: 228156c
Author: Eric Yang 
Authored: Wed Dec 5 15:56:10 2018 -0500
Committer: Eric Yang 
Committed: Wed Dec 5 15:56:10 2018 -0500

--
 .../resources/assemblies/hadoop-yarn-dist.xml   |  4 +
 .../hadoop-yarn/hadoop-yarn-csi/pom.xml | 78 
 2 files changed, 19 insertions(+), 63 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1dabb31c/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
--
diff --git 
a/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml 
b/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
index 4055acb..a5c3c0e 100644
--- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
+++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-yarn-dist.xml
@@ -220,6 +220,10 @@
       <directory>hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/hadoop-yarn-server-timelineservice-hbase-common/target/lib</directory>
       <outputDirectory>share/hadoop/${hadoop.component}/timelineservice/lib</outputDirectory>
     </fileSet>
+    <fileSet>
+      <directory>hadoop-yarn/hadoop-yarn-csi/target/lib</directory>
+      <outputDirectory>share/hadoop/${hadoop.component}/csi/lib</outputDirectory>
+    </fileSet>
   
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1dabb31c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/pom.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/pom.xml 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/pom.xml
index 27d8452..1a19f0e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/pom.xml
@@ -86,14 +86,17 @@
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-common</artifactId>
+      <scope>provided</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-yarn-common</artifactId>
+      <scope>provided</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-yarn-api</artifactId>
+      <scope>provided</scope>
     </dependency>
     <dependency>
       <groupId>javax.annotation</groupId>
@@ -114,6 +117,18 @@
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
         <artifactId>maven-dependency-plugin</artifactId>
+        <executions>
+          <execution>
+            <phase>package</phase>
+            <goals>
+              <goal>copy-dependencies</goal>
+            </goals>
+            <configuration>
+              <includeScope>runtime</includeScope>
+              <outputDirectory>${project.build.directory}/lib</outputDirectory>
+            </configuration>
+          </execution>
+        </executions>
       </plugin>
       <plugin>
         <groupId>org.apache.maven.plugins</groupId>
@@ -148,69 +163,6 @@
 
 
 
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-shade-plugin</artifactId>
-        <version>${maven-shade-plugin.version}</version>
-        <configuration>
-          <createDependencyReducedPom>false</createDependencyReducedPom>
-        </configuration>
-        <executions>
-          <execution>
-            <phase>package</phase>
-            <goals>
-              <goal>shade</goal>
-            </goals>
-            <configuration>
-              <relocations>
-                <relocation>
-                  <pattern>io.grpc</pattern>
-                  <shadedPattern>csi.io.grpc</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>com.google</pattern>
-                  <shadedPattern>csi.com.google</shadedPattern>
-                </relocation>
-                <relocation>
-                  <pattern>io.netty</pattern>
-                  <shadedPattern>csi.io.netty</shadedPattern>
-                </relocation>
-              </relocations>
-            </configuration>
-          </execution>
-        </executions>
-      </plugin>
-      <plugin>
-        <groupId>org.apache.maven.plugins</groupId>
-        <artifactId>maven-antrun-plugin</artifactId>
-        <executions>
-          <execution>
-            <id>unpack</id>
-            <phase>package</phase>