hadoop git commit: YARN-6775. CapacityScheduler: Improvements to assignContainers, avoid unnecessary canAssignToUser/Queue calls. (Nathan Roberts via wangda)

2017-07-19 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 6ed569df2 -> a39617df6


YARN-6775. CapacityScheduler: Improvements to assignContainers, avoid 
unnecessary canAssignToUser/Queue calls. (Nathan Roberts via wangda)

Change-Id: I5951f0997547de7d2e4a30b4ad87ab0a59b3066a


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a39617df
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a39617df
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a39617df

Branch: refs/heads/branch-2.8
Commit: a39617df63e758629123da50d0690f3c13657eca
Parents: 6ed569d
Author: Wangda Tan 
Authored: Wed Jul 19 15:29:45 2017 -0700
Committer: Wangda Tan 
Committed: Wed Jul 19 15:29:45 2017 -0700

--
 .../scheduler/capacity/LeafQueue.java   |  93 
 .../capacity/TestCapacityScheduler.java | 145 +++
 .../scheduler/capacity/TestLeafQueue.java   |  10 +-
 .../scheduler/capacity/TestReservations.java|   6 +-
 4 files changed, 221 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a39617df/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
index 7258aee..bd3bdff 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
@@ -956,25 +956,56 @@ public class LeafQueue extends AbstractCSQueue {
 return CSAssignment.NULL_ASSIGNMENT;
   }
 
+  Map<String, CachedUserLimit> userLimits = new HashMap<>();
+  boolean needAssignToQueueCheck = true;
   for (Iterator<FiCaSchedulerApp> assignmentIterator =
orderingPolicy.getAssignmentIterator(); assignmentIterator
.hasNext(); ) {
 FiCaSchedulerApp application = assignmentIterator.next();
 
 // Check queue max-capacity limit
-if (!super.canAssignToThisQueue(clusterResource, node.getPartition(),
-currentResourceLimits, application.getCurrentReservation(),
-schedulingMode)) {
-  return CSAssignment.NULL_ASSIGNMENT;
+Resource appReserved = application.getCurrentReservation();
+if (needAssignToQueueCheck) {
+  if (!super.canAssignToThisQueue(clusterResource, node.getPartition(),
+  currentResourceLimits, appReserved, schedulingMode)) {
+return CSAssignment.NULL_ASSIGNMENT;
+  }
+  // If there was no reservation and canAssignToThisQueue returned
+  // true, there is no reason to check further.
+  if (!this.reservationsContinueLooking
+  || appReserved.equals(Resources.none()) || !node.getPartition()
+  .equals(CommonNodeLabelsManager.NO_LABEL)) {
+needAssignToQueueCheck = false;
+  }
 }
 
+CachedUserLimit cul = userLimits.get(application.getUser());
+Resource cachedUserLimit = null;
+if (cul != null) {
+  cachedUserLimit = cul.userLimit;
+}
 Resource userLimit =
 computeUserLimitAndSetHeadroom(application, clusterResource,
-node.getPartition(), schedulingMode);
+node.getPartition(), schedulingMode, cachedUserLimit);
+if (cul == null) {
+  cul = new CachedUserLimit(userLimit);
+  userLimits.put(application.getUser(), cul);
+}
 
 // Check user limit
-if (!canAssignToUser(clusterResource, application.getUser(), userLimit,
-application, node.getPartition(), currentResourceLimits)) {
+boolean userAssignable = true;
+if (!cul.canAssign && Resources.fitsIn(appReserved, cul.reservation)) {
+  userAssignable = false;
+} else {
+  userAssignable =
+  canAssignToUser(clusterResource, application.getUser(), userLimit,
+  appReserved, node.getPartition(), currentResourceLimits);
+  if (!userAssignable && Resources.fitsIn(cul.reservation, 
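
The excerpt ends before the definition of the CachedUserLimit holder it introduces. A minimal sketch of that helper, inferred only from the usages above (cul.userLimit, cul.canAssign, cul.reservation); the field defaults and nesting are assumptions:

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.resource.Resources;

// Nested inside LeafQueue in the patch: one entry per user, created for the
// first application seen for that user in the assignment loop and reused for
// every later one, so computeUserLimitAndSetHeadroom()/canAssignToUser() run
// once per user instead of once per application.
static class CachedUserLimit {
  final Resource userLimit;                // per-user limit, computed once
  boolean canAssign = true;                // last canAssignToUser() verdict
  Resource reservation = Resources.none(); // reservation that verdict covered

  CachedUserLimit(Resource userLimit) {
    this.userLimit = userLimit;
  }
}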

[47/50] [abbrv] hadoop git commit: YARN-6777. Support for ApplicationMasterService processing chain of interceptors. (asuresh)

2017-07-19 Thread xyao
YARN-6777. Support for ApplicationMasterService processing chain of 
interceptors. (asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/077fcf6a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/077fcf6a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/077fcf6a

Branch: refs/heads/HDFS-7240
Commit: 077fcf6a96e420e7f36350931722b8603d010cf1
Parents: 3556e36
Author: Arun Suresh 
Authored: Mon Jul 17 17:02:22 2017 -0700
Committer: Arun Suresh 
Committed: Wed Jul 19 12:26:40 2017 -0700

--
 .../ams/ApplicationMasterServiceContext.java|  29 
 .../ams/ApplicationMasterServiceProcessor.java  |  30 ++--
 .../hadoop/yarn/conf/YarnConfiguration.java |   5 +-
 .../src/main/resources/yarn-default.xml |  10 ++
 .../resourcemanager/AMSProcessingChain.java | 102 
 .../ApplicationMasterService.java   |  49 --
 .../resourcemanager/DefaultAMSProcessor.java|  69 
 ...pportunisticContainerAllocatorAMService.java |  67 +---
 .../yarn/server/resourcemanager/RMContext.java  |   3 +-
 .../TestApplicationMasterService.java   | 163 ++-
 10 files changed, 446 insertions(+), 81 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/077fcf6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceContext.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceContext.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceContext.java
new file mode 100644
index 000..988c727
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceContext.java
@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.ams;
+
+/**
+ * This is a marker interface for a context object that is injected into
+ * the ApplicationMasterService processor. The processor implementation
+ * is free to type cast this based on the availability of the context's
+ * implementation in the classpath.
+ */
+public interface ApplicationMasterServiceContext {
+
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/077fcf6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceProcessor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceProcessor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceProcessor.java
index b426f48..b7d925a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceProcessor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceProcessor.java
@@ -38,34 +38,44 @@ import java.io.IOException;
 public interface ApplicationMasterServiceProcessor {
 
   /**
+   * Initialize with an ApplicationMasterServiceContext as well as the
+   * next processor in the chain.
+   * @param amsContext AMSContext.
+   * @param nextProcessor next ApplicationMasterServiceProcessor
+   */
+  void init(ApplicationMasterServiceContext amsContext,
+  ApplicationMasterServiceProcessor nextProcessor);
+
+  /**
* Register AM attempt.
* @param applicationAttemptId applicationAttemptId.
* @param request Register Request.
-   * @return Register Response.
+   * @param response Register Response.
* @throws IOException IOException.
*/
-  RegisterApplicationMasterResponse registerApplicationMaster(
+  void registerApplicationMaster(
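
The diff is cut off inside the new registerApplicationMaster() signature, but the init() contract above already shows the chain-of-responsibility shape. A hypothetical interceptor under that contract (the class name and method body are illustrative, not part of the patch; the interface's remaining methods, truncated above, would delegate the same way):

import java.io.IOException;

import org.apache.hadoop.yarn.ams.ApplicationMasterServiceContext;
import org.apache.hadoop.yarn.ams.ApplicationMasterServiceProcessor;
import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;

public class AuditingAMSProcessor implements ApplicationMasterServiceProcessor {
  private ApplicationMasterServiceProcessor next;

  @Override
  public void init(ApplicationMasterServiceContext amsContext,
      ApplicationMasterServiceProcessor nextProcessor) {
    this.next = nextProcessor; // remember the next link in the chain
  }

  @Override
  public void registerApplicationMaster(ApplicationAttemptId attemptId,
      RegisterApplicationMasterRequest request,
      RegisterApplicationMasterResponse response) throws IOException {
    // pre-process the request here, then hand it down the chain; the tail of
    // the chain (DefaultAMSProcessor) fills in the response.
    next.registerApplicationMaster(attemptId, request, response);
    // post-process the populated response here.
  }

  // allocate(...) and finishApplicationMaster(...), not shown in this
  // excerpt, would delegate to 'next' in the same way.
}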
 

[11/50] [abbrv] hadoop git commit: MAPREDUCE-6910. MapReduceTrackingUriPlugin can not return the right URI of history server with HTTPS. Contributed by Lantao Jin

2017-07-19 Thread xyao
MAPREDUCE-6910. MapReduceTrackingUriPlugin can not return the right URI of 
history server with HTTPS. Contributed by Lantao Jin


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/43f05032
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/43f05032
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/43f05032

Branch: refs/heads/HDFS-7240
Commit: 43f0503286eccbc6bb8ae77584b635bfd0c48e50
Parents: ebc048c
Author: Ravi Prakash 
Authored: Thu Jul 13 16:16:45 2017 -0700
Committer: Ravi Prakash 
Committed: Thu Jul 13 16:16:45 2017 -0700

--
 .../hadoop/mapreduce/v2/util/MRWebAppUtil.java  |  9 ---
 .../webapp/TestMapReduceTrackingUriPlugin.java  | 26 ++--
 2 files changed, 29 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/43f05032/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java
index d367060..951c9d5 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRWebAppUtil.java
@@ -29,7 +29,6 @@ import 
org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
-import org.apache.hadoop.yarn.ipc.RPCUtil;
 
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
@@ -76,7 +75,9 @@ public class MRWebAppUtil {
 : "http://";
   }
 
-  public static String getJHSWebappScheme() {
+  public static String getJHSWebappScheme(Configuration conf) {
+setHttpPolicyInJHS(conf.get(JHAdminConfig.MR_HS_HTTP_POLICY,
+JHAdminConfig.DEFAULT_MR_HS_HTTP_POLICY));
 return httpPolicyInJHS == HttpConfig.Policy.HTTPS_ONLY ? "https://"
 : "http://";
   }
@@ -101,7 +102,7 @@ public class MRWebAppUtil {
   }
   
   public static String getJHSWebappURLWithScheme(Configuration conf) {
-return getJHSWebappScheme() + getJHSWebappURLWithoutScheme(conf);
+return getJHSWebappScheme(conf) + getJHSWebappURLWithoutScheme(conf);
   }
   
   public static InetSocketAddress getJHSWebBindAddress(Configuration conf) {
@@ -153,7 +154,7 @@ public class MRWebAppUtil {
   
   public static String getApplicationWebURLOnJHSWithScheme(Configuration conf,
   ApplicationId appId) throws UnknownHostException {
-return getJHSWebappScheme()
+return getJHSWebappScheme(conf)
 + getApplicationWebURLOnJHSWithoutScheme(conf, appId);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/43f05032/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestMapReduceTrackingUriPlugin.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestMapReduceTrackingUriPlugin.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestMapReduceTrackingUriPlugin.java
index 8c3be58..9291097 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestMapReduceTrackingUriPlugin.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs-plugins/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestMapReduceTrackingUriPlugin.java
@@ -23,6 +23,7 @@ import static org.junit.Assert.assertEquals;
 import java.net.URI;
 import java.net.URISyntaxException;
 
+import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -30,17 +31,38 @@ import org.junit.Test;
 
 public class TestMapReduceTrackingUriPlugin {
   @Test
-  public void testProducesHistoryServerUriForAppId() throws URISyntaxException {
+  public void 
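
The new test names are cut off above, but the contract being tested is simple: the history-server scheme is now derived from the supplied Configuration on each call instead of relying on previously initialized static state. A hedged sketch of that contract, using only the config key and methods visible in this diff:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
import org.apache.hadoop.mapreduce.v2.util.MRWebAppUtil;

public class JHSSchemeExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // With an HTTPS_ONLY policy the returned URL must start with "https://".
    conf.set(JHAdminConfig.MR_HS_HTTP_POLICY, "HTTPS_ONLY");
    System.out.println(MRWebAppUtil.getJHSWebappURLWithScheme(conf));
  }
}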

[27/50] [abbrv] hadoop git commit: HADOOP-14640. Azure: Support affinity for service running on localhost and reuse SPNEGO hadoop.auth cookie for authorization, SASKey and delegation token generation.

2017-07-19 Thread xyao
HADOOP-14640. Azure: Support affinity for service running on localhost and 
reuse SPNEGO hadoop.auth cookie for authorization, SASKey and delegation token 
generation. Contributed by Santhosh G Nayak.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b0e78ae0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b0e78ae0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b0e78ae0

Branch: refs/heads/HDFS-7240
Commit: b0e78ae085928c82ae63a05a29a628c2e289c0fc
Parents: fb3b5d3
Author: Jitendra Pandey 
Authored: Mon Jul 17 02:27:55 2017 -0700
Committer: Jitendra Pandey 
Committed: Mon Jul 17 02:27:55 2017 -0700

--
 .../fs/azure/RemoteSASKeyGeneratorImpl.java |  8 +-
 .../fs/azure/RemoteWasbAuthorizerImpl.java  |  8 +-
 .../fs/azure/SecureWasbRemoteCallHelper.java| 86 
 .../hadoop/fs/azure/WasbRemoteCallHelper.java   | 61 +++---
 .../hadoop/fs/azure/security/Constants.java | 19 +++--
 .../RemoteWasbDelegationTokenManager.java   | 27 +++---
 .../hadoop/fs/azure/security/SpnegoToken.java   | 49 +++
 .../fs/azure/TestWasbRemoteCallHelper.java  | 58 -
 8 files changed, 245 insertions(+), 71 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0e78ae0/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java
index 87f3b0b..a7cedea 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteSASKeyGeneratorImpl.java
@@ -105,10 +105,11 @@ public class RemoteSASKeyGeneratorImpl extends SASKeyGeneratorImpl {
*/
   private static final String
   SAS_KEY_GENERATOR_HTTP_CLIENT_RETRY_POLICY_SPEC_DEFAULT =
-  "1000,3,1,2";
+  "10,3,100,2";
 
   private WasbRemoteCallHelper remoteCallHelper = null;
   private boolean isKerberosSupportEnabled;
+  private boolean isSpnegoTokenCacheEnabled;
   private RetryPolicy retryPolicy;
   private String[] commaSeparatedUrls;
 
@@ -127,13 +128,16 @@ public class RemoteSASKeyGeneratorImpl extends SASKeyGeneratorImpl {
 
 this.isKerberosSupportEnabled =
 conf.getBoolean(Constants.AZURE_KERBEROS_SUPPORT_PROPERTY_NAME, false);
+this.isSpnegoTokenCacheEnabled =
+conf.getBoolean(Constants.AZURE_ENABLE_SPNEGO_TOKEN_CACHE, true);
 this.commaSeparatedUrls = conf.getTrimmedStrings(KEY_CRED_SERVICE_URLS);
 if (this.commaSeparatedUrls == null || this.commaSeparatedUrls.length <= 0) {
   throw new IOException(
   KEY_CRED_SERVICE_URLS + " config not set" + " in configuration.");
 }
 if (isKerberosSupportEnabled && UserGroupInformation.isSecurityEnabled()) {
-  this.remoteCallHelper = new SecureWasbRemoteCallHelper(retryPolicy, false);
+  this.remoteCallHelper = new SecureWasbRemoteCallHelper(retryPolicy, false,
+  isSpnegoTokenCacheEnabled);
 } else {
   this.remoteCallHelper = new WasbRemoteCallHelper(retryPolicy);
 }
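
Both retry-policy spec defaults in this patch change from "1000,3,1,2" to "10,3,100,2". Assuming the spec follows Hadoop's usual pairwise sleepMillis,numRetries convention (as in RetryPolicies.multipleLinearRandomRetry), the new default means three retries roughly 10 ms apart followed by two roughly 100 ms apart. A tiny sketch of reading such a spec, under that assumption:

public class RetrySpecExample {
  public static void main(String[] args) {
    String spec = "10,3,100,2"; // sleepMillis,retries[,sleepMillis,retries...]
    String[] parts = spec.split(",");
    for (int i = 0; i + 1 < parts.length; i += 2) {
      System.out.printf("up to %s retries, ~%s ms apart%n",
          parts[i + 1], parts[i]);
    }
  }
}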

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0e78ae0/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java
index e2d515c..cd4e0a3 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/RemoteWasbAuthorizerImpl.java
@@ -93,10 +93,11 @@ public class RemoteWasbAuthorizerImpl implements WasbAuthorizerInterface {
* Authorization Remote http client retry policy spec default value. {@value}
*/
   private static final String AUTHORIZER_HTTP_CLIENT_RETRY_POLICY_SPEC_DEFAULT =
-  "1000,3,1,2";
+  "10,3,100,2";
 
   private WasbRemoteCallHelper remoteCallHelper = null;
   private boolean isKerberosSupportEnabled;
+  private boolean isSpnegoTokenCacheEnabled;
   private RetryPolicy retryPolicy;
   private String[] commaSeparatedUrls = null;
 
@@ -111,6 +112,8 @@ public class RemoteWasbAuthorizerImpl implements WasbAuthorizerInterface {
 LOG.debug("Initializing RemoteWasbAuthorizerImpl instance");
 

[34/50] [abbrv] hadoop git commit: HADOOP-14539. Move commons logging APIs over to slf4j in hadoop-common. Contributed by Wenxin He.

2017-07-19 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
index a3bccef..1574431 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
@@ -32,13 +32,12 @@ import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.Shell;
 import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
 import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
@@ -49,7 +48,7 @@ import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class IOUtils {
-  public static final Log LOG = LogFactory.getLog(IOUtils.class);
+  public static final Logger LOG = LoggerFactory.getLogger(IOUtils.class);
 
   /**
* Copies from one stream to another.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
index 908a893..2e21444 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/MapFile.java
@@ -23,8 +23,6 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -37,6 +35,8 @@ import org.apache.hadoop.io.compress.CompressionCodec;
 import org.apache.hadoop.util.Options;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_MAP_INDEX_SKIP_DEFAULT;
 import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_MAP_INDEX_SKIP_KEY;
@@ -60,7 +60,7 @@ import static 
org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_MAP_INDEX_SK
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public class MapFile {
-  private static final Log LOG = LogFactory.getLog(MapFile.class);
+  private static final Logger LOG = LoggerFactory.getLogger(MapFile.class);
 
   /** The name of the index file. */
   public static final String INDEX_FILE_NAME = "index";
@@ -1002,7 +1002,7 @@ public class MapFile {
   while (reader.next(key, value))   // copy all entries
 writer.append(key, value);
 } finally {
-  IOUtils.cleanup(LOG, writer, reader);
+  IOUtils.cleanupWithLogger(LOG, writer, reader);
 }
   }
 }
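
The same mechanical swap repeats across every file in this patch: the commons-logging Log/LogFactory pair becomes slf4j's Logger/LoggerFactory, and helpers that take a commons-logging Log (IOUtils.cleanup above) move to their slf4j-accepting variants (IOUtils.cleanupWithLogger). A minimal before/after sketch with a hypothetical class name:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class MigratedComponent {
  // before: private static final Log LOG =
  //     LogFactory.getLog(MigratedComponent.class);
  private static final Logger LOG =
      LoggerFactory.getLogger(MigratedComponent.class);

  void work(String path) {
    // slf4j's {} placeholders skip string concatenation when DEBUG is off
    LOG.debug("processing {}", path);
  }
}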

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java
index a8c0690..2e65f12 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/ReadaheadPool.java
@@ -23,8 +23,6 @@ import java.util.concurrent.ArrayBlockingQueue;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.io.nativeio.NativeIO;
@@ -33,6 +31,8 @@ import static 
org.apache.hadoop.io.nativeio.NativeIO.POSIX.POSIX_FADV_WILLNEED;
 
 import com.google.common.base.Preconditions;
 import 

[19/50] [abbrv] hadoop git commit: YARN-6625. yarn application -list returns a tracking URL for AM that doesn't work in secured and HA environment. (Yufei Gu)

2017-07-19 Thread xyao
YARN-6625. yarn application -list returns a tracking URL for AM that doesn't 
work in secured and HA environment. (Yufei Gu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9e0cde14
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9e0cde14
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9e0cde14

Branch: refs/heads/HDFS-7240
Commit: 9e0cde1469b8ffeb59619c64d6ece86b62424f04
Parents: e7d187a
Author: Yufei Gu 
Authored: Fri Jul 14 14:10:45 2017 -0700
Committer: Yufei Gu 
Committed: Fri Jul 14 14:10:45 2017 -0700

--
 .../server/webproxy/amfilter/AmIpFilter.java| 60 +
 .../server/webproxy/amfilter/TestAmFilter.java  | 70 +++-
 2 files changed, 114 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e0cde14/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java
index 6579191..cdab405 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmIpFilter.java
@@ -23,10 +23,12 @@ import java.net.InetAddress;
 import java.net.MalformedURLException;
 import java.net.URL;
 import java.net.UnknownHostException;
+import java.net.HttpURLConnection;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
+import java.util.Collection;
 
 import javax.servlet.Filter;
 import javax.servlet.FilterChain;
@@ -38,12 +40,12 @@ import javax.servlet.http.Cookie;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.yarn.conf.HAUtil;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.server.webproxy.ProxyUtils;
 import org.apache.hadoop.yarn.server.webproxy.WebAppProxyServlet;
-import org.apache.hadoop.yarn.util.RMHAUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -66,7 +68,8 @@ public class AmIpFilter implements Filter {
   private String[] proxyHosts;
   private Set<String> proxyAddresses = null;
   private long lastUpdate;
-  private Map<String, String> proxyUriBases;
+  @VisibleForTesting
+  Map<String, String> proxyUriBases;
 
   @Override
   public void init(FilterConfig conf) throws ServletException {
@@ -187,24 +190,55 @@ public class AmIpFilter implements Filter {
 }
   }
 
-  protected String findRedirectUrl() throws ServletException {
-String addr;
-if (proxyUriBases.size() == 1) {  // external proxy or not RM HA
+  @VisibleForTesting
+  public String findRedirectUrl() throws ServletException {
+String addr = null;
+if (proxyUriBases.size() == 1) {
+  // external proxy or not RM HA
   addr = proxyUriBases.values().iterator().next();
-} else {  // RM HA
+} else {
+  // RM HA
   YarnConfiguration conf = new YarnConfiguration();
-  String activeRMId = RMHAUtils.findActiveRMHAId(conf);
-  String addressPropertyPrefix = YarnConfiguration.useHttps(conf)
-  ? YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS
-  : YarnConfiguration.RM_WEBAPP_ADDRESS;
-  String host = conf.get(
-  HAUtil.addSuffix(addressPropertyPrefix, activeRMId));
-  addr = proxyUriBases.get(host);
+  for (String rmId : getRmIds(conf)) {
+String url = getUrlByRmId(conf, rmId);
+if (isValidUrl(url)) {
+  addr = url;
+  break;
+}
+  }
 }
+
 if (addr == null) {
   throw new ServletException(
   "Could not determine the proxy server for redirection");
 }
 return addr;
   }
+
+  @VisibleForTesting
+  Collection<String> getRmIds(YarnConfiguration conf) {
+return conf.getStringCollection(YarnConfiguration.RM_HA_IDS);
+  }
+
+  @VisibleForTesting
+  String getUrlByRmId(YarnConfiguration conf, String rmId) {
+String addressPropertyPrefix = YarnConfiguration.useHttps(conf) ?
+YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS :
+
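
getUrlByRmId() is truncated above and isValidUrl() falls outside the excerpt, but the added java.net.HttpURLConnection import hints at the mechanism: instead of asking which RM is active, the filter now probes each RM webapp URL until one answers. A hypothetical sketch of such a probe (only the isValidUrl name and the import appear in the diff; the body here is an assumption):

import java.net.HttpURLConnection;
import java.net.URL;

public class RmProbeExample {
  static boolean isValidUrl(String url) {
    try {
      HttpURLConnection conn =
          (HttpURLConnection) new URL(url).openConnection();
      conn.connect();
      // a 200 answer marks this RM's web app as reachable
      return conn.getResponseCode() == HttpURLConnection.HTTP_OK;
    } catch (Exception e) {
      return false; // standby or stopped RMs simply fail the probe
    }
  }

  public static void main(String[] args) {
    System.out.println(isValidUrl("http://rm1.example.com:8088"));
  }
}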

[44/50] [abbrv] hadoop git commit: HDFS-12133. Correct ContentSummaryComputationContext Logger class name. Contributed by Surendra Singh Lilhore.

2017-07-19 Thread xyao
HDFS-12133. Correct ContentSummaryComputationContext Logger class name. 
Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/04ff412d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/04ff412d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/04ff412d

Branch: refs/heads/HDFS-7240
Commit: 04ff412dabf3f6b9d884171c4140adbc636d5387
Parents: f8cd55f
Author: Brahma Reddy Battula 
Authored: Wed Jul 19 23:43:10 2017 +0800
Committer: Brahma Reddy Battula 
Committed: Wed Jul 19 23:43:10 2017 +0800

--
 .../hdfs/server/namenode/ContentSummaryComputationContext.java| 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/04ff412d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
index 43e6f0d..c81f82c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
@@ -48,7 +48,8 @@ public class ContentSummaryComputationContext {
   private int sleepNanoSec = 0;
 
   public static final String REPLICATED = "Replicated";
-  public static final Log LOG = LogFactory.getLog(INode.class);
+  public static final Log LOG = LogFactory
+  .getLog(ContentSummaryComputationContext.class);
 
   private FSPermissionChecker pc;
   /**





[37/50] [abbrv] hadoop git commit: YARN-6798. Fix NM startup failure with old state store due to version mismatch. (Botong Huang via rchiang)

2017-07-19 Thread xyao
YARN-6798. Fix NM startup failure with old state store due to version mismatch. 
(Botong Huang via rchiang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f5f14a2a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f5f14a2a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f5f14a2a

Branch: refs/heads/HDFS-7240
Commit: f5f14a2ad67f91064a73685b44369c6314f0e1cd
Parents: 0b7afc0
Author: Ray Chiang 
Authored: Tue Jul 18 12:35:08 2017 -0700
Committer: Ray Chiang 
Committed: Tue Jul 18 12:35:08 2017 -0700

--
 .../nodemanager/recovery/NMLeveldbStateStoreService.java   | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5f14a2a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
index a0502df..c556b39 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/recovery/NMLeveldbStateStoreService.java
@@ -85,7 +85,11 @@ public class NMLeveldbStateStoreService extends NMStateStoreService {
   private static final String DB_NAME = "yarn-nm-state";
   private static final String DB_SCHEMA_VERSION_KEY = "nm-schema-version";
 
-  private static final Version CURRENT_VERSION_INFO = Version.newInstance(3, 0);
+  /**
+   * Changes from 1.0 to 1.1: Save AMRMProxy state in NMSS.
+   * Changes from 1.1 to 1.2: Save queued container information.
+   */
+  private static final Version CURRENT_VERSION_INFO = Version.newInstance(1, 2);
 
   private static final String DELETION_TASK_KEY_PREFIX =
   "DeletionService/deltask_";

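For context on why Version.newInstance(3, 0) broke recovery: the NM state-store schema check treats versions as compatible only when the major number matches, so stores written by 1.x NMs looked incompatible to a 3.0 reader and startup failed. A small illustration, assuming that major-version rule (Version#isCompatibleTo):

import org.apache.hadoop.yarn.server.records.Version;

public class SchemaCheckExample {
  public static void main(String[] args) {
    Version onDisk = Version.newInstance(1, 0); // written by an older NM
    // compatible iff the major version matches
    System.out.println(Version.newInstance(1, 2).isCompatibleTo(onDisk)); // true
    System.out.println(Version.newInstance(3, 0).isCompatibleTo(onDisk)); // false
  }
}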




[43/50] [abbrv] hadoop git commit: HDFS-12067. Correct dfsadmin commands usage message to reflect IPC port. Contributed by steven-wugang.

2017-07-19 Thread xyao
HDFS-12067. Correct dfsadmin commands usage message to reflect IPC port. 
Contributed by steven-wugang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f8cd55fe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f8cd55fe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f8cd55fe

Branch: refs/heads/HDFS-7240
Commit: f8cd55fe33665faf2d1b14df231516fc891118fc
Parents: df18025
Author: Brahma Reddy Battula 
Authored: Wed Jul 19 23:21:43 2017 +0800
Committer: Brahma Reddy Battula 
Committed: Wed Jul 19 23:21:43 2017 +0800

--
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  | 57 
 1 file changed, 34 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8cd55fe/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index 06f408d..ea76093 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -1113,29 +1113,39 @@ public class DFSAdmin extends FsShell {
 "\tor gets a list of reconfigurable properties.\n" +
 
 "\tThe second parameter specifies the node type\n";
-String genericRefresh = "-refresh: Arguments are <hostname:port> <resource_identifier> [arg1..argn]\n" +
-  "\tTriggers a runtime-refresh of the resource specified by <resource_identifier>\n" +
-  "\ton <hostname:port>. All other args after are sent to the host.\n";
+String genericRefresh = "-refresh: Arguments are " +
+"<hostname:ipc_port> <resource_identifier> [arg1..argn]\n" +
+"\tTriggers a runtime-refresh of the resource specified by " +
+"<resource_identifier> on <hostname:ipc_port>.\n" +
+"\tAll other args after are sent to the host.\n" +
+"\tThe ipc_port is determined by 'dfs.datanode.ipc.address'," +
+"default is DFS_DATANODE_IPC_DEFAULT_PORT.\n";
 
String printTopology = "-printTopology: Print a tree of the racks and their\n" +
"\t\tnodes as reported by the Namenode\n";
 
-String refreshNamenodes = "-refreshNamenodes: Takes a datanodehost:port as argument,\n"+
-  "\t\tFor the given datanode, reloads the configuration files,\n" +
-  "\t\tstops serving the removed block-pools\n"+
-  "\t\tand starts serving new block-pools\n";
+String refreshNamenodes = "-refreshNamenodes: Takes a " +
+"datanodehost:ipc_port as argument,For the given datanode\n" +
+"\t\treloads the configuration files,stops serving the removed\n" +
+"\t\tblock-pools and starts serving new block-pools.\n" +
+"\t\tThe ipc_port is determined by 'dfs.datanode.ipc.address'," +
+"default is DFS_DATANODE_IPC_DEFAULT_PORT.\n";
 
-String getVolumeReport = "-getVolumeReport: Takes a datanodehost:port as "
-+ "argument,\n\t\tFor the given datanode, get the volume report\n";
-
-String deleteBlockPool = "-deleteBlockPool: Arguments are datanodehost:port, blockpool id\n"+
- "\t\t and an optional argument \"force\". If force is passed,\n"+
- "\t\t block pool directory for the given blockpool id on the given\n"+
- "\t\t datanode is deleted along with its contents, otherwise\n"+
- "\t\t the directory is deleted only if it is empty. The command\n" +
- "\t\t will fail if datanode is still serving the block pool.\n" +
- "\t\t   Refer to refreshNamenodes to shutdown a block pool\n" +
- "\t\t service on a datanode.\n";
+String getVolumeReport = "-getVolumeReport: Takes a datanodehost:ipc_port"+
+" as argument,For the given datanode,get the volume report.\n" +
+"\t\tThe ipc_port is determined by 'dfs.datanode.ipc.address'," +
+"default is DFS_DATANODE_IPC_DEFAULT_PORT.\n";
+
+String deleteBlockPool = "-deleteBlockPool: Arguments are " +
+"datanodehost:ipc_port, blockpool id and an optional argument\n" +
+"\t\t\"force\". If force is passed,block pool directory for\n" +
+"\t\tthe given blockpool id on the given datanode is deleted\n" +
+"\t\talong with its contents,otherwise the directory is deleted\n"+
+"\t\tonly if it is empty.The command will fail if datanode is\n" +
+"\t\tstill 

[35/50] [abbrv] hadoop git commit: HADOOP-14539. Move commons logging APIs over to slf4j in hadoop-common. Contributed by Wenxin He.

2017-07-19 Thread xyao
HADOOP-14539. Move commons logging APIs over to slf4j in hadoop-common. 
Contributed by Wenxin He.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ccaf0366
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ccaf0366
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ccaf0366

Branch: refs/heads/HDFS-7240
Commit: ccaf036662e22da14583942054898c99fa51dae5
Parents: 5b00792
Author: Akira Ajisaka 
Authored: Tue Jul 18 13:32:37 2017 +0900
Committer: Akira Ajisaka 
Committed: Tue Jul 18 13:32:37 2017 +0900

--
 .../org/apache/hadoop/conf/Configuration.java   | 17 +
 .../apache/hadoop/conf/ReconfigurableBase.java  |  7 ---
 .../hadoop/conf/ReconfigurationServlet.java |  8 
 .../hadoop/crypto/JceAesCtrCryptoCodec.java |  8 
 .../hadoop/crypto/OpensslAesCtrCryptoCodec.java |  8 
 .../org/apache/hadoop/crypto/OpensslCipher.java |  8 
 .../crypto/random/OpensslSecureRandom.java  |  8 
 .../hadoop/crypto/random/OsSecureRandom.java|  9 +
 .../apache/hadoop/fs/AbstractFileSystem.java|  6 +++---
 .../java/org/apache/hadoop/fs/ChecksumFs.java   |  8 
 .../hadoop/fs/DelegationTokenRenewer.java   | 10 +-
 .../org/apache/hadoop/fs/FSInputChecker.java|  9 +
 .../java/org/apache/hadoop/fs/FileContext.java  | 10 +-
 .../java/org/apache/hadoop/fs/FileUtil.java | 10 +-
 .../main/java/org/apache/hadoop/fs/FsShell.java |  6 +++---
 .../apache/hadoop/fs/FsShellPermissions.java|  4 ++--
 .../main/java/org/apache/hadoop/fs/Globber.java |  7 ---
 .../org/apache/hadoop/fs/HarFileSystem.java | 11 ++-
 .../org/apache/hadoop/fs/LocalDirAllocator.java |  9 +
 .../main/java/org/apache/hadoop/fs/Trash.java   |  7 ---
 .../apache/hadoop/fs/TrashPolicyDefault.java|  8 
 .../org/apache/hadoop/fs/ftp/FTPFileSystem.java |  8 
 .../hadoop/fs/permission/FsPermission.java  |  6 +++---
 .../hadoop/fs/sftp/SFTPConnectionPool.java  |  7 ---
 .../apache/hadoop/fs/sftp/SFTPFileSystem.java   |  7 ---
 .../org/apache/hadoop/fs/shell/Command.java |  6 +++---
 .../apache/hadoop/ha/ActiveStandbyElector.java  | 15 ---
 .../apache/hadoop/ha/FailoverController.java| 10 +-
 .../main/java/org/apache/hadoop/ha/HAAdmin.java |  8 
 .../org/apache/hadoop/ha/HealthMonitor.java |  8 
 .../java/org/apache/hadoop/ha/NodeFencer.java   |  6 +++---
 .../org/apache/hadoop/ha/SshFenceByTcpPort.java | 11 +++
 .../apache/hadoop/ha/ZKFailoverController.java  | 20 ++--
 ...HAServiceProtocolServerSideTranslatorPB.java |  6 +++---
 .../org/apache/hadoop/http/HttpServer2.java |  6 +++---
 .../hadoop/http/lib/StaticUserWebFilter.java|  7 ---
 .../java/org/apache/hadoop/io/BloomMapFile.java |  6 +++---
 .../apache/hadoop/io/FastByteComparisons.java   |  7 +++
 .../main/java/org/apache/hadoop/io/IOUtils.java |  5 ++---
 .../main/java/org/apache/hadoop/io/MapFile.java |  8 
 .../org/apache/hadoop/io/ReadaheadPool.java |  6 +++---
 .../java/org/apache/hadoop/io/SequenceFile.java |  7 ---
 .../main/java/org/apache/hadoop/io/UTF8.java|  5 +++--
 .../apache/hadoop/io/compress/CodecPool.java|  6 +++---
 .../io/compress/CompressionCodecFactory.java|  8 
 .../apache/hadoop/io/compress/DefaultCodec.java |  6 +++---
 .../io/compress/bzip2/Bzip2Compressor.java  |  8 
 .../io/compress/bzip2/Bzip2Decompressor.java|  8 
 .../hadoop/io/compress/bzip2/Bzip2Factory.java  |  6 +++---
 .../hadoop/io/compress/lz4/Lz4Compressor.java   |  8 
 .../hadoop/io/compress/lz4/Lz4Decompressor.java |  8 
 .../io/compress/snappy/SnappyCompressor.java|  8 
 .../io/compress/snappy/SnappyDecompressor.java  |  8 
 .../io/compress/zlib/BuiltInZlibDeflater.java   |  8 
 .../hadoop/io/compress/zlib/ZlibCompressor.java |  8 
 .../hadoop/io/compress/zlib/ZlibFactory.java|  8 
 .../apache/hadoop/io/erasurecode/CodecUtil.java |  6 +++---
 .../io/erasurecode/ErasureCodeNative.java   |  8 
 .../org/apache/hadoop/io/file/tfile/BCFile.java |  6 +++---
 .../hadoop/io/file/tfile/Compression.java   |  6 +++---
 .../org/apache/hadoop/io/file/tfile/TFile.java  |  8 
 .../hadoop/io/file/tfile/TFileDumper.java   |  8 
 .../org/apache/hadoop/io/nativeio/NativeIO.java | 16 
 .../nativeio/SharedFileDescriptorFactory.java   |  7 ---
 .../apache/hadoop/io/retry/RetryPolicies.java   |  6 +++---
 .../org/apache/hadoop/io/retry/RetryUtils.java  |  6 +++---
 .../io/serializer/SerializationFactory.java |  8 
 .../org/apache/hadoop/ipc/CallQueueManager.java |  7 ---
 

[25/50] [abbrv] hadoop git commit: HDFS-11786. Add support to make copyFromLocal multi threaded. Contributed by Mukul Kumar Singh.

2017-07-19 Thread xyao
HDFS-11786. Add support to make copyFromLocal multi threaded. Contributed by 
Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/02b141ac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/02b141ac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/02b141ac

Branch: refs/heads/HDFS-7240
Commit: 02b141ac6059323ec43e472ca36dc570fdca386f
Parents: b778887
Author: Anu Engineer 
Authored: Sun Jul 16 10:59:34 2017 -0700
Committer: Anu Engineer 
Committed: Sun Jul 16 10:59:34 2017 -0700

--
 .../apache/hadoop/fs/shell/CopyCommands.java| 112 +++-
 .../apache/hadoop/fs/shell/MoveCommands.java|   4 +-
 .../hadoop/fs/shell/TestCopyFromLocal.java  | 173 +++
 .../hadoop/fs/shell/TestCopyPreserveFlag.java   |  19 ++
 .../src/test/resources/testConf.xml |  44 -
 5 files changed, 346 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/02b141ac/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
index e2fad75..7b3c53e 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java
@@ -26,7 +26,11 @@ import java.net.URISyntaxException;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.ArrayBlockingQueue;
+import java.util.concurrent.TimeUnit;
 
+import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -288,9 +292,113 @@ class CopyCommands {
   }
 
   public static class CopyFromLocal extends Put {
+private ThreadPoolExecutor executor = null;
+private int numThreads = 1;
+
+private static final int MAX_THREADS =
+Runtime.getRuntime().availableProcessors() * 2;
 public static final String NAME = "copyFromLocal";
-public static final String USAGE = Put.USAGE;
-public static final String DESCRIPTION = "Identical to the -put command.";
+public static final String USAGE =
+"[-f] [-p] [-l] [-d] [-t <thread count>] <localsrc> ... <dst>";
+public static final String DESCRIPTION =
+"Copy files from the local file system " +
+"into fs. Copying fails if the file already " +
+"exists, unless the -f flag is given.\n" +
+"Flags:\n" +
+"  -p : Preserves access and modification times, ownership and the" +
+" mode.\n" +
+"  -f : Overwrites the destination if it already exists.\n" +
+"  -t <thread count> : Number of threads to be used, default is 1.\n" +
+"  -l : Allow DataNode to lazily persist the file to disk. Forces" +
+" replication factor of 1. This flag will result in reduced" +
+" durability. Use with care.\n" +
+"  -d : Skip creation of temporary file(._COPYING_).\n";
+
+private void setNumberThreads(String numberThreadsString) {
+  if (numberThreadsString == null) {
+numThreads = 1;
+  } else {
+int parsedValue = Integer.parseInt(numberThreadsString);
+if (parsedValue <= 1) {
+  numThreads = 1;
+} else if (parsedValue > MAX_THREADS) {
+  numThreads = MAX_THREADS;
+} else {
+  numThreads = parsedValue;
+}
+  }
+}
+
+@Override
+protected void processOptions(LinkedList<String> args) throws IOException {
+  CommandFormat cf =
+  new CommandFormat(1, Integer.MAX_VALUE, "f", "p", "l", "d");
+  cf.addOptionWithValue("t");
+  cf.parse(args);
+  setNumberThreads(cf.getOptValue("t"));
+  setOverwrite(cf.getOpt("f"));
+  setPreserve(cf.getOpt("p"));
+  setLazyPersist(cf.getOpt("l"));
+  setDirectWrite(cf.getOpt("d"));
+  getRemoteDestination(args);
+  // should have a -r option
+  setRecursive(true);
+}
+
+private void copyFile(PathData src, PathData target) throws IOException {
+  if (isPathRecursable(src)) {
+throw new PathIsDirectoryException(src.toString());
+  }
+  super.copyFileToTarget(src, target);
+}
+
+@Override
+protected void copyFileToTarget(PathData src, PathData target)
+throws IOException {
+  // if number of thread is 1, mimic put and 
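
The overridden copyFileToTarget() is cut off above. A hedged sketch of how the executor fields declared earlier are typically wired together; the pool construction and queue size here are assumptions, not the patch's exact code:

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class CopyPoolExample {
  public static void main(String[] args) throws InterruptedException {
    int numThreads = 4; // the patch clamps this to [1, 2 * availableProcessors]
    ThreadPoolExecutor executor = new ThreadPoolExecutor(numThreads, numThreads,
        1, TimeUnit.SECONDS, new ArrayBlockingQueue<>(1024),
        new ThreadPoolExecutor.CallerRunsPolicy()); // caller copies when full
    executor.submit(() -> System.out.println("copy one file"));
    executor.shutdown();
    executor.awaitTermination(1, TimeUnit.MINUTES);
  }
}

With the patch applied, an invocation like "hadoop fs -copyFromLocal -t 4 <localsrc> ... <dst>" fans the per-file copies out across the pool; -t values above 2 * availableProcessors are silently capped.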

[22/50] [abbrv] hadoop git commit: HDFS-12112. TestBlockManager#testBlockManagerMachinesArray sometimes fails with NPE. Contributed by Wei-Chiu Chuang.

2017-07-19 Thread xyao
HDFS-12112. TestBlockManager#testBlockManagerMachinesArray sometimes fails with 
NPE. Contributed by Wei-Chiu Chuang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b778887a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b778887a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b778887a

Branch: refs/heads/HDFS-7240
Commit: b778887af59d96f1fac30cae14be1cabbdb74c8b
Parents: 06ece48
Author: Brahma Reddy Battula 
Authored: Sat Jul 15 10:38:31 2017 +0800
Committer: Brahma Reddy Battula 
Committed: Sat Jul 15 10:38:31 2017 +0800

--
 .../hadoop/hdfs/server/blockmanagement/TestBlockManager.java | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b778887a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index 3088b7b..6b1a979 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -1219,7 +1219,7 @@ public class TestBlockManager {
 }
   }
 
-  @Test
+  @Test(timeout = 60000)
   public void testBlockManagerMachinesArray() throws Exception {
 final Configuration conf = new HdfsConfiguration();
 final MiniDFSCluster cluster =
@@ -1230,6 +1230,8 @@ public class TestBlockManager {
 final Path filePath = new Path("/tmp.txt");
 final long fileLen = 1L;
 DFSTestUtil.createFile(fs, filePath, fileLen, (short) 3, 1L);
+DFSTestUtil.waitForReplication((DistributedFileSystem)fs,
+filePath, (short) 3, 60000);
 ArrayList<DataNode> datanodes = cluster.getDataNodes();
 assertEquals(datanodes.size(), 4);
 FSNamesystem ns = cluster.getNamesystem();





[46/50] [abbrv] hadoop git commit: HDFS-12139. HTTPFS liststatus returns incorrect pathSuffix for path of file. Contributed by Yongjun Zhang.

2017-07-19 Thread xyao
HDFS-12139. HTTPFS liststatus returns incorrect pathSuffix for path of file. 
Contributed by Yongjun Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3556e36b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3556e36b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3556e36b

Branch: refs/heads/HDFS-7240
Commit: 3556e36be30211f46ac38899ce11a4d4cac6d635
Parents: 413b23e
Author: Yongjun Zhang 
Authored: Wed Jul 19 10:54:13 2017 -0700
Committer: Yongjun Zhang 
Committed: Wed Jul 19 10:56:50 2017 -0700

--
 .../hadoop/fs/http/server/FSOperations.java | 15 ++-
 .../fs/http/client/BaseTestHttpFSWith.java  | 26 +++-
 2 files changed, 34 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3556e36b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
index 0fb665a..f1615c3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
@@ -75,15 +75,17 @@ public class FSOperations {
 
   /**
* @param fileStatuses list of FileStatus objects
+   * @param isFile is the fileStatuses from a file path
* @return JSON map suitable for wire transport
*/
   @SuppressWarnings({"unchecked"})
-  private static Map toJson(FileStatus[] fileStatuses) {
+  private static Map toJson(FileStatus[] fileStatuses,
+  boolean isFile) {
 Map json = new LinkedHashMap<>();
 Map inner = new LinkedHashMap<>();
 JSONArray statuses = new JSONArray();
 for (FileStatus f : fileStatuses) {
-  statuses.add(toJsonInner(f, false));
+  statuses.add(toJsonInner(f, isFile));
 }
 inner.put(HttpFSFileSystem.FILE_STATUS_JSON, statuses);
 json.put(HttpFSFileSystem.FILE_STATUSES_JSON, inner);
@@ -129,13 +131,14 @@ public class FSOperations {
* These two classes are slightly different, due to the impedance
* mismatches between the WebHDFS and FileSystem APIs.
* @param entries
+   * @param isFile is the entries from a file path
* @return json
*/
   private static Map toJson(FileSystem.DirectoryEntries
-  entries) {
+  entries, boolean isFile) {
 Map json = new LinkedHashMap<>();
 Map inner = new LinkedHashMap<>();
-Map fileStatuses = toJson(entries.getEntries());
+Map fileStatuses = toJson(entries.getEntries(), isFile);
 inner.put(HttpFSFileSystem.PARTIAL_LISTING_JSON, fileStatuses);
 inner.put(HttpFSFileSystem.REMAINING_ENTRIES_JSON, entries.hasMore() ? 1
 : 0);
@@ -690,7 +693,7 @@ public class FSOperations {
 @Override
 public Map execute(FileSystem fs) throws IOException {
   FileStatus[] fileStatuses = fs.listStatus(path, filter);
-  return toJson(fileStatuses);
+  return toJson(fileStatuses, fs.getFileStatus(path).isFile());
 }
 
 @Override
@@ -735,7 +738,7 @@ public class FSOperations {
   WrappedFileSystem wrappedFS = new WrappedFileSystem(fs);
   FileSystem.DirectoryEntries entries =
   wrappedFS.listStatusBatch(path, token);
-  return toJson(entries);
+  return toJson(entries, wrappedFS.getFileStatus(path).isFile());
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3556e36b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
index 0fd3f91..e23093e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
@@ -364,8 +364,15 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
 assertEquals(status2.getLen(), status1.getLen());
 
 FileStatus[] stati = fs.listStatus(path.getParent());
-assertEquals(stati.length, 1);
+assertEquals(1, 
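
The assertion is cut off above, but the convention the fix restores is easy to state: LISTSTATUS on a directory returns one entry per child with the child's name in pathSuffix, while LISTSTATUS on a file returns a single entry whose pathSuffix is empty, matching WebHDFS. Illustrative payloads for a file path (field values are examples, not taken from the patch):

  before: {"FileStatus":[{"pathSuffix":"file1","type":"FILE", ...}]}
  after:  {"FileStatus":[{"pathSuffix":"","type":"FILE", ...}]}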

[45/50] [abbrv] hadoop git commit: HDFS-12158. Secondary Namenode's web interface lacks configs for X-FRAME-OPTIONS protection. Contributed by Mukul Kumar Singh.

2017-07-19 Thread xyao
HDFS-12158. Secondary Namenode's web interface lacks configs for X-FRAME-OPTIONS 
protection. Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/413b23eb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/413b23eb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/413b23eb

Branch: refs/heads/HDFS-7240
Commit: 413b23eb04eee24275257ab462133e0818f87449
Parents: 04ff412
Author: Anu Engineer 
Authored: Wed Jul 19 10:29:06 2017 -0700
Committer: Anu Engineer 
Committed: Wed Jul 19 10:29:06 2017 -0700

--
 .../hdfs/server/namenode/SecondaryNameNode.java | 10 +
 .../namenode/TestNameNodeHttpServerXFrame.java  | 22 
 2 files changed, 32 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/413b23eb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
index 6dd085a..ff83e34 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
@@ -479,6 +479,16 @@ public class SecondaryNameNode implements Runnable,
 DFS_SECONDARY_NAMENODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,
 DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY);
 
+final boolean xFrameEnabled = conf.getBoolean(
+DFSConfigKeys.DFS_XFRAME_OPTION_ENABLED,
+DFSConfigKeys.DFS_XFRAME_OPTION_ENABLED_DEFAULT);
+
+final String xFrameOptionValue = conf.getTrimmed(
+DFSConfigKeys.DFS_XFRAME_OPTION_VALUE,
+DFSConfigKeys.DFS_XFRAME_OPTION_VALUE_DEFAULT);
+
+builder.configureXFrame(xFrameEnabled).setXFrameOption(xFrameOptionValue);
+
 infoServer = builder.build();
 infoServer.setAttribute("secondary.name.node", this);
 infoServer.setAttribute("name.system.image", checkpointImage);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/413b23eb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeHttpServerXFrame.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeHttpServerXFrame.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeHttpServerXFrame.java
index 947e951..aaa713e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeHttpServerXFrame.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeHttpServerXFrame.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.http.HttpServer2;
@@ -32,6 +33,7 @@ import java.net.HttpURLConnection;
 import java.net.InetSocketAddress;
 import java.net.MalformedURLException;
 import java.net.URL;
+import java.net.URI;
 
 /**
 * A class to test the XFrame options of Namenode HTTP Server. We are not reusing
@@ -94,4 +96,24 @@ public class TestNameNodeHttpServerXFrame {
 conn.connect();
 return conn;
   }
+
+  @Test
+  public void testSecondaryNameNodeXFrame() throws IOException {
+Configuration conf = new HdfsConfiguration();
+FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
+
+SecondaryNameNode sn = new SecondaryNameNode(conf);
+sn.startInfoServer();
+InetSocketAddress httpAddress = SecondaryNameNode.getHttpAddress(conf);
+
+URL url = URI.create("http://" + httpAddress.getHostName()
++ ":" + httpAddress.getPort()).toURL();
+HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+conn.connect();
+String xfoHeader = conn.getHeaderField("X-FRAME-OPTIONS");
+Assert.assertTrue("X-FRAME-OPTIONS is absent in the header",
+xfoHeader != null);
+Assert.assertTrue(xfoHeader.endsWith(HttpServer2.XFrameOption
+.SAMEORIGIN.toString()));
+  }
 }
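
For reference, a minimal sketch of toggling these options from client code. The
key constants come from the diff above; whether true/SAMEORIGIN are the shipped
defaults is an assumption here.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class XFrameOptionExample {
  public static void main(String[] args) {
    Configuration conf = new HdfsConfiguration();
    // Keys taken from the patch above.
    conf.setBoolean(DFSConfigKeys.DFS_XFRAME_OPTION_ENABLED, true);
    conf.set(DFSConfigKeys.DFS_XFRAME_OPTION_VALUE, "SAMEORIGIN");
    System.out.println("X-FRAME-OPTIONS = "
        + conf.getTrimmed(DFSConfigKeys.DFS_XFRAME_OPTION_VALUE));
  }
}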





[50/50] [abbrv] hadoop git commit: Merge branch 'trunk' into HDFS-7240

2017-07-19 Thread xyao
Merge branch 'trunk' into HDFS-7240


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b3a7f3b2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b3a7f3b2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b3a7f3b2

Branch: refs/heads/HDFS-7240
Commit: b3a7f3b2dfffdd83abcfbe630e31acfcf68f5521
Parents: 84e11c7 c21c260
Author: Xiaoyu Yao 
Authored: Wed Jul 19 14:52:35 2017 -0700
Committer: Xiaoyu Yao 
Committed: Wed Jul 19 14:52:35 2017 -0700

--
 hadoop-client-modules/hadoop-client/pom.xml |   4 -
 .../util/TestCertificateUtil.java   |   6 +-
 hadoop-common-project/hadoop-common/pom.xml |   4 -
 .../org/apache/hadoop/conf/Configuration.java   |  27 +-
 .../apache/hadoop/conf/ReconfigurableBase.java  |   7 +-
 .../hadoop/conf/ReconfigurationServlet.java |   8 +-
 .../hadoop/crypto/JceAesCtrCryptoCodec.java |   8 +-
 .../hadoop/crypto/OpensslAesCtrCryptoCodec.java |   8 +-
 .../org/apache/hadoop/crypto/OpensslCipher.java |   8 +-
 .../crypto/key/kms/KMSClientProvider.java   |  39 +-
 .../key/kms/LoadBalancingKMSClientProvider.java |  90 +-
 .../crypto/random/OpensslSecureRandom.java  |   8 +-
 .../hadoop/crypto/random/OsSecureRandom.java|   9 +-
 .../apache/hadoop/fs/AbstractFileSystem.java|   6 +-
 .../java/org/apache/hadoop/fs/ChecksumFs.java   |   8 +-
 .../fs/CommonConfigurationKeysPublic.java   |  29 +
 .../hadoop/fs/DelegationTokenRenewer.java   |  10 +-
 .../org/apache/hadoop/fs/FSInputChecker.java|   9 +-
 .../java/org/apache/hadoop/fs/FileContext.java  |  10 +-
 .../java/org/apache/hadoop/fs/FileUtil.java |  10 +-
 .../main/java/org/apache/hadoop/fs/FsShell.java |   6 +-
 .../apache/hadoop/fs/FsShellPermissions.java|   4 +-
 .../main/java/org/apache/hadoop/fs/Globber.java |   7 +-
 .../org/apache/hadoop/fs/HarFileSystem.java |  11 +-
 .../org/apache/hadoop/fs/LocalDirAllocator.java |   9 +-
 .../main/java/org/apache/hadoop/fs/Trash.java   |   7 +-
 .../apache/hadoop/fs/TrashPolicyDefault.java|   8 +-
 .../org/apache/hadoop/fs/ftp/FTPFileSystem.java |   8 +-
 .../hadoop/fs/permission/FsPermission.java  |   6 +-
 .../hadoop/fs/sftp/SFTPConnectionPool.java  |   7 +-
 .../apache/hadoop/fs/sftp/SFTPFileSystem.java   |   7 +-
 .../org/apache/hadoop/fs/shell/Command.java |   6 +-
 .../apache/hadoop/fs/shell/CopyCommands.java| 112 ++-
 .../apache/hadoop/fs/shell/MoveCommands.java|   4 +-
 .../apache/hadoop/ha/ActiveStandbyElector.java  |  15 +-
 .../apache/hadoop/ha/FailoverController.java|  10 +-
 .../main/java/org/apache/hadoop/ha/HAAdmin.java |   8 +-
 .../org/apache/hadoop/ha/HealthMonitor.java |   8 +-
 .../java/org/apache/hadoop/ha/NodeFencer.java   |   6 +-
 .../org/apache/hadoop/ha/PowerShellFencer.java  |   7 +-
 .../apache/hadoop/ha/ShellCommandFencer.java|   7 +-
 .../org/apache/hadoop/ha/SshFenceByTcpPort.java |  18 +-
 .../java/org/apache/hadoop/ha/StreamPumper.java |   8 +-
 .../apache/hadoop/ha/ZKFailoverController.java  |  20 +-
 ...HAServiceProtocolServerSideTranslatorPB.java |   6 +-
 .../org/apache/hadoop/http/HttpServer2.java |   6 +-
 .../hadoop/http/lib/StaticUserWebFilter.java|   7 +-
 .../java/org/apache/hadoop/io/BloomMapFile.java |   6 +-
 .../apache/hadoop/io/FastByteComparisons.java   |   7 +-
 .../main/java/org/apache/hadoop/io/IOUtils.java |   5 +-
 .../main/java/org/apache/hadoop/io/MapFile.java |   8 +-
 .../org/apache/hadoop/io/ReadaheadPool.java |   6 +-
 .../java/org/apache/hadoop/io/SequenceFile.java |   7 +-
 .../main/java/org/apache/hadoop/io/UTF8.java|   5 +-
 .../apache/hadoop/io/compress/CodecPool.java|   6 +-
 .../io/compress/CompressionCodecFactory.java|   8 +-
 .../apache/hadoop/io/compress/DefaultCodec.java |   6 +-
 .../io/compress/bzip2/Bzip2Compressor.java  |   8 +-
 .../io/compress/bzip2/Bzip2Decompressor.java|   8 +-
 .../hadoop/io/compress/bzip2/Bzip2Factory.java  |   6 +-
 .../hadoop/io/compress/lz4/Lz4Compressor.java   |   8 +-
 .../hadoop/io/compress/lz4/Lz4Decompressor.java |   8 +-
 .../io/compress/snappy/SnappyCompressor.java|   8 +-
 .../io/compress/snappy/SnappyDecompressor.java  |   8 +-
 .../io/compress/zlib/BuiltInZlibDeflater.java   |   8 +-
 .../hadoop/io/compress/zlib/ZlibCompressor.java |   8 +-
 .../hadoop/io/compress/zlib/ZlibFactory.java|   8 +-
 .../apache/hadoop/io/erasurecode/CodecUtil.java |   6 +-
 .../io/erasurecode/ErasureCodeNative.java   |   8 +-
 .../org/apache/hadoop/io/file/tfile/BCFile.java |   6 +-
 .../hadoop/io/file/tfile/Compression.java   |   6 +-
 .../org/apache/hadoop/io/file/tfile/TFile.java  |   8 +-
 .../hadoop/io/file/tfile/TFileDumper.java   |   8 +-
 .../org/apache/hadoop/io/nativeio/NativeIO.java |  16 +-
 .../nativeio/SharedFileDescriptorFactory.java   |   7 +-
 

[39/50] [abbrv] hadoop git commit: YARN-6778. In ResourceWeights, weights and setWeights() should be final. (Daniel Templeton via Yufei Gu)

2017-07-19 Thread xyao
YARN-6778. In ResourceWeights, weights and setWeights() should be final. 
(Daniel Templeton via Yufei Gu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/daaf530f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/daaf530f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/daaf530f

Branch: refs/heads/HDFS-7240
Commit: daaf530fce4b91cf9f568b9b0c5e8b20e6774134
Parents: 5aa2bf2
Author: Yufei Gu 
Authored: Tue Jul 18 16:38:07 2017 -0700
Committer: Yufei Gu 
Committed: Tue Jul 18 16:38:07 2017 -0700

--
 .../yarn/server/resourcemanager/resource/ResourceWeights.java| 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/daaf530f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceWeights.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceWeights.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceWeights.java
index 4c62318..3ce1517 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceWeights.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/resource/ResourceWeights.java
@@ -27,7 +27,7 @@ import org.apache.hadoop.util.StringUtils;
 public class ResourceWeights {
   public static final ResourceWeights NEUTRAL = new ResourceWeights(1.0f);
 
-  private float[] weights = new float[ResourceType.values().length];
+  private final float[] weights = new float[ResourceType.values().length];
 
   public ResourceWeights(float memoryWeight, float cpuWeight) {
 weights[ResourceType.MEMORY.ordinal()] = memoryWeight;
@@ -40,7 +40,7 @@ public class ResourceWeights {
 
   public ResourceWeights() { }
 
-  public void setWeight(float weight) {
+  public final void setWeight(float weight) {
 for (int i = 0; i < weights.length; i++) {
   weights[i] = weight;
 }
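
One common reason for this kind of change: the ResourceWeights constructors call
setWeight(), and a constructor that invokes an overridable method can execute
subclass code before the subclass's fields are initialized. A self-contained
illustration of that hazard (hypothetical classes, not Hadoop code):

class Base {
  Base() {
    init(); // runs before any subclass field initializers
  }
  void init() {
    System.out.println("base init");
  }
}

class Derived extends Base {
  private final String name = "derived";

  @Override
  void init() {
    // Invoked from Base's constructor, so 'name' is still null here.
    System.out.println("name = " + name);
  }

  public static void main(String[] args) {
    new Derived(); // prints "name = null"
  }
}

Declaring the method final, as the patch does, rules out the override entirely.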





[26/50] [abbrv] hadoop git commit: MAPREDUCE-6889. Add Job#close API to shutdown MR client services. Contributed by Rohith Sharma K S.

2017-07-19 Thread xyao
MAPREDUCE-6889. Add Job#close API to shutdown MR client services. Contributed 
by Rohith Sharma K S.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fb3b5d33
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fb3b5d33
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fb3b5d33

Branch: refs/heads/HDFS-7240
Commit: fb3b5d33ffb29ee8e1ffbd2eee7a603a5777ebaf
Parents: 02b141a
Author: Sunil G 
Authored: Mon Jul 17 13:35:15 2017 +0530
Committer: Sunil G 
Committed: Mon Jul 17 13:35:15 2017 +0530

--
 .../src/main/java/org/apache/hadoop/mapreduce/Job.java | 13 -
 .../hadoop/mapreduce/TestMapperReducerCleanup.java |  4 
 2 files changed, 16 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb3b5d33/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
index 2048768..5530d95 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Job.java
@@ -75,7 +75,7 @@ import org.apache.hadoop.yarn.api.records.ReservationId;
  */
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
-public class Job extends JobContextImpl implements JobContext {  
+public class Job extends JobContextImpl implements JobContext, AutoCloseable {
   private static final Log LOG = LogFactory.getLog(Job.class);
 
   @InterfaceStability.Evolving
@@ -1553,4 +1553,15 @@ public class Job extends JobContextImpl implements 
JobContext {
 this.reservationId = reservationId;
   }
   
+  /**
+   * Close the Job.
+   * @throws IOException if fail to close.
+   */
+  @Override
+  public void close() throws IOException {
+if (cluster != null) {
+  cluster.close();
+  cluster = null;
+}
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb3b5d33/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMapperReducerCleanup.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMapperReducerCleanup.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMapperReducerCleanup.java
index 36ec966..27e4e4f 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMapperReducerCleanup.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMapperReducerCleanup.java
@@ -329,6 +329,10 @@ public class TestMapperReducerCleanup {
 Assert.assertTrue(reduceCleanup);
 Assert.assertTrue(recordReaderCleanup);
 Assert.assertTrue(recordWriterCleanup);
+
+Assert.assertNotNull(job.getCluster());
+job.close();
+Assert.assertNull(job.getCluster());
   }
 
 }
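
Since Job now implements AutoCloseable, callers can scope the client-side
Cluster connection with try-with-resources. A usage sketch (input/output paths
and the job name are placeholders):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class JobCloseExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    boolean succeeded;
    // close() is invoked automatically on exit, shutting down the
    // underlying Cluster connection exactly as in the test above.
    try (Job job = Job.getInstance(conf, "example-job")) {
      FileInputFormat.addInputPath(job, new Path("/in"));
      FileOutputFormat.setOutputPath(job, new Path("/out"));
      succeeded = job.waitForCompletion(true);
    }
    System.exit(succeeded ? 0 : 1);
  }
}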





[41/50] [abbrv] hadoop git commit: HADOOP-14642. wasb: add support for caching Authorization and SASKeys. Contributed by Sivaguru Sankaridurg.

2017-07-19 Thread xyao
HADOOP-14642. wasb: add support for caching Authorization and SASKeys. 
Contributed by Sivaguru Sankaridurg.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2843c688
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2843c688
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2843c688

Branch: refs/heads/HDFS-7240
Commit: 2843c688bcc21c65eb3538ffb3caeaffe440eda8
Parents: 845c4e5
Author: Jitendra Pandey 
Authored: Wed Jul 19 00:13:06 2017 -0700
Committer: Jitendra Pandey 
Committed: Wed Jul 19 00:13:06 2017 -0700

--
 .../src/main/resources/core-default.xml |   9 +-
 .../conf/TestCommonConfigurationFields.java |   1 +
 .../hadoop/fs/azure/CachingAuthorizer.java  | 232 +++
 .../fs/azure/LocalSASKeyGeneratorImpl.java  |  28 ++-
 .../hadoop/fs/azure/NativeAzureFileSystem.java  |   3 -
 .../fs/azure/RemoteSASKeyGeneratorImpl.java |  46 +++-
 .../fs/azure/RemoteWasbAuthorizerImpl.java  |  38 ++-
 .../hadoop/fs/azure/SASKeyGeneratorImpl.java|   4 +-
 .../hadoop-azure/src/site/markdown/index.md |  38 +++
 .../hadoop/fs/azure/AbstractWasbTestBase.java   |   5 +
 .../hadoop/fs/azure/MockWasbAuthorizerImpl.java |  22 +-
 .../TestNativeAzureFSAuthorizationCaching.java  |  60 +
 .../TestNativeAzureFileSystemAuthorization.java |  86 ++-
 ...veAzureFileSystemAuthorizationWithOwner.java |   2 +-
 .../fs/azure/TestWasbRemoteCallHelper.java  |   6 +-
 .../src/test/resources/azure-test.xml   |   3 +-
 16 files changed, 499 insertions(+), 84 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2843c688/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index a705a4e..68b0a9d 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -1343,7 +1343,14 @@
 configuration
   
 
-
+<property>
+  <name>fs.azure.authorization.caching.enable</name>
+  <value>true</value>
+  <description>
+    Config flag to enable caching of authorization results and saskeys in WASB.
+    This flag is relevant only when fs.azure.authorization is enabled.
+  </description>
+</property>
 
 <property>
   <name>io.seqfile.compress.blocksize</name>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2843c688/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
index 8524973..593254eb 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
@@ -115,6 +115,7 @@ public class TestCommonConfigurationFields extends 
TestConfigurationFieldsBase {
 xmlPropsToSkipCompare.add("fs.azure.local.sas.key.mode");
 xmlPropsToSkipCompare.add("fs.azure.secure.mode");
 xmlPropsToSkipCompare.add("fs.azure.authorization");
+xmlPropsToSkipCompare.add("fs.azure.authorization.caching.enable");
 
 // Deprecated properties.  These should eventually be removed from the
 // class.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2843c688/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/CachingAuthorizer.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/CachingAuthorizer.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/CachingAuthorizer.java
new file mode 100644
index 000..016ae74
--- /dev/null
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/CachingAuthorizer.java
@@ -0,0 +1,232 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * 
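
The cache is governed by the new core-default property shown above; a short
sketch of overriding it from client code (the property string comes from the
diff, the rest is illustrative):

import org.apache.hadoop.conf.Configuration;

public class WasbCachingFlagExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Only consulted when fs.azure.authorization is enabled.
    conf.setBoolean("fs.azure.authorization.caching.enable", false);
    System.out.println(
        conf.getBoolean("fs.azure.authorization.caching.enable", true));
  }
}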

[42/50] [abbrv] hadoop git commit: HADOOP-14669. GenericTestUtils.waitFor should use monotonic time. Contributed by Daniel Templeton

2017-07-19 Thread xyao
HADOOP-14669. GenericTestUtils.waitFor should use monotonic time. Contributed 
by Daniel Templeton


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/df180259
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/df180259
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/df180259

Branch: refs/heads/HDFS-7240
Commit: df180259b0cc3660e199e85447c7193bee51751c
Parents: 2843c68
Author: Jason Lowe 
Authored: Wed Jul 19 09:41:22 2017 -0500
Committer: Jason Lowe 
Committed: Wed Jul 19 09:41:22 2017 -0500

--
 .../src/test/java/org/apache/hadoop/test/GenericTestUtils.java   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/df180259/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
index 38a0c6c..9291bb0 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
@@ -356,10 +356,10 @@ public abstract class GenericTestUtils {
 Preconditions.checkArgument(waitForMillis >= checkEveryMillis,
 ERROR_INVALID_ARGUMENT);
 
-long st = Time.now();
+long st = Time.monotonicNow();
 boolean result = check.get();
 
-while (!result && (Time.now() - st < waitForMillis)) {
+while (!result && (Time.monotonicNow() - st < waitForMillis)) {
   Thread.sleep(checkEveryMillis);
   result = check.get();
 }
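
Time.now() is wall-clock based, so NTP steps or manual clock changes can make
the elapsed-time subtraction above jump or go negative; a monotonic clock
cannot. A JDK-only sketch of the same polling loop, assuming System.nanoTime()
as the monotonic source (which is what Hadoop's Time.monotonicNow() wraps):

import java.util.concurrent.atomic.AtomicBoolean;

public class MonotonicPollExample {
  public static void main(String[] args) throws InterruptedException {
    AtomicBoolean done = new AtomicBoolean(false);
    new Thread(() -> {
      try { Thread.sleep(100); } catch (InterruptedException ignored) { }
      done.set(true);
    }).start();

    long waitForMillis = 1000;
    long st = System.nanoTime() / 1_000_000; // monotonic, immune to clock steps
    boolean result = done.get();
    while (!result && (System.nanoTime() / 1_000_000 - st < waitForMillis)) {
      Thread.sleep(10);
      result = done.get();
    }
    System.out.println(result ? "condition met" : "timed out");
  }
}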





[23/50] [abbrv] hadoop git commit: HADOOP-14521. KMS client needs retry logic. Contributed by Rushabh S Shah.

2017-07-19 Thread xyao
HADOOP-14521. KMS client needs retry logic. Contributed by Rushabh S Shah.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0a6d5c0c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0a6d5c0c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0a6d5c0c

Branch: refs/heads/HDFS-7240
Commit: 0a6d5c0cf1d963da9131aa12326fc576f0e92d2c
Parents: f413ee3
Author: Xiao Chen 
Authored: Fri Jul 14 22:14:29 2017 -0700
Committer: Xiao Chen 
Committed: Fri Jul 14 22:22:53 2017 -0700

--
 .../crypto/key/kms/KMSClientProvider.java   |  39 ++-
 .../key/kms/LoadBalancingKMSClientProvider.java |  78 -
 .../fs/CommonConfigurationKeysPublic.java   |  29 ++
 .../src/main/resources/core-default.xml |  28 ++
 .../kms/TestLoadBalancingKMSClientProvider.java | 315 ++-
 .../hadoop/hdfs/TestEncryptionZonesWithKMS.java |  19 +-
 6 files changed, 464 insertions(+), 44 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a6d5c0c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
index b3abd0c..20ad58c 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/KMSClientProvider.java
@@ -309,9 +309,8 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
  * - HOSTNAME = string
  * - PORT = integer
  *
- * If multiple hosts are provider, the Factory will create a
- * {@link LoadBalancingKMSClientProvider} that round-robins requests
- * across the provided list of hosts.
+ * This will always create a {@link LoadBalancingKMSClientProvider}
+ * if the uri is correct.
  */
 @Override
 public KeyProvider createProvider(URI providerUri, Configuration conf)
@@ -338,30 +337,26 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
   }
   hostsPart = t[0];
 }
-return createProvider(providerUri, conf, origUrl, port, hostsPart);
+return createProvider(conf, origUrl, port, hostsPart);
   }
   return null;
 }
 
-private KeyProvider createProvider(URI providerUri, Configuration conf,
+private KeyProvider createProvider(Configuration conf,
 URL origUrl, int port, String hostsPart) throws IOException {
   String[] hosts = hostsPart.split(";");
-  if (hosts.length == 1) {
-return new KMSClientProvider(providerUri, conf);
-  } else {
-KMSClientProvider[] providers = new KMSClientProvider[hosts.length];
-for (int i = 0; i < hosts.length; i++) {
-  try {
-providers[i] =
-new KMSClientProvider(
-new URI("kms", origUrl.getProtocol(), hosts[i], port,
-origUrl.getPath(), null, null), conf);
-  } catch (URISyntaxException e) {
-throw new IOException("Could not instantiate KMSProvider..", e);
-  }
+  KMSClientProvider[] providers = new KMSClientProvider[hosts.length];
+  for (int i = 0; i < hosts.length; i++) {
+try {
+  providers[i] =
+  new KMSClientProvider(
+  new URI("kms", origUrl.getProtocol(), hosts[i], port,
+  origUrl.getPath(), null, null), conf);
+} catch (URISyntaxException e) {
+  throw new IOException("Could not instantiate KMSProvider.", e);
 }
-return new LoadBalancingKMSClientProvider(providers, conf);
   }
+  return new LoadBalancingKMSClientProvider(providers, conf);
 }
   }
 
@@ -1078,7 +1073,11 @@ public class KMSClientProvider extends KeyProvider 
implements CryptoExtension,
   } catch (InterruptedException e) {
 Thread.currentThread().interrupt();
   } catch (Exception e) {
-throw new IOException(e);
+if (e instanceof IOException) {
+  throw (IOException) e;
+} else {
+  throw new IOException(e);
+}
   }
 }
 return tokens;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a6d5c0c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/crypto/key/kms/LoadBalancingKMSClientProvider.java
--
diff --git 
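
With this patch the factory always returns a LoadBalancingKMSClientProvider
when the kms:// URI parses, so the retry logic applies uniformly even for a
single host. A hedged sketch of resolving a provider (host names are
placeholders; the multi-host ';' syntax follows the javadoc above):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderFactory;

public class KmsProviderExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Two KMS hosts share one authority, separated by ';'.
    URI uri = URI.create("kms://http@kms-1;kms-2:9600/kms");
    KeyProvider provider = KeyProviderFactory.get(uri, conf);
    // After this change a single-host URI would print the same class name.
    System.out.println(provider.getClass().getSimpleName());
  }
}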

[32/50] [abbrv] hadoop git commit: HADOOP-14539. Move commons logging APIs over to slf4j in hadoop-common. Contributed by Wenxin He.

2017-07-19 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GSet.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GSet.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GSet.java
index e4a8d0f..fbc1418 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GSet.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GSet.java
@@ -19,9 +19,9 @@ package org.apache.hadoop.util;
 
 import java.util.Collection;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A {@link GSet} is set,
@@ -35,7 +35,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
  */
 @InterfaceAudience.Private
public interface GSet<K, E extends K> extends Iterable<E> {
-  static final Log LOG = LogFactory.getLog(GSet.class);
+  Logger LOG = LoggerFactory.getLogger(GSet.class);
 
   /**
* @return The size of this set.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
index 835206a..ac9776f 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
@@ -34,8 +34,6 @@ import org.apache.commons.cli.Option;
 import org.apache.commons.cli.OptionBuilder;
 import org.apache.commons.cli.Options;
 import org.apache.commons.cli.ParseException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -45,6 +43,8 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * GenericOptionsParser is a utility to parse command line
@@ -113,7 +113,8 @@ import org.apache.hadoop.security.UserGroupInformation;
 @InterfaceStability.Evolving
 public class GenericOptionsParser {
 
-  private static final Log LOG = LogFactory.getLog(GenericOptionsParser.class);
+  private static final Logger LOG =
+  LoggerFactory.getLogger(GenericOptionsParser.class);
   private Configuration conf;
   private CommandLine commandLine;
   private final boolean parseSuccessful;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java
index 340f792..67b0247 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/HostsFileReader.java
@@ -31,11 +31,11 @@ import javax.xml.parsers.DocumentBuilder;
 import javax.xml.parsers.DocumentBuilderFactory;
 import javax.xml.parsers.ParserConfigurationException;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceAudience.Private;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.w3c.dom.Document;
 import org.w3c.dom.Element;
 import org.w3c.dom.Node;
@@ -47,7 +47,8 @@ import org.xml.sax.SAXException;
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 @InterfaceStability.Unstable
 public class HostsFileReader {
-  private static final Log LOG = LogFactory.getLog(HostsFileReader.class);
+  private static final Logger LOG = LoggerFactory.getLogger(HostsFileReader
+  .class);
 
  private final AtomicReference<HostDetails> current;
 
@@ -171,7 +172,7 @@ public class HostsFileReader {
 }
   }
 } catch (IOException|SAXException|ParserConfigurationException e) {
-  LOG.fatal("error parsing " + filename, e);
+  LOG.error("error 

[49/50] [abbrv] hadoop git commit: Merge branch 'HDFS-7240' of https://git-wip-us.apache.org/repos/asf/hadoop into HDFS-7240

2017-07-19 Thread xyao
Merge branch 'HDFS-7240' of https://git-wip-us.apache.org/repos/asf/hadoop into 
HDFS-7240


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/84e11c7c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/84e11c7c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/84e11c7c

Branch: refs/heads/HDFS-7240
Commit: 84e11c7c01a928e9e101d3ca0a7be0547479ca12
Parents: 9a4246c a715f60
Author: Xiaoyu Yao 
Authored: Wed Jul 19 14:47:05 2017 -0700
Committer: Xiaoyu Yao 
Committed: Wed Jul 19 14:47:05 2017 -0700

--
 .../hadoop/util/concurrent/HadoopExecutors.java |  41 ++
 .../org/apache/hadoop/scm/ScmConfigKeys.java|  23 +
 .../server/datanode/ObjectStoreHandler.java |   3 +-
 .../checker/StorageLocationChecker.java |   8 +-
 .../org/apache/hadoop/ozone/OzoneBucket.java| 117 
 .../org/apache/hadoop/ozone/OzoneClient.java| 608 ---
 .../apache/hadoop/ozone/OzoneClientImpl.java| 519 
 .../apache/hadoop/ozone/OzoneConfigKeys.java|   9 +
 .../apache/hadoop/ozone/OzoneConfiguration.java |   7 +
 .../java/org/apache/hadoop/ozone/OzoneKey.java  | 120 
 .../org/apache/hadoop/ozone/OzoneVolume.java| 107 
 .../common/helpers/ContainerUtils.java  |  16 +-
 .../container/common/helpers/KeyUtils.java  |  20 +-
 .../common/impl/ContainerManagerImpl.java   |   6 +-
 .../container/common/impl/KeyManagerImpl.java   |  57 +-
 .../container/common/interfaces/KeyManager.java |  13 +-
 .../container/common/utils/ContainerCache.java  |  14 +-
 .../hadoop/ozone/io/OzoneInputStream.java   |  52 ++
 .../hadoop/ozone/io/OzoneOutputStream.java  |  62 ++
 .../apache/hadoop/ozone/io/package-info.java|  23 +
 .../apache/hadoop/ozone/ksm/BucketManager.java  |   2 +-
 .../org/apache/hadoop/ozone/ksm/KeyManager.java |   1 +
 .../hadoop/ozone/ksm/MetadataManager.java   |  32 +-
 .../hadoop/ozone/ksm/MetadataManagerImpl.java   | 181 +++---
 .../apache/hadoop/ozone/ksm/VolumeManager.java  |   3 +-
 .../hadoop/ozone/ksm/VolumeManagerImpl.java |  50 +-
 .../ozone/scm/StorageContainerManager.java  |  10 +-
 .../ozone/scm/block/BlockManagerImpl.java   |  67 +-
 .../org/apache/hadoop/ozone/scm/cli/SQLCLI.java | 112 ++--
 .../ozone/scm/container/ContainerMapping.java   |  19 +-
 .../ContainerReplicationManager.java| 306 ++
 .../container/replication/InProgressPool.java   | 302 +
 .../scm/container/replication/PeriodicPool.java | 119 
 .../scm/container/replication/package-info.java |  23 +
 .../ozone/scm/exceptions/SCMException.java  |   3 +-
 .../hadoop/ozone/scm/node/CommandQueue.java | 126 +++-
 .../hadoop/ozone/scm/node/NodeManager.java  |  10 +-
 .../hadoop/ozone/scm/node/NodePoolManager.java  |   5 +-
 .../hadoop/ozone/scm/node/SCMNodeManager.java   |  51 +-
 .../ozone/scm/node/SCMNodePoolManager.java  |  53 +-
 .../hadoop/ozone/web/client/OzoneBucket.java|  22 +-
 .../ozone/web/client/OzoneRestClient.java   |  17 +-
 .../hadoop/ozone/web/client/OzoneVolume.java|   6 +-
 .../web/handlers/BucketProcessTemplate.java |  20 +-
 .../ozone/web/handlers/KeyProcessTemplate.java  |   7 +
 .../web/handlers/VolumeProcessTemplate.java |   5 +-
 .../web/localstorage/OzoneMetadataManager.java  | 133 ++--
 .../apache/hadoop/ozone/web/ozShell/Shell.java  |   2 +
 .../web/ozShell/bucket/ListBucketHandler.java   |   2 +
 .../web/ozShell/volume/ListVolumeHandler.java   |  13 +-
 .../web/ozShell/volume/UpdateVolumeHandler.java |   4 +-
 .../hadoop/ozone/web/utils/OzoneUtils.java  |  24 +
 .../org/apache/hadoop/utils/BatchOperation.java |  90 +++
 .../org/apache/hadoop/utils/EntryConsumer.java  |  38 ++
 .../apache/hadoop/utils/LevelDBKeyFilters.java  |  65 --
 .../org/apache/hadoop/utils/LevelDBStore.java   | 182 +++---
 .../apache/hadoop/utils/MetadataKeyFilters.java |  65 ++
 .../org/apache/hadoop/utils/MetadataStore.java  | 152 +
 .../hadoop/utils/MetadataStoreBuilder.java  |  96 +++
 .../StorageContainerDatanodeProtocol.proto  |   5 +-
 .../src/main/resources/ozone-default.xml|  42 ++
 .../hadoop-hdfs/src/site/markdown/OzoneRest.md  |  14 +-
 .../apache/hadoop/ozone/MiniOzoneCluster.java   |  12 +
 .../apache/hadoop/ozone/RatisTestHelper.java|  80 ++-
 .../apache/hadoop/ozone/TestLevelDBStore.java   | 165 -
 .../apache/hadoop/ozone/TestMetadataStore.java  | 296 +
 .../apache/hadoop/ozone/TestOzoneClient.java| 190 --
 .../hadoop/ozone/TestOzoneClientImpl.java   | 214 +++
 .../ReplicationDatanodeStateManager.java|  92 +++
 .../TestUtils/ReplicationNodeManagerMock.java   | 315 ++
 .../ReplicationNodePoolManagerMock.java | 132 
 .../ozone/container/TestUtils/package-info.java |  18 +
 

[36/50] [abbrv] hadoop git commit: Addendum patch for YARN-5731

2017-07-19 Thread xyao
Addendum patch for YARN-5731


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0b7afc06
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0b7afc06
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0b7afc06

Branch: refs/heads/HDFS-7240
Commit: 0b7afc060c2024a882bd1934d0f722bfca731742
Parents: ccaf036
Author: Sunil G 
Authored: Tue Jul 18 11:49:09 2017 +0530
Committer: Sunil G 
Committed: Tue Jul 18 11:49:09 2017 +0530

--
 .../ProportionalCapacityPreemptionPolicy.java   | 25 --
 .../CapacitySchedulerConfiguration.java | 27 
 ...TestCapacitySchedulerSurgicalPreemption.java |  6 +++--
 3 files changed, 36 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b7afc06/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
index 719d2eb..fc8ad2b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
@@ -231,28 +231,13 @@ public class ProportionalCapacityPreemptionPolicy
   .add(new ReservedContainerCandidatesSelector(this));
 }
 
+boolean additionalPreemptionBasedOnReservedResource = csConfig.getBoolean(
+CapacitySchedulerConfiguration.ADDITIONAL_RESOURCE_BALANCE_BASED_ON_RESERVED_CONTAINERS,
+CapacitySchedulerConfiguration.DEFAULT_ADDITIONAL_RESOURCE_BALANCE_BASED_ON_RESERVED_CONTAINERS);
+
 // initialize candidates preemption selection policies
-// When select candidates for reserved containers is enabled, exclude reserved
-// resource in fifo policy (less aggressive). Otherwise include reserved
-// resource.
-//
-// Why doing this? In YARN-4390, we added preemption-based-on-reserved-container
-// Support. To reduce unnecessary preemption for large containers. We will
-// not include reserved resources while calculating ideal-allocation in
-// FifoCandidatesSelector.
-//
-// Changes in YARN-4390 will significantly reduce number of containers preempted
-// When cluster has heterogeneous container requests. (Please check test
-// report: https://issues.apache.org/jira/secure/attachment/12796197/YARN-4390-test-results.pdf
-//
-// However, on the other hand, in some corner cases, especially for
-// fragmented cluster. It could lead to preemption cannot kick in in some
-// cases. Please see YARN-5731.
-//
-// So to solve the problem, we will include reserved when surgical preemption
-// for reserved container, which reverts behavior when YARN-4390 is disabled.
 candidatesSelectionPolicies.add(new FifoCandidatesSelector(this,
-!selectCandidatesForResevedContainers));
+additionalPreemptionBasedOnReservedResource));
 
 // Do we need to specially consider intra queue
 boolean isIntraQueuePreemptionEnabled = csConfig.getBoolean(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b7afc06/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java
index 90a7e65..1e29d50 100644
--- 

[48/50] [abbrv] hadoop git commit: HADOOP-14666. Tests use assertTrue(....equals(...)) instead of assertEquals()

2017-07-19 Thread xyao
HADOOP-14666. Tests use assertTrue(....equals(...)) instead of assertEquals()


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c21c2603
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c21c2603
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c21c2603

Branch: refs/heads/HDFS-7240
Commit: c21c26039238f633a0d2df9670f636d026c35649
Parents: 077fcf6
Author: Daniel Templeton 
Authored: Wed Jul 19 13:58:55 2017 -0700
Committer: Daniel Templeton 
Committed: Wed Jul 19 13:58:55 2017 -0700

--
 .../authentication/util/TestCertificateUtil.java |  6 --
 .../java/org/apache/hadoop/conf/TestDeprecatedKeys.java  |  2 +-
 .../apache/hadoop/crypto/key/TestKeyProviderFactory.java | 11 +++
 .../src/test/java/org/apache/hadoop/fs/TestHardLink.java |  2 +-
 .../security/alias/TestCredentialProviderFactory.java| 10 +-
 .../hadoop/security/authorize/TestAccessControlList.java |  8 
 .../apache/hadoop/util/TestReadWriteDiskValidator.java   |  5 +++--
 7 files changed, 25 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c21c2603/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestCertificateUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestCertificateUtil.java
 
b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestCertificateUtil.java
index ce4176c..5794eb6 100644
--- 
a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestCertificateUtil.java
+++ 
b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestCertificateUtil.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.security.authentication.util;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -86,8 +88,8 @@ public class TestCertificateUtil {
 + "Mzc1xA==";
 try {
   RSAPublicKey pk = CertificateUtil.parseRSAPublicKey(pem);
-  assertTrue(pk != null);
-  assertTrue(pk.getAlgorithm().equals("RSA"));
+  assertNotNull(pk);
+  assertEquals("RSA", pk.getAlgorithm());
 } catch (ServletException se) {
   fail("Should not have thrown ServletException");
 }
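
The payoff of the substitution is in the failure output: assertTrue reports
only that the expression was false, while assertEquals reports the expected and
actual values. A tiny sketch of the idiom (values are illustrative):

import static org.junit.Assert.assertEquals;

import org.junit.Test;

public class AssertStyleExample {
  @Test
  public void preferAssertEquals() {
    String algorithm = "RSA";
    // On failure this prints expected:<RSA> but was:<...>, whereas
    // assertTrue(algorithm.equals("RSA")) would only say the check was false
    // (and would NPE if 'algorithm' were null).
    assertEquals("RSA", algorithm);
  }
}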

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c21c2603/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestDeprecatedKeys.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestDeprecatedKeys.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestDeprecatedKeys.java
index 3036d0c..167daa5 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestDeprecatedKeys.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestDeprecatedKeys.java
@@ -35,7 +35,7 @@ public class TestDeprecatedKeys extends TestCase {
 conf.set("topology.script.file.name", "xyz");
 conf.set("topology.script.file.name", "xyz");
 String scriptFile = 
conf.get(CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY);
-assertTrue(scriptFile.equals("xyz")) ;
+assertEquals("xyz", scriptFile) ;
   }
   
   //Tests reading / writing a conf file with deprecation after setting

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c21c2603/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderFactory.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderFactory.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderFactory.java
index 53785bc..db30eb0 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderFactory.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderFactory.java
@@ -189,8 +189,10 @@ public class TestKeyProviderFactory {
 assertTrue("Returned Keys should have included key4.", 
keys.contains("key4"));
 
 List kvl = provider.getKeyVersions("key3");
-assertTrue("KeyVersions should have been returned for key3.", kvl.size() 
== 1);
-assertTrue("KeyVersions should have included key3@0.", 
kvl.get(0).getVersionName().equals("key3@0"));
+

[17/50] [abbrv] hadoop git commit: HDFS-12137. DN dataset lock should be fair. Contributed by Daryn Sharp.

2017-07-19 Thread xyao
HDFS-12137. DN dataset lock should be fair. Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8d86a939
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8d86a939
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8d86a939

Branch: refs/heads/HDFS-7240
Commit: 8d86a93915ee00318289535d9c78e48b75c8359d
Parents: a29fe10
Author: Kihwal Lee 
Authored: Fri Jul 14 15:41:43 2017 -0500
Committer: Kihwal Lee 
Committed: Fri Jul 14 15:41:43 2017 -0500

--
 .../hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java  | 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d86a939/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 208d554..2544ff5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -39,6 +39,7 @@ import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.Executor;
 import java.util.concurrent.locks.Condition;
+import java.util.concurrent.locks.ReentrantLock;
 import java.util.concurrent.TimeUnit;
 
 import javax.management.NotCompliantMBeanException;
@@ -270,6 +271,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
 this.smallBufferSize = DFSUtilClient.getSmallBufferSize(conf);
 this.datasetLock = new AutoCloseableLock(
 new InstrumentedLock(getClass().getName(), LOG,
+  new ReentrantLock(true),
   conf.getTimeDuration(
 DFSConfigKeys.DFS_LOCK_SUPPRESS_WARNING_INTERVAL_KEY,
 DFSConfigKeys.DFS_LOCK_SUPPRESS_WARNING_INTERVAL_DEFAULT,
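
The one-line change threads a fair ReentrantLock into the instrumented dataset
lock: fair mode grants the lock to the longest-waiting thread rather than
letting new arrivals barge, trading some throughput for bounded wait times. A
JDK-only sketch of the flag:

import java.util.concurrent.locks.ReentrantLock;

public class FairLockExample {
  // 'true' selects the fair ordering policy (roughly FIFO hand-off).
  private static final ReentrantLock LOCK = new ReentrantLock(true);

  public static void main(String[] args) {
    Runnable task = () -> {
      LOCK.lock();
      try {
        System.out.println(Thread.currentThread().getName() + " acquired");
      } finally {
        LOCK.unlock();
      }
    };
    for (int i = 0; i < 4; i++) {
      new Thread(task, "worker-" + i).start();
    }
  }
}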





[38/50] [abbrv] hadoop git commit: HADOOP-14637. GenericTestUtils.waitFor needs to check condition again after max wait time. Contributed by Daniel Templeton

2017-07-19 Thread xyao
HADOOP-14637. GenericTestUtils.waitFor needs to check condition again after max 
wait time. Contributed by Daniel Templeton


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5aa2bf23
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5aa2bf23
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5aa2bf23

Branch: refs/heads/HDFS-7240
Commit: 5aa2bf231f40423865f0054ca27426ceb95ab4ba
Parents: f5f14a2
Author: Jason Lowe 
Authored: Tue Jul 18 16:23:41 2017 -0500
Committer: Jason Lowe 
Committed: Tue Jul 18 16:23:41 2017 -0500

--
 .../apache/hadoop/test/GenericTestUtils.java| 39 ++--
 1 file changed, 27 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5aa2bf23/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
index 82a5e08..38a0c6c 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
@@ -335,25 +335,40 @@ public abstract class GenericTestUtils {
 }
   }  
 
+  /**
+   * Wait for the specified test to return true. The test will be performed
+   * initially and then every {@code checkEveryMillis} until at least
+   * {@code waitForMillis} time has expired. If {@code check} is null or
+   * {@code waitForMillis} is less than {@code checkEveryMillis} this method
+   * will throw an {@link IllegalArgumentException}.
+   *
+   * @param check the test to perform
+   * @param checkEveryMillis how often to perform the test
+   * @param waitForMillis the amount of time after which no more tests will be
+   * performed
+   * @throws TimeoutException if the test does not return true in the allotted
+   * time
+   * @throws InterruptedException if the method is interrupted while waiting
+   */
  public static void waitFor(Supplier<Boolean> check, int checkEveryMillis,
   int waitForMillis) throws TimeoutException, InterruptedException {
 Preconditions.checkNotNull(check, ERROR_MISSING_ARGUMENT);
-Preconditions.checkArgument(waitForMillis > checkEveryMillis,
+Preconditions.checkArgument(waitForMillis >= checkEveryMillis,
 ERROR_INVALID_ARGUMENT);
 
 long st = Time.now();
-do {
-  boolean result = check.get();
-  if (result) {
-return;
-  }
-  
+boolean result = check.get();
+
+while (!result && (Time.now() - st < waitForMillis)) {
   Thread.sleep(checkEveryMillis);
-} while (Time.now() - st < waitForMillis);
-
-throw new TimeoutException("Timed out waiting for condition. " +
-"Thread diagnostics:\n" +
-TimedOutTestsListener.buildThreadDiagnosticString());
+  result = check.get();
+}
+
+if (!result) {
+  throw new TimeoutException("Timed out waiting for condition. " +
+  "Thread diagnostics:\n" +
+  TimedOutTestsListener.buildThreadDiagnosticString());
+}
   }
 
   /**
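
Typical test usage polls an asynchronous condition; with this patch the
condition is evaluated one final time when the deadline expires, so a condition
that becomes true during the last sleep still passes. A usage sketch:

import java.util.concurrent.atomic.AtomicBoolean;
import com.google.common.base.Supplier;
import org.apache.hadoop.test.GenericTestUtils;

public class WaitForExample {
  public static void main(String[] args) throws Exception {
    final AtomicBoolean done = new AtomicBoolean(false);
    new Thread(() -> {
      try { Thread.sleep(300); } catch (InterruptedException ignored) { }
      done.set(true);
    }).start();
    // Poll every 50 ms; throw TimeoutException after 1000 ms.
    GenericTestUtils.waitFor(new Supplier<Boolean>() {
      @Override
      public Boolean get() {
        return done.get();
      }
    }, 50, 1000);
    System.out.println("condition met");
  }
}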





[33/50] [abbrv] hadoop git commit: HADOOP-14539. Move commons logging APIs over to slf4j in hadoop-common. Contributed by Wenxin He.

2017-07-19 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRates.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRates.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRates.java
index 1074e87..994eb13 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRates.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRates.java
@@ -24,12 +24,11 @@ import java.util.Set;
 import static com.google.common.base.Preconditions.*;
 import com.google.common.collect.Sets;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Helper class to manage a group of mutable rate metrics
@@ -43,7 +42,7 @@ import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class MutableRates extends MutableMetric {
-  static final Log LOG = LogFactory.getLog(MutableRates.class);
+  static final Logger LOG = LoggerFactory.getLogger(MutableRates.class);
   private final MetricsRegistry registry;
  private final Set<Class<?>> protocolCache = Sets.newHashSet();
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRatesWithAggregation.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRatesWithAggregation.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRatesWithAggregation.java
index 9827ca7..26a1506 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRatesWithAggregation.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/MutableRatesWithAggregation.java
@@ -27,12 +27,12 @@ import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentLinkedDeque;
 import java.util.concurrent.ConcurrentMap;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.metrics2.util.SampleStat;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 
 /**
@@ -48,7 +48,8 @@ import org.apache.hadoop.metrics2.util.SampleStat;
 @InterfaceAudience.Public
 @InterfaceStability.Evolving
 public class MutableRatesWithAggregation extends MutableMetric {
-  static final Log LOG = LogFactory.getLog(MutableRatesWithAggregation.class);
+  static final Logger LOG =
+  LoggerFactory.getLogger(MutableRatesWithAggregation.class);
  private final Map<String, MutableRate> globalMetrics =
  new ConcurrentHashMap<>();
  private final Set<Class<?>> protocolCache = Sets.newHashSet();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/GraphiteSink.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/GraphiteSink.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/GraphiteSink.java
index 5c58d52..de4c14d 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/GraphiteSink.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/sink/GraphiteSink.java
@@ -19,8 +19,6 @@
 package org.apache.hadoop.metrics2.sink;
 
 import org.apache.commons.configuration2.SubsetConfiguration;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.metrics2.AbstractMetric;
@@ -28,6 +26,8 @@ import org.apache.hadoop.metrics2.MetricsException;
 import org.apache.hadoop.metrics2.MetricsRecord;
 import org.apache.hadoop.metrics2.MetricsSink;
 import org.apache.hadoop.metrics2.MetricsTag;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.Closeable;
 import java.io.IOException;
@@ -42,7 +42,8 @@ import java.nio.charset.StandardCharsets;
 

[01/50] [abbrv] hadoop git commit: HDFS-12105. Ozone: listVolumes doesn't work from ozone commandline. Contributed by Yiqun Lin.

2017-07-19 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 a715f60ce -> b3a7f3b2d


HDFS-12105. Ozone: listVolumes doesn't work from ozone commandline. Contributed 
by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ef9ba833
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ef9ba833
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ef9ba833

Branch: refs/heads/HDFS-7240
Commit: ef9ba8332c90497f4d5383d7661ba3f03c874d6d
Parents: 6798e6d
Author: Weiwei Yang 
Authored: Mon Jul 10 10:24:22 2017 +0800
Committer: Xiaoyu Yao 
Committed: Wed Jul 12 17:11:46 2017 -0700

--
 .../ozone/web/client/OzoneRestClient.java   | 60 ++-
 .../web/ozShell/volume/ListVolumeHandler.java   | 34 +--
 .../hadoop/ozone/web/client/TestVolume.java | 62 +++-
 3 files changed, 132 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef9ba833/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/client/OzoneRestClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/client/OzoneRestClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/client/OzoneRestClient.java
index 7c144ad..ebb824a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/client/OzoneRestClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/client/OzoneRestClient.java
@@ -204,24 +204,26 @@ public class OzoneRestClient implements Closeable {
* List all the volumes owned by the user or Owned by the user specified in
* the behalf of string.
*
-   * @param onBehalfOf - User Name of the user if it is not the caller. for
-   *   example, an admin wants to list some other users
-   *   volumes.
-   * @param prefix - Return only volumes that match this prefix.
-   * @param maxKeys- Maximum number of results to return, if the result set
-   *   is smaller than requested size, it means that list is
-   *   complete.
-   * @param prevKey- The last key that client got, server will continue
-   *   returning results from that point.
+   * @param onBehalfOf
+   *  User Name of the user if it is not the caller. for example,
+   *  an admin wants to list some other users volumes.
+   * @param prefix
+   *   Return only volumes that match this prefix.
+   * @param maxKeys
+   *   Maximum number of results to return, if the result set
+   *   is smaller than requested size, it means that list is
+   *   complete.
+   * @param startVolume
+   *   The previous volume name.
* @return List of Volumes
* @throws OzoneException
*/
-  public List<OzoneVolume> listVolumes(String onBehalfOf, String prefix, int
-  maxKeys, OzoneVolume prevKey) throws OzoneException {
+  public List<OzoneVolume> listVolumes(String onBehalfOf, String prefix,
+  int maxKeys, String startVolume) throws OzoneException {
 HttpGet httpGet = null;
 try (CloseableHttpClient httpClient = newHttpClient()) {
   URIBuilder builder = new URIBuilder(endPointURI);
-  if (prefix != null) {
+  if (!Strings.isNullOrEmpty(prefix)) {
 builder.addParameter(Header.OZONE_LIST_QUERY_PREFIX, prefix);
   }
 
@@ -230,9 +232,9 @@ public class OzoneRestClient implements Closeable {
 .toString(maxKeys));
   }
 
-  if (prevKey != null) {
+  if (!Strings.isNullOrEmpty(startVolume)) {
 builder.addParameter(Header.OZONE_LIST_QUERY_PREVKEY,
-prevKey.getOwnerName() + "/" + prevKey.getVolumeName());
+startVolume);
   }
 
   builder.setPath("/").build();
@@ -250,6 +252,33 @@ public class OzoneRestClient implements Closeable {
   }
 
   /**
+   * List all the volumes owned by the user or Owned by the user specified in
+   * the behalf of string.
+   *
+   * @param onBehalfOf - User Name of the user if it is not the caller. for
+   *   example, an admin wants to list some other users
+   *   volumes.
+   * @param prefix - Return only volumes that match this prefix.
+   * @param maxKeys- Maximum number of results to return, if the result set
+   *   is smaller than requested size, it means that list is
+   *   complete.
+   * @param prevKey- The last key that client got, server will continue
+   *   returning results from that point.
+   * @return List of Volumes
+   * @throws OzoneException
+   */
+  public List<OzoneVolume> listVolumes(String onBehalfOf, String prefix,
+  int maxKeys, OzoneVolume 

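The String-based startVolume parameter makes client-side paging straightforward. A minimal usage sketch, assuming OzoneRestClient exposes a constructor taking the REST endpoint URI (the endpoint, user name, prefix and page size here are illustrative, not from the patch):

  OzoneRestClient client = new OzoneRestClient("http://localhost:9864");
  String startVolume = null;  // null/empty means start from the beginning
  List<OzoneVolume> page;
  do {
    page = client.listVolumes("bilbo", "vol-", 100, startVolume);
    for (OzoneVolume volume : page) {
      System.out.println(volume.getVolumeName());
    }
    if (!page.isEmpty()) {
      // resume after the last name returned; a short page means the list is done
      startVolume = page.get(page.size() - 1).getVolumeName();
    }
  } while (page.size() == 100);
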
[21/50] [abbrv] hadoop git commit: HDFS-12130. Optimizing permission check for getContentSummary. Contributed by Chen Liang

2017-07-19 Thread xyao
HDFS-12130. Optimizing permission check for getContentSummary.  Contributed by  
Chen Liang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f413ee33
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f413ee33
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f413ee33

Branch: refs/heads/HDFS-7240
Commit: f413ee33df301659c4ca9024380c2354983dcc84
Parents: a1f12bb
Author: Tsz-Wo Nicholas Sze 
Authored: Fri Jul 14 14:35:51 2017 -0700
Committer: Tsz-Wo Nicholas Sze 
Committed: Fri Jul 14 14:35:51 2017 -0700

--
 .../server/blockmanagement/BlockCollection.java |   4 +-
 .../ContentSummaryComputationContext.java   |  20 ++
 .../namenode/DirectoryWithQuotaFeature.java |   4 +-
 .../server/namenode/FSDirStatAndListingOp.java  |   9 +-
 .../server/namenode/FSPermissionChecker.java|  32 +++
 .../hadoop/hdfs/server/namenode/INode.java  |   9 +-
 .../hdfs/server/namenode/INodeDirectory.java|   9 +-
 .../hdfs/server/namenode/INodeReference.java|   3 +-
 .../snapshot/DirectorySnapshottableFeature.java |   3 +-
 .../snapshot/DirectoryWithSnapshotFeature.java  |   3 +-
 .../hdfs/server/namenode/snapshot/Snapshot.java |   4 +-
 .../TestGetContentSummaryWithPermission.java| 201 +++
 12 files changed, 285 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f413ee33/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
index 2f214be..b880590 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
@@ -21,6 +21,7 @@ import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.security.AccessControlException;
 
 /** 
  * This interface is used by the block manager to expose a
@@ -36,7 +37,8 @@ public interface BlockCollection {
   /** 
* Get content summary.
*/
-  public ContentSummary computeContentSummary(BlockStoragePolicySuite bsps);
+  public ContentSummary computeContentSummary(BlockStoragePolicySuite bsps)
+  throws AccessControlException;
 
   /**
* @return the number of blocks or block groups

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f413ee33/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
index 8d5aa0d..43e6f0d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
@@ -20,11 +20,14 @@ package org.apache.hadoop.hdfs.server.namenode;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.security.AccessControlException;
+
 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
 import java.io.IOException;
@@ -46,6 +49,8 @@ public class ContentSummaryComputationContext {
 
   public static final String REPLICATED = "Replicated";
   public static final Log LOG = LogFactory.getLog(INode.class);
+
+  private FSPermissionChecker pc;
   /**
* Constructor
*
@@ -57,6 +62,12 @@ public class ContentSummaryComputationContext {
*/
   public ContentSummaryComputationContext(FSDirectory dir,
   FSNamesystem fsn, long limitPerRun, long sleepMicroSec) {
+this(dir, fsn, limitPerRun, sleepMicroSec, null);
+  }
+
+  public 

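The visible hunks show the shape of the optimization: computeContentSummary() now declares AccessControlException and the computation context can carry an FSPermissionChecker, so access is verified while the tree is walked rather than only once up front. A self-contained toy (deliberately not the HDFS types) illustrating that pattern:

  import java.util.Collections;
  import java.util.List;
  import java.util.Map;

  final class SummaryWalk {
    interface PermissionChecker { boolean canRead(String dir); }

    private final PermissionChecker pc;  // null means "skip checks", as in the patch
    private long fileCount;

    SummaryWalk(PermissionChecker pc) { this.pc = pc; }

    long summarize(Map<String, List<String>> tree, String dir) {
      if (pc != null && !pc.canRead(dir)) {
        // HDFS throws AccessControlException at this point.
        throw new SecurityException("Permission denied: " + dir);
      }
      for (String child : tree.getOrDefault(dir, Collections.<String>emptyList())) {
        if (tree.containsKey(child)) {
          summarize(tree, child);  // descend into a subdirectory
        } else {
          fileCount++;             // count a plain file
        }
      }
      return fileCount;
    }
  }
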
[40/50] [abbrv] hadoop git commit: YARN-6819. Application report fails if app rejected due to nodesize. Contributed by Bibin A Chundatt.

2017-07-19 Thread xyao
YARN-6819. Application report fails if app rejected due to nodesize. 
Contributed by Bibin A Chundatt.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/845c4e52
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/845c4e52
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/845c4e52

Branch: refs/heads/HDFS-7240
Commit: 845c4e52bdd579a24df5dbba7477b0ebf2fa16f1
Parents: daaf530
Author: Rohith Sharma K S 
Authored: Wed Jul 19 11:10:52 2017 +0530
Committer: Rohith Sharma K S 
Committed: Wed Jul 19 11:10:52 2017 +0530

--
 .../resourcemanager/recovery/RMStateStore.java  |  5 ++--
 .../resourcemanager/rmapp/RMAppEvent.java   | 24 
 .../resourcemanager/rmapp/RMAppEventType.java   |  1 +
 .../server/resourcemanager/rmapp/RMAppImpl.java |  8 +++
 .../recovery/TestZKRMStateStore.java| 14 +++-
 .../rmapp/TestRMAppTransitions.java | 17 ++
 6 files changed, 33 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/845c4e52/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
index e945b59..d0a8cf5 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java
@@ -221,8 +221,9 @@ public abstract class RMStateStore extends AbstractService {
   } catch (Exception e) {
 LOG.error("Error storing app: " + appId, e);
 if (e instanceof StoreLimitException) {
-  store.notifyApplication(new RMAppEvent(appId,
-  RMAppEventType.APP_REJECTED, e.getMessage(), false));
+  store.notifyApplication(
+  new RMAppEvent(appId, RMAppEventType.APP_SAVE_FAILED,
+  e.getMessage()));
 } else {
   isFenced = store.notifyStoreOperationFailedInternal(e);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/845c4e52/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppEvent.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppEvent.java
index 0c6139e..5c46945 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppEvent.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppEvent.java
@@ -25,7 +25,6 @@ public class RMAppEvent extends AbstractEvent<RMAppEventType>{
 
   private final ApplicationId appId;
   private final String diagnosticMsg;
-  private boolean storeAppInfo;
 
   public RMAppEvent(ApplicationId appId, RMAppEventType type) {
 this(appId, type, "");
@@ -36,21 +35,6 @@ public class RMAppEvent extends AbstractEvent<RMAppEventType>{
 super(type);
 this.appId = appId;
 this.diagnosticMsg = diagnostic;
-this.storeAppInfo = true;
-  }
-
-  /**
-   * Constructor to create RM Application Event type.
-   *
-   * @param appId application Id
-   * @param type RM Event type
-   * @param diagnostic Diagnostic message for event
-   * @param storeApp Application should be saved or not
-   */
-  public RMAppEvent(ApplicationId appId, RMAppEventType type, String diagnostic,
-  boolean storeApp) {
-this(appId, type, diagnostic);
-this.storeAppInfo = storeApp;
   }
 
   public ApplicationId getApplicationId() {
@@ -61,12 +45,4 @@ public class RMAppEvent extends AbstractEvent<RMAppEventType>{
 return this.diagnosticMsg;
   }
 
-  /**
-   * Store application to 

[07/50] [abbrv] hadoop git commit: YARN-6654. RollingLevelDBTimelineStore backwards incompatible after fst upgrade. Contributed by Jonathan Eagles

2017-07-19 Thread xyao
YARN-6654. RollingLevelDBTimelineStore backwards incompatible after fst 
upgrade. Contributed by Jonathan Eagles


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5f1ee72b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5f1ee72b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5f1ee72b

Branch: refs/heads/HDFS-7240
Commit: 5f1ee72b0ebf0330417b7c0115083bc851923be4
Parents: 945c095
Author: Jason Lowe 
Authored: Thu Jul 13 17:27:40 2017 -0500
Committer: Jason Lowe 
Committed: Thu Jul 13 17:27:40 2017 -0500

--
 .../timeline/RollingLevelDBTimelineStore.java   | 50 
 1 file changed, 41 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5f1ee72b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
index d139346..00f6630 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/RollingLevelDBTimelineStore.java
@@ -28,6 +28,7 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.EnumSet;
+import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -74,6 +75,7 @@ import org.iq80.leveldb.Options;
 import org.iq80.leveldb.ReadOptions;
 import org.iq80.leveldb.WriteBatch;
 import org.nustaq.serialization.FSTConfiguration;
+import org.nustaq.serialization.FSTClazzNameRegistry;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
 
@@ -170,9 +172,22 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
   .getLog(RollingLevelDBTimelineStore.class);
   private static FSTConfiguration fstConf =
   FSTConfiguration.createDefaultConfiguration();
+  // Fall back to 2.24 parsing if 2.50 parsing fails
+  private static FSTConfiguration fstConf224 =
+  FSTConfiguration.createDefaultConfiguration();
+  // Static class code for 2.24
+  private static final int LINKED_HASH_MAP_224_CODE = 83;
 
   static {
 fstConf.setShareReferences(false);
+fstConf224.setShareReferences(false);
+// YARN-6654 unable to find class for code 83 (LinkedHashMap)
+// The linked hash map was changed between 2.24 and 2.50 so that
+// the static code for LinkedHashMap (83) was changed to a dynamic
+// code.
+FSTClazzNameRegistry registry = fstConf224.getClassRegistry();
+registry.registerClass(
+LinkedHashMap.class, LINKED_HASH_MAP_224_CODE, fstConf224);
   }
 
   @Private
@@ -339,7 +354,7 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
   deletionThread.start();
 }
 super.serviceStart();
-   }
+  }
 
   @Override
   protected void serviceStop() throws Exception {
@@ -365,7 +380,7 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
 private final long ttl;
 private final long ttlInterval;
 
-public EntityDeletionThread(Configuration conf) {
+EntityDeletionThread(Configuration conf) {
   ttl = conf.getLong(TIMELINE_SERVICE_TTL_MS,
   DEFAULT_TIMELINE_SERVICE_TTL_MS);
   ttlInterval = conf.getLong(
@@ -479,9 +494,15 @@ public class RollingLevelDBTimelineStore extends AbstractService implements
   try {
 o = fstConf.asObject(iterator.peekNext().getValue());
 entity.addOtherInfo(keyStr, o);
-  } catch (Exception e) {
-LOG.warn("Error while decoding "
-+ entityId + ":otherInfo:" + keyStr, e);
+  } catch (Exception ignore) {
+try {
+  // Fall back to 2.24 parser
+  o = fstConf224.asObject(iterator.peekNext().getValue());
+  entity.addOtherInfo(keyStr, o);
+} catch (Exception e) {
+  LOG.warn("Error while decoding "
+  + entityId + ":otherInfo:" + keyStr, e);
+}
   }
 }
   } else if (key[prefixlen] == 

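The decode-with-fallback strategy above is self-contained enough to sketch in full. Everything below uses only the fst calls visible in the patch (createDefaultConfiguration, setShareReferences, getClassRegistry().registerClass, asObject); the class and method names are made up for illustration:

  import java.util.LinkedHashMap;
  import org.nustaq.serialization.FSTConfiguration;

  public final class FstFallbackDecoder {
    private static final int LINKED_HASH_MAP_224_CODE = 83;
    private static final FSTConfiguration CURRENT =
        FSTConfiguration.createDefaultConfiguration();
    private static final FSTConfiguration LEGACY_224 =
        FSTConfiguration.createDefaultConfiguration();

    static {
      CURRENT.setShareReferences(false);
      LEGACY_224.setShareReferences(false);
      // Pin LinkedHashMap to the static class code fst 2.24 assigned it, so
      // entries written before the upgrade remain readable.
      LEGACY_224.getClassRegistry().registerClass(
          LinkedHashMap.class, LINKED_HASH_MAP_224_CODE, LEGACY_224);
    }

    static Object decode(byte[] bytes) {
      try {
        return CURRENT.asObject(bytes);     // normal path: post-upgrade format
      } catch (Exception ignore) {
        return LEGACY_224.asObject(bytes);  // fallback: 2.24-era format
      }
    }
  }
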
[28/50] [abbrv] hadoop git commit: HDFS-12138. Remove redundant 'public' modifiers from BlockCollection. Contributed by Chen Liang

2017-07-19 Thread xyao
HDFS-12138. Remove redundant 'public' modifiers from BlockCollection.  
Contributed by Chen Liang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ed27f2b2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ed27f2b2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ed27f2b2

Branch: refs/heads/HDFS-7240
Commit: ed27f2b2cc6093865367b98a100dcd42b2c6b89d
Parents: b0e78ae
Author: Tsz-Wo Nicholas Sze 
Authored: Mon Jul 17 13:54:16 2017 -0700
Committer: Tsz-Wo Nicholas Sze 
Committed: Mon Jul 17 13:54:16 2017 -0700

--
 .../server/blockmanagement/BlockCollection.java | 22 ++--
 1 file changed, 11 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed27f2b2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
index b880590..c0dfc14 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
@@ -32,62 +32,62 @@ public interface BlockCollection {
   /**
* Get the last block of the collection.
*/
-  public BlockInfo getLastBlock();
+  BlockInfo getLastBlock();
 
   /** 
* Get content summary.
*/
-  public ContentSummary computeContentSummary(BlockStoragePolicySuite bsps)
+  ContentSummary computeContentSummary(BlockStoragePolicySuite bsps)
   throws AccessControlException;
 
   /**
* @return the number of blocks or block groups
*/ 
-  public int numBlocks();
+  int numBlocks();
 
   /**
* Get the blocks (striped or contiguous).
*/
-  public BlockInfo[] getBlocks();
+  BlockInfo[] getBlocks();
 
   /**
* Get preferred block size for the collection 
* @return preferred block size in bytes
*/
-  public long getPreferredBlockSize();
+  long getPreferredBlockSize();
 
   /**
* Get block replication for the collection.
* @return block replication value. Return 0 if the file is erasure coded.
*/
-  public short getPreferredBlockReplication();
+  short getPreferredBlockReplication();
 
   /**
* @return the storage policy ID.
*/
-  public byte getStoragePolicyID();
+  byte getStoragePolicyID();
 
   /**
* Get the name of the collection.
*/
-  public String getName();
+  String getName();
 
   /**
* Set the block (contiguous or striped) at the given index.
*/
-  public void setBlock(int index, BlockInfo blk);
+  void setBlock(int index, BlockInfo blk);
 
   /**
* Convert the last block of the collection to an under-construction block
* and set the locations.
*/
-  public void convertLastBlockToUC(BlockInfo lastBlock,
+  void convertLastBlockToUC(BlockInfo lastBlock,
   DatanodeStorageInfo[] targets) throws IOException;
 
   /**
* @return whether the block collection is under construction.
*/
-  public boolean isUnderConstruction();
+  boolean isUnderConstruction();
 
   /**
* @return whether the block collection is in striping format


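The cleanup is behavior-neutral: the Java Language Specification makes interface members implicitly public (and interface methods implicitly abstract), so the two declarations below are identical and the shorter form is the idiomatic one:

  interface Example {
    public abstract int size();  // redundant modifiers
    int count();                 // equivalent, preferred form
  }
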



[03/50] [abbrv] hadoop git commit: Merge branch 'HDFS-7240' of https://git-wip-us.apache.org/repos/asf/hadoop into HDFS-7240

2017-07-19 Thread xyao
Merge branch 'HDFS-7240' of https://git-wip-us.apache.org/repos/asf/hadoop into 
HDFS-7240


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9a4246c8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9a4246c8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9a4246c8

Branch: refs/heads/HDFS-7240
Commit: 9a4246c80a40ccc1f53036bed2c7402644c9da65
Parents: ef9ba83 87154fc
Author: Xiaoyu Yao 
Authored: Wed Jul 12 17:19:05 2017 -0700
Committer: Xiaoyu Yao 
Committed: Wed Jul 12 17:19:05 2017 -0700

--

--






[10/50] [abbrv] hadoop git commit: YARN-6805. NPE in LinuxContainerExecutor due to null PrivilegedOperationException exit code. Contributed by Jason Lowe

2017-07-19 Thread xyao
YARN-6805. NPE in LinuxContainerExecutor due to null 
PrivilegedOperationException exit code. Contributed by Jason Lowe


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ebc048cc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ebc048cc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ebc048cc

Branch: refs/heads/HDFS-7240
Commit: ebc048cc055d0f7d1b85bc0b6f56cd15673e837d
Parents: 0ffca5d
Author: Jason Lowe 
Authored: Thu Jul 13 17:44:47 2017 -0500
Committer: Jason Lowe 
Committed: Thu Jul 13 17:44:47 2017 -0500

--
 .../nodemanager/LinuxContainerExecutor.java | 19 +++--
 .../PrivilegedOperationException.java   | 10 +--
 .../runtime/ContainerExecutionException.java| 10 +--
 .../TestLinuxContainerExecutorWithMocks.java| 89 
 4 files changed, 111 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ebc048cc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
index 9a3b2d2..2aaa835 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
@@ -275,6 +275,10 @@ public class LinuxContainerExecutor extends ContainerExecutor {
 }
   }
 
+  protected PrivilegedOperationExecutor getPrivilegedOperationExecutor() {
+return PrivilegedOperationExecutor.getInstance(getConf());
+  }
+
   @Override
   public void init() throws IOException {
 Configuration conf = super.getConf();
@@ -285,7 +289,7 @@ public class LinuxContainerExecutor extends ContainerExecutor {
   PrivilegedOperation checkSetupOp = new PrivilegedOperation(
   PrivilegedOperation.OperationType.CHECK_SETUP);
   PrivilegedOperationExecutor privilegedOperationExecutor =
-  PrivilegedOperationExecutor.getInstance(conf);
+  getPrivilegedOperationExecutor();
 
   privilegedOperationExecutor.executePrivilegedOperation(checkSetupOp,
   false);
@@ -382,7 +386,7 @@ public class LinuxContainerExecutor extends ContainerExecutor {
 try {
   Configuration conf = super.getConf();
   PrivilegedOperationExecutor privilegedOperationExecutor =
-  PrivilegedOperationExecutor.getInstance(conf);
+  getPrivilegedOperationExecutor();
 
   privilegedOperationExecutor.executePrivilegedOperation(prefixCommands,
   initializeContainerOp, null, null, false, true);
@@ -530,8 +534,9 @@ public class LinuxContainerExecutor extends ContainerExecutor {
 }
 builder.append("Stack trace: "
 + StringUtils.stringifyException(e) + "\n");
-if (!e.getOutput().isEmpty()) {
-  builder.append("Shell output: " + e.getOutput() + "\n");
+String output = e.getOutput();
+if (output != null && !e.getOutput().isEmpty()) {
+  builder.append("Shell output: " + output + "\n");
 }
 String diagnostics = builder.toString();
 logOutput(diagnostics);
@@ -729,7 +734,7 @@ public class LinuxContainerExecutor extends ContainerExecutor {
 try {
   Configuration conf = super.getConf();
   PrivilegedOperationExecutor privilegedOperationExecutor =
-  PrivilegedOperationExecutor.getInstance(conf);
+  getPrivilegedOperationExecutor();
 
   privilegedOperationExecutor.executePrivilegedOperation(deleteAsUserOp,
   false);
@@ -759,7 +764,7 @@ public class LinuxContainerExecutor extends ContainerExecutor {
 
 try {
   PrivilegedOperationExecutor privOpExecutor =
-  PrivilegedOperationExecutor.getInstance(super.getConf());
+  getPrivilegedOperationExecutor();
 
   String results =
   privOpExecutor.executePrivilegedOperation(listAsUserOp, true);
@@ -818,7 +823,7 @@ public class LinuxContainerExecutor extends ContainerExecutor {
 
   mountCGroupsOp.appendArgs(cgroupKVs);
   PrivilegedOperationExecutor privilegedOperationExecutor =
-  PrivilegedOperationExecutor.getInstance(conf);
+  

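The new protected getPrivilegedOperationExecutor() factory is a test seam: previously every call site fetched the static PrivilegedOperationExecutor.getInstance(conf) and could not be intercepted. A sketch of how a test can exploit it, assuming Mockito is available (as the TestLinuxContainerExecutorWithMocks name suggests):

  import static org.mockito.Mockito.mock;

  final PrivilegedOperationExecutor mockExec =
      mock(PrivilegedOperationExecutor.class);
  LinuxContainerExecutor lce = new LinuxContainerExecutor() {
    @Override
    protected PrivilegedOperationExecutor getPrivilegedOperationExecutor() {
      return mockExec;  // every privileged operation is routed to the mock
    }
  };
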
[18/50] [abbrv] hadoop git commit: HDFS-12140. Remove BPOfferService lock contention to get block pool id. Contributed by Daryn Sharp.

2017-07-19 Thread xyao
HDFS-12140. Remove BPOfferService lock contention to get block pool id. 
Contributed by Daryn Sharp.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e7d187a1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e7d187a1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e7d187a1

Branch: refs/heads/HDFS-7240
Commit: e7d187a1b6a826edd5bd0f708184d48f3674d489
Parents: 8d86a93
Author: Kihwal Lee 
Authored: Fri Jul 14 16:07:17 2017 -0500
Committer: Kihwal Lee 
Committed: Fri Jul 14 16:07:17 2017 -0500

--
 .../hdfs/server/datanode/BPOfferService.java| 47 ++--
 .../server/datanode/TestBPOfferService.java | 29 
 2 files changed, 63 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e7d187a1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
index 0384f26..dbf7c8d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
@@ -72,6 +72,7 @@ class BPOfferService {
   volatile DatanodeRegistration bpRegistration;
 
   private final String nameserviceId;
+  private volatile String bpId;
   private final DataNode dn;
 
   /**
@@ -184,6 +185,11 @@ class BPOfferService {
   }
 
   String getBlockPoolId(boolean quiet) {
+// avoid lock contention unless the registration hasn't completed.
+String id = bpId;
+if (id != null) {
+  return id;
+}
 readLock();
 try {
   if (bpNSInfo != null) {
@@ -205,7 +211,7 @@ class BPOfferService {
   }
 
   boolean hasBlockPoolId() {
-return getNamespaceInfo() != null;
+return getBlockPoolId(true) != null;
   }
 
   NamespaceInfo getNamespaceInfo() {
@@ -217,6 +223,28 @@ class BPOfferService {
 }
   }
 
+  @VisibleForTesting
+  NamespaceInfo setNamespaceInfo(NamespaceInfo nsInfo) throws IOException {
+writeLock();
+try {
+  NamespaceInfo old = bpNSInfo;
+  if (bpNSInfo != null && nsInfo != null) {
+checkNSEquality(bpNSInfo.getBlockPoolID(), nsInfo.getBlockPoolID(),
+"Blockpool ID");
+checkNSEquality(bpNSInfo.getNamespaceID(), nsInfo.getNamespaceID(),
+"Namespace ID");
+checkNSEquality(bpNSInfo.getClusterID(), nsInfo.getClusterID(),
+"Cluster ID");
+  }
+  bpNSInfo = nsInfo;
+  // cache the block pool id for lock-free access.
+  bpId = (nsInfo != null) ? nsInfo.getBlockPoolID() : null;
+  return old;
+} finally {
+  writeUnlock();
+}
+  }
+
   @Override
   public String toString() {
 readLock();
@@ -289,9 +317,10 @@ class BPOfferService {
   private void checkBlock(ExtendedBlock block) {
 Preconditions.checkArgument(block != null,
 "block is null");
-    Preconditions.checkArgument(block.getBlockPoolId().equals(getBlockPoolId()),
+final String bpId = getBlockPoolId();
+Preconditions.checkArgument(block.getBlockPoolId().equals(bpId),
 "block belongs to BP %s instead of BP %s",
-block.getBlockPoolId(), getBlockPoolId());
+block.getBlockPoolId(), bpId);
   }
 
   //This must be called only by blockPoolManager
@@ -337,8 +366,7 @@ class BPOfferService {
 }
 
 try {
-  if (this.bpNSInfo == null) {
-this.bpNSInfo = nsInfo;
+  if (setNamespaceInfo(nsInfo) == null) {
 boolean success = false;
 
 // Now that we know the namespace ID, etc, we can pass this to the DN.
@@ -352,16 +380,9 @@ class BPOfferService {
 // The datanode failed to initialize the BP. We need to reset
 // the namespace info so that other BPService actors still have
 // a chance to set it, and re-initialize the datanode.
-this.bpNSInfo = null;
+setNamespaceInfo(null);
   }
 }
-  } else {
-checkNSEquality(bpNSInfo.getBlockPoolID(), nsInfo.getBlockPoolID(),
-"Blockpool ID");
-checkNSEquality(bpNSInfo.getNamespaceID(), nsInfo.getNamespaceID(),
-"Namespace ID");
-checkNSEquality(bpNSInfo.getClusterID(), nsInfo.getClusterID(),
-"Cluster ID");
   }
 } finally {
   writeUnlock();


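The locking pattern the patch applies is a volatile fast-path cache in front of a read-write lock: readers check the volatile field first and only contend on the lock while registration is still in flight, and the writer publishes the cached value under the write lock. A self-contained sketch of the same shape (simplified types, not the DataNode code):

  import java.util.concurrent.locks.ReentrantReadWriteLock;

  final class CachedId {
    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
    private volatile String id;  // lock-free fast path once registered
    private String source;       // guarded by the lock, may arrive late

    String get() {
      String cached = id;
      if (cached != null) {
        return cached;           // common case: no lock contention
      }
      lock.readLock().lock();
      try {
        return source;           // may still be null before registration
      } finally {
        lock.readLock().unlock();
      }
    }

    void set(String value) {
      lock.writeLock().lock();
      try {
        source = value;
        id = value;              // publish for lock-free readers
      } finally {
        lock.writeLock().unlock();
      }
    }
  }
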
[05/50] [abbrv] hadoop git commit: HADOOP-14646. FileContextMainOperationsBaseTest#testListStatusFilterWithSomeMatches never runs. Contributed by Andras Bokor.

2017-07-19 Thread xyao
HADOOP-14646. 
FileContextMainOperationsBaseTest#testListStatusFilterWithSomeMatches never 
runs. Contributed by Andras Bokor.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b61ab857
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b61ab857
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b61ab857

Branch: refs/heads/HDFS-7240
Commit: b61ab8573eb2f224481118004f620fe9f18db74b
Parents: cf0d084
Author: Masatake Iwasaki 
Authored: Thu Jul 13 21:41:43 2017 +0900
Committer: Masatake Iwasaki 
Committed: Thu Jul 13 21:41:43 2017 +0900

--
 .../org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b61ab857/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
--
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
index a536e57..35ec4ff 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java
@@ -391,6 +391,7 @@ public abstract class FileContextMainOperationsBaseTest  {
 
   }
   
+  @Test
   public void testListStatusFilterWithSomeMatches() throws Exception {
 Path[] testDirs = {
 getTestRootPath(fc, TEST_DIR_AAA),


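The one-line fix matters because JUnit 4 runners discover tests solely through the @Test annotation; a public void method without it is silently ignored rather than reported as skipped:

  import org.junit.Assert;
  import org.junit.Test;

  public class AnnotationDemo {
    public void testNeverRuns() {     // no @Test: the runner skips it silently
      Assert.fail("this failure is never reported");
    }

    @Test
    public void testActuallyRuns() {  // discovered and executed
      Assert.assertTrue(true);
    }
  }
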



[14/50] [abbrv] hadoop git commit: YARN-6759. Fix TestRMRestart.testRMRestartWaitForPreviousAMToFinish failure. Contributed by Naganarasimha G R

2017-07-19 Thread xyao
YARN-6759. Fix TestRMRestart.testRMRestartWaitForPreviousAMToFinish failure. 
Contributed by Naganarasimha G R


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/75c0220b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/75c0220b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/75c0220b

Branch: refs/heads/HDFS-7240
Commit: 75c0220b4494dd4424a0c531e0bf0a763748dc62
Parents: 4a574e9
Author: bibinchundatt 
Authored: Fri Jul 14 13:53:39 2017 +0530
Committer: bibinchundatt 
Committed: Fri Jul 14 13:53:39 2017 +0530

--
 .../apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/75c0220b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
index 139e2da..955b4b6 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java
@@ -105,9 +105,9 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.TestSchedulerUtils;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestUtils;
@@ -622,7 +622,7 @@ public class TestRMRestart extends ParameterizedSchedulerTestBase {
 return new Boolean(rmAppForCheck.getAppAttempts().size() == 4);
   }
 },
-100, maxRetry);
+100, maxRetry * 100);
 Assert.assertEquals(RMAppAttemptState.FAILED,
 rmApp.getAppAttempts().get(latestAppAttemptId).getAppAttemptState());
 

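The two numeric arguments in the hunk appear to follow the GenericTestUtils.waitFor(check, checkEveryMillis, waitForMillis) convention used throughout the Hadoop tests, so the fix raises the overall deadline to maxRetry * 100 ms while keeping the 100 ms polling interval. The shape of the call, for reference (a reading aid, not the exact test source):

  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      return rmAppForCheck.getAppAttempts().size() == 4;  // condition to await
    }
  }, 100, maxRetry * 100);
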




[09/50] [abbrv] hadoop git commit: Revert "YARN-6805. NPE in LinuxContainerExecutor due to null PrivilegedOperationException exit code. Contributed by Jason Lowe"

2017-07-19 Thread xyao
Revert "YARN-6805. NPE in LinuxContainerExecutor due to null 
PrivilegedOperationException exit code. Contributed by Jason Lowe"

This reverts commit f76f5c0919cdb0b032edb309d137093952e77268.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0ffca5d3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0ffca5d3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0ffca5d3

Branch: refs/heads/HDFS-7240
Commit: 0ffca5d347df0acb1979dff7a07ae88ea834adc7
Parents: f76f5c0
Author: Jason Lowe 
Authored: Thu Jul 13 17:42:38 2017 -0500
Committer: Jason Lowe 
Committed: Thu Jul 13 17:42:38 2017 -0500

--
 .../nodemanager/LinuxContainerExecutor.java | 19 ++---
 .../PrivilegedOperationException.java   | 10 +--
 .../runtime/ContainerExecutionException.java| 10 +--
 .../TestLinuxContainerExecutorWithMocks.java| 89 
 4 files changed, 17 insertions(+), 111 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0ffca5d3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
index 47b99c2..9a3b2d2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
@@ -275,10 +275,6 @@ public class LinuxContainerExecutor extends ContainerExecutor {
 }
   }
 
-  protected PrivilegedOperationExecutor getPrivilegedOperationExecutor() {
-return PrivilegedOperationExecutor.getInstance(getConf());
-  }
-
   @Override
   public void init() throws IOException {
 Configuration conf = super.getConf();
@@ -289,7 +285,7 @@ public class LinuxContainerExecutor extends ContainerExecutor {
   PrivilegedOperation checkSetupOp = new PrivilegedOperation(
   PrivilegedOperation.OperationType.CHECK_SETUP);
   PrivilegedOperationExecutor privilegedOperationExecutor =
-  getPrivilegedOperationExecutor();
+  PrivilegedOperationExecutor.getInstance(conf);
 
   privilegedOperationExecutor.executePrivilegedOperation(checkSetupOp,
   false);
@@ -386,7 +382,7 @@ public class LinuxContainerExecutor extends ContainerExecutor {
 try {
   Configuration conf = super.getConf();
   PrivilegedOperationExecutor privilegedOperationExecutor =
-  getPrivilegedOperationExecutor();
+  PrivilegedOperationExecutor.getInstance(conf);
 
   privilegedOperationExecutor.executePrivilegedOperation(prefixCommands,
   initializeContainerOp, null, null, false, true);
@@ -534,9 +530,8 @@ public class LinuxContainerExecutor extends ContainerExecutor {
 }
 builder.append("Stack trace: "
 + StringUtils.stringifyException(e) + "\n");
-String output = e.getOutput();
-if (output!= null && !e.getOutput().isEmpty()) {
-  builder.append("Shell output: " + output + "\n");
+if (!e.getOutput().isEmpty()) {
+  builder.append("Shell output: " + e.getOutput() + "\n");
 }
 String diagnostics = builder.toString();
 logOutput(diagnostics);
@@ -734,7 +729,7 @@ public class LinuxContainerExecutor extends ContainerExecutor {
 try {
   Configuration conf = super.getConf();
   PrivilegedOperationExecutor privilegedOperationExecutor =
-  getPrivilegedOperationExecutor();
+  PrivilegedOperationExecutor.getInstance(conf);
 
   privilegedOperationExecutor.executePrivilegedOperation(deleteAsUserOp,
   false);
@@ -764,7 +759,7 @@ public class LinuxContainerExecutor extends ContainerExecutor {
 
 try {
   PrivilegedOperationExecutor privOpExecutor =
-  getPrivilegedOperationExecutor();
+  PrivilegedOperationExecutor.getInstance(super.getConf());
 
   String results =
   privOpExecutor.executePrivilegedOperation(listAsUserOp, true);
@@ -823,7 +818,7 @@ public class LinuxContainerExecutor extends ContainerExecutor {
 
   mountCGroupsOp.appendArgs(cgroupKVs);
   PrivilegedOperationExecutor privilegedOperationExecutor =
-  

[30/50] [abbrv] hadoop git commit: HADOOP-14539. Move commons logging APIs over to slf4j in hadoop-common. Contributed by Wenxin He.

2017-07-19 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcUtil.java
--
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcUtil.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcUtil.java
index cbc9943..cebebd2 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcUtil.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcUtil.java
@@ -19,8 +19,6 @@ package org.apache.hadoop.oncrpc;
 
 import java.nio.ByteBuffer;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.jboss.netty.buffer.ChannelBuffer;
 import org.jboss.netty.buffer.ChannelBuffers;
 import org.jboss.netty.channel.Channel;
@@ -29,6 +27,8 @@ import org.jboss.netty.channel.Channels;
 import org.jboss.netty.channel.MessageEvent;
 import org.jboss.netty.channel.SimpleChannelUpstreamHandler;
 import org.jboss.netty.handler.codec.frame.FrameDecoder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 public final class RpcUtil {
   /**
@@ -63,7 +63,8 @@ public final class RpcUtil {
* each RPC client.
*/
   static class RpcFrameDecoder extends FrameDecoder {
-public static final Log LOG = LogFactory.getLog(RpcFrameDecoder.class);
+public static final Logger LOG =
+LoggerFactory.getLogger(RpcFrameDecoder.class);
 private ChannelBuffer currentFrame;
 
 @Override
@@ -107,8 +108,8 @@ public final class RpcUtil {
* request into a RpcInfo instance.
*/
   static final class RpcMessageParserStage extends SimpleChannelUpstreamHandler {
-private static final Log LOG = LogFactory
-.getLog(RpcMessageParserStage.class);
+private static final Logger LOG = LoggerFactory
+.getLogger(RpcMessageParserStage.class);
 
 @Override
 public void messageReceived(ChannelHandlerContext ctx, MessageEvent e)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpClientHandler.java
--
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpClientHandler.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpClientHandler.java
index b72153a..23b6682 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpClientHandler.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpClientHandler.java
@@ -17,20 +17,21 @@
  */
 package org.apache.hadoop.oncrpc;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.jboss.netty.buffer.ChannelBuffer;
 import org.jboss.netty.channel.ChannelHandlerContext;
 import org.jboss.netty.channel.ChannelStateEvent;
 import org.jboss.netty.channel.ExceptionEvent;
 import org.jboss.netty.channel.MessageEvent;
 import org.jboss.netty.channel.SimpleChannelHandler;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A simple TCP based RPC client handler used by {@link SimpleTcpServer}.
  */
 public class SimpleTcpClientHandler extends SimpleChannelHandler {
-  public static final Log LOG = LogFactory.getLog(SimpleTcpClient.class);
+  public static final Logger LOG =
+  LoggerFactory.getLogger(SimpleTcpClient.class);
   protected final XDR request;
 
   public SimpleTcpClientHandler(XDR request) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java
--
diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java
index bd48b15..177fa3d 100644
--- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java
+++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/SimpleTcpServer.java
@@ -20,8 +20,6 @@ package org.apache.hadoop.oncrpc;
 import java.net.InetSocketAddress;
 import java.util.concurrent.Executors;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.jboss.netty.bootstrap.ServerBootstrap;
 import org.jboss.netty.channel.Channel;
 import org.jboss.netty.channel.ChannelFactory;
@@ -30,12 +28,15 @@ import org.jboss.netty.channel.ChannelPipelineFactory;
 import org.jboss.netty.channel.Channels;
 import org.jboss.netty.channel.SimpleChannelUpstreamHandler;
 import org.jboss.netty.channel.socket.nio.NioServerSocketChannelFactory;
+import org.slf4j.Logger;
+import 

[31/50] [abbrv] hadoop git commit: HADOOP-14539. Move commons logging APIs over to slf4j in hadoop-common. Contributed by Wenxin He.

2017-07-19 Thread xyao
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
--
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
index 4bda637..3416746 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java
@@ -58,8 +58,6 @@ import java.util.concurrent.atomic.AtomicInteger;
 
 import javax.net.SocketFactory;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -96,12 +94,13 @@ import org.mockito.stubbing.Answer;
 import com.google.common.base.Supplier;
 import com.google.common.primitives.Bytes;
 import com.google.common.primitives.Ints;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;
 
 /** Unit tests for IPC. */
 public class TestIPC {
-  public static final Log LOG =
-LogFactory.getLog(TestIPC.class);
+  public static final Logger LOG = LoggerFactory.getLogger(TestIPC.class);
   
   private static Configuration conf;
   final static int PING_INTERVAL = 1000;
@@ -230,12 +229,12 @@ public class TestIPC {
   final long param = RANDOM.nextLong();
   LongWritable value = call(client, param, server, conf);
   if (value.get() != param) {
-LOG.fatal("Call failed!");
+LOG.error("Call failed!");
 failed = true;
 break;
   }
 } catch (Exception e) {
-  LOG.fatal("Caught: " + StringUtils.stringifyException(e));
+  LOG.error("Caught: " + StringUtils.stringifyException(e));
   failed = true;
 }
   }
@@ -784,7 +783,7 @@ public class TestIPC {
 call(client, new LongWritable(Thread.currentThread().getId()),
 addr, 6, conf);
   } catch (Throwable e) {
-LOG.error(e);
+LOG.error(e.toString());
 failures.incrementAndGet();
 return;
   } finally {
@@ -895,7 +894,7 @@ public class TestIPC {
   callBarrier.await();
 }
   } catch (Throwable t) {
-LOG.error(t);
+LOG.error(t.toString());
 error.set(true); 
   } 
 }
@@ -917,7 +916,7 @@ public class TestIPC {
   callReturned.countDown();
   Thread.sleep(1);
 } catch (IOException e) {
-  LOG.error(e);
+  LOG.error(e.toString());
 } catch (InterruptedException e) {
 } finally {
   client.stop();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ccaf0366/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPCServerResponder.java
--
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPCServerResponder.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPCServerResponder.java
index 546cb8f..7d7905e 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPCServerResponder.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPCServerResponder.java
@@ -32,8 +32,6 @@ import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.io.BytesWritable;
@@ -45,6 +43,8 @@ import org.apache.hadoop.ipc.Server.Call;
 import org.apache.hadoop.net.NetUtils;
 import org.junit.Assert;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * This test provokes partial writes in the server, which is 
@@ -52,8 +52,8 @@ import org.junit.Test;
  */
 public class TestIPCServerResponder {
 
-  public static final Log LOG = 
-LogFactory.getLog(TestIPCServerResponder.class);
+  public static final Logger LOG =
+LoggerFactory.getLogger(TestIPCServerResponder.class);
 
   private static Configuration conf = new Configuration();
 
@@ -126,7 +126,7 @@ public class TestIPCServerResponder {
   call(client, param, address);
   Thread.sleep(RANDOM.nextInt(20));
 } catch (Exception e) {
-  LOG.fatal("Caught Exception", e);
+  LOG.error("Caught 

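The repetitive edits in these hunks come down to two slf4j API differences: org.slf4j.Logger has no fatal level, and it has no overload taking a lone Throwable, so LOG.error(e) no longer compiles. A minimal before/after summary (the class name is illustrative):

  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;

  public class MigrationDemo {
    private static final Logger LOG =
        LoggerFactory.getLogger(MigrationDemo.class);

    void handle(Exception e) {
      LOG.error("Call failed!");         // was LOG.fatal(...): no fatal level in slf4j
      LOG.error(e.toString());           // was LOG.error(e): no error(Throwable) overload
      LOG.error("Caught exception", e);  // idiomatic: message plus full stack trace
    }
  }
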
[08/50] [abbrv] hadoop git commit: YARN-6805. NPE in LinuxContainerExecutor due to null PrivilegedOperationException exit code. Contributed by Jason Lowe

2017-07-19 Thread xyao
YARN-6805. NPE in LinuxContainerExecutor due to null 
PrivilegedOperationException exit code. Contributed by Jason Lowe


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f76f5c09
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f76f5c09
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f76f5c09

Branch: refs/heads/HDFS-7240
Commit: f76f5c0919cdb0b032edb309d137093952e77268
Parents: 5f1ee72
Author: Jason Lowe 
Authored: Thu Jul 13 17:38:17 2017 -0500
Committer: Jason Lowe 
Committed: Thu Jul 13 17:38:17 2017 -0500

--
 .../nodemanager/LinuxContainerExecutor.java | 19 +++--
 .../PrivilegedOperationException.java   | 10 +--
 .../runtime/ContainerExecutionException.java| 10 +--
 .../TestLinuxContainerExecutorWithMocks.java| 89 
 4 files changed, 111 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f76f5c09/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
index 9a3b2d2..47b99c2 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/LinuxContainerExecutor.java
@@ -275,6 +275,10 @@ public class LinuxContainerExecutor extends ContainerExecutor {
 }
   }
 
+  protected PrivilegedOperationExecutor getPrivilegedOperationExecutor() {
+return PrivilegedOperationExecutor.getInstance(getConf());
+  }
+
   @Override
   public void init() throws IOException {
 Configuration conf = super.getConf();
@@ -285,7 +289,7 @@ public class LinuxContainerExecutor extends ContainerExecutor {
   PrivilegedOperation checkSetupOp = new PrivilegedOperation(
   PrivilegedOperation.OperationType.CHECK_SETUP);
   PrivilegedOperationExecutor privilegedOperationExecutor =
-  PrivilegedOperationExecutor.getInstance(conf);
+  getPrivilegedOperationExecutor();
 
   privilegedOperationExecutor.executePrivilegedOperation(checkSetupOp,
   false);
@@ -382,7 +386,7 @@ public class LinuxContainerExecutor extends ContainerExecutor {
 try {
   Configuration conf = super.getConf();
   PrivilegedOperationExecutor privilegedOperationExecutor =
-  PrivilegedOperationExecutor.getInstance(conf);
+  getPrivilegedOperationExecutor();
 
   privilegedOperationExecutor.executePrivilegedOperation(prefixCommands,
   initializeContainerOp, null, null, false, true);
@@ -530,8 +534,9 @@ public class LinuxContainerExecutor extends ContainerExecutor {
 }
 builder.append("Stack trace: "
 + StringUtils.stringifyException(e) + "\n");
-if (!e.getOutput().isEmpty()) {
-  builder.append("Shell output: " + e.getOutput() + "\n");
+String output = e.getOutput();
+if (output!= null && !e.getOutput().isEmpty()) {
+  builder.append("Shell output: " + output + "\n");
 }
 String diagnostics = builder.toString();
 logOutput(diagnostics);
@@ -729,7 +734,7 @@ public class LinuxContainerExecutor extends ContainerExecutor {
 try {
   Configuration conf = super.getConf();
   PrivilegedOperationExecutor privilegedOperationExecutor =
-  PrivilegedOperationExecutor.getInstance(conf);
+  getPrivilegedOperationExecutor();
 
   privilegedOperationExecutor.executePrivilegedOperation(deleteAsUserOp,
   false);
@@ -759,7 +764,7 @@ public class LinuxContainerExecutor extends ContainerExecutor {
 
 try {
   PrivilegedOperationExecutor privOpExecutor =
-  PrivilegedOperationExecutor.getInstance(super.getConf());
+  getPrivilegedOperationExecutor();
 
   String results =
   privOpExecutor.executePrivilegedOperation(listAsUserOp, true);
@@ -818,7 +823,7 @@ public class LinuxContainerExecutor extends ContainerExecutor {
 
   mountCGroupsOp.appendArgs(cgroupKVs);
   PrivilegedOperationExecutor privilegedOperationExecutor =
-  PrivilegedOperationExecutor.getInstance(conf);
+  

[13/50] [abbrv] hadoop git commit: YARN-6769. Make schedulables without demand less needy in FairSharePolicy#compare. (Yunfan Zhou via Yufei Gu)

2017-07-19 Thread xyao
YARN-6769. Make schedulables without demand less needy in 
FairSharePolicy#compare. (Yunfan Zhou via Yufei Gu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4a574e9a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4a574e9a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4a574e9a

Branch: refs/heads/HDFS-7240
Commit: 4a574e9a84f2e997038452b22f2ad2a2d42e8ac8
Parents: 228ddaa
Author: Yufei Gu 
Authored: Thu Jul 13 23:10:10 2017 -0700
Committer: Yufei Gu 
Committed: Thu Jul 13 23:10:10 2017 -0700

--
 .../scheduler/fair/policies/FairSharePolicy.java | 17 +++--
 .../scheduler/fair/TestSchedulingPolicy.java | 19 ---
 2 files changed, 27 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a574e9a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
index c3ec47a..2a852aa 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java
@@ -58,6 +58,9 @@ public class FairSharePolicy extends SchedulingPolicy {
   /**
* Compare Schedulables via weighted fair sharing. In addition, Schedulables
* below their min share get priority over those whose min share is met.
+   *
+   * Schedulables without resource demand get lower priority than
+   * ones who have demands.
* 
* Schedulables below their min share are compared by how far below it they
* are as a ratio. For example, if job A has 8 out of a min share of 10 tasks
@@ -79,6 +82,16 @@ public class FairSharePolicy extends SchedulingPolicy {
 
 @Override
 public int compare(Schedulable s1, Schedulable s2) {
+  Resource demand1 = s1.getDemand();
+  Resource demand2 = s2.getDemand();
+  if (demand1.equals(Resources.none()) && Resources.greaterThan(
+  RESOURCE_CALCULATOR, null, demand2, Resources.none())) {
+return 1;
+  } else if (demand2.equals(Resources.none()) && Resources.greaterThan(
+  RESOURCE_CALCULATOR, null, demand1, Resources.none())) {
+return -1;
+  }
+
   double minShareRatio1, minShareRatio2;
   double useToWeightRatio1, useToWeightRatio2;
   double weight1, weight2;
@@ -86,9 +99,9 @@ public class FairSharePolicy extends SchedulingPolicy {
   Resource resourceUsage1 = s1.getResourceUsage();
   Resource resourceUsage2 = s2.getResourceUsage();
   Resource minShare1 = Resources.min(RESOURCE_CALCULATOR, null,
-  s1.getMinShare(), s1.getDemand());
+  s1.getMinShare(), demand1);
   Resource minShare2 = Resources.min(RESOURCE_CALCULATOR, null,
-  s2.getMinShare(), s2.getDemand());
+  s2.getMinShare(), demand2);
   boolean s1Needy = Resources.lessThan(RESOURCE_CALCULATOR, null,
   resourceUsage1, minShare1);
   boolean s2Needy = Resources.lessThan(RESOURCE_CALCULATOR, null,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4a574e9a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestSchedulingPolicy.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestSchedulingPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestSchedulingPolicy.java
index d84f0cf..3a16454 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestSchedulingPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestSchedulingPolicy.java

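The comparator change itself is complete in the FairSharePolicy hunk above: a schedulable with zero demand now sorts after anything that actually wants resources, and only then does the usual min-share/fair-share comparison apply. A stand-alone illustration of that ordering rule, with simplified stand-ins for Schedulable/Resource:

  import java.util.ArrayList;
  import java.util.Arrays;
  import java.util.List;

  final class DemandFirstDemo {
    static final class Item {
      final String name;
      final long demand;
      Item(String name, long demand) { this.name = name; this.demand = demand; }
    }

    public static void main(String[] args) {
      List<Item> items = new ArrayList<>(Arrays.asList(
          new Item("idleQueue", 0), new Item("busyQueue", 8)));
      items.sort((a, b) -> {
        if (a.demand == 0 && b.demand > 0) { return 1; }   // a sorts after b
        if (b.demand == 0 && a.demand > 0) { return -1; }  // a sorts before b
        return 0;  // equal footing: fall through to the fair-share comparison
      });
      System.out.println(items.get(0).name);  // prints busyQueue
    }
  }
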
[02/50] [abbrv] hadoop git commit: HDFS-12037. Ozone: Improvement rest API output format for better looking. Contributed by Weiwei Yang.

2017-07-19 Thread xyao
HDFS-12037. Ozone: Improvement rest API output format for better looking. 
Contributed by Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6798e6dd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6798e6dd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6798e6dd

Branch: refs/heads/HDFS-7240
Commit: 6798e6dd71316008fcec9fb891689e1c66311608
Parents: 3bc9202
Author: Weiwei Yang 
Authored: Sat Jul 8 10:06:58 2017 +0800
Committer: Xiaoyu Yao 
Committed: Wed Jul 12 17:11:46 2017 -0700

--
 .../java/org/apache/hadoop/ozone/web/response/BucketInfo.java | 3 ++-
 .../main/java/org/apache/hadoop/ozone/web/response/KeyInfo.java   | 3 ++-
 .../java/org/apache/hadoop/ozone/web/response/ListBuckets.java| 3 ++-
 .../main/java/org/apache/hadoop/ozone/web/response/ListKeys.java  | 3 ++-
 .../java/org/apache/hadoop/ozone/web/response/ListVolumes.java| 3 ++-
 .../java/org/apache/hadoop/ozone/web/response/VolumeInfo.java | 3 ++-
 6 files changed, 12 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6798e6dd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/BucketInfo.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/BucketInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/BucketInfo.java
index 1e47c16..53c7119 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/BucketInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/BucketInfo.java
@@ -56,7 +56,8 @@ public class BucketInfo implements Comparable {
 mapper.setVisibility(PropertyAccessor.FIELD, 
JsonAutoDetect.Visibility.ANY);
 mapper.addMixIn(Object.class, MixIn.class);
 
-WRITER = mapper.writer(filters);
+mapper.setFilterProvider(filters);
+WRITER = mapper.writerWithDefaultPrettyPrinter();
   }
 
   private String volumeName;

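The same two-line change repeats in each response class below. A self-contained sketch of the pattern (the Info class and "demoFilter" id are invented for illustration): registering the filters on the mapper keeps them in effect, while writerWithDefaultPrettyPrinter() adds indentation; the old mapper.writer(filters) call returned a compact single-line writer instead.

import com.fasterxml.jackson.annotation.JsonFilter;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.ser.impl.SimpleBeanPropertyFilter;
import com.fasterxml.jackson.databind.ser.impl.SimpleFilterProvider;

public class PrettyWriterDemo {
  @JsonFilter("demoFilter") // invented filter id for this sketch
  static class Info {
    public String volumeName = "vol1";
    public String bucketName = "bucket1";
  }

  public static void main(String[] args) throws Exception {
    ObjectMapper mapper = new ObjectMapper();
    // Register the filters on the mapper itself, then request a
    // pretty-printing writer, as the patch does.
    mapper.setFilterProvider(new SimpleFilterProvider()
        .addFilter("demoFilter", SimpleBeanPropertyFilter.serializeAll()));
    System.out.println(mapper.writerWithDefaultPrettyPrinter()
        .writeValueAsString(new Info())); // indented, multi-line JSON
  }
}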
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6798e6dd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/KeyInfo.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/KeyInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/KeyInfo.java
index 69be5b9..e5cfd21 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/KeyInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/KeyInfo.java
@@ -54,7 +54,8 @@ public class KeyInfo implements Comparable {
 JsonAutoDetect.Visibility.ANY);
 mapper.addMixIn(Object.class, MixIn.class);
 
-WRITER = mapper.writer(filters);
+mapper.setFilterProvider(filters);
+WRITER = mapper.writerWithDefaultPrettyPrinter();
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6798e6dd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListBuckets.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListBuckets.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListBuckets.java
index 3b0d32e..bc4e65b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListBuckets.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListBuckets.java
@@ -55,7 +55,8 @@ public class ListBuckets {
 JsonAutoDetect.Visibility.ANY);
 mapper.addMixIn(Object.class, MixIn.class);
 
-WRITER = mapper.writer(filters);
+mapper.setFilterProvider(filters);
+WRITER = mapper.writerWithDefaultPrettyPrinter();
   }
 
   private List buckets;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6798e6dd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListKeys.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListKeys.java
index fd76e4a..9dc77d2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/response/ListKeys.java
@@ -58,7 +58,8 @@ 

[06/50] [abbrv] hadoop git commit: YARN-6775. CapacityScheduler: Improvements to assignContainers, avoid unnecessary canAssignToUser/Queue calls. (Nathan Roberts via wangda)

2017-07-19 Thread xyao
YARN-6775. CapacityScheduler: Improvements to assignContainers, avoid 
unnecessary canAssignToUser/Queue calls. (Nathan Roberts via wangda)

Change-Id: I84ccd54200ccbaae23018ef320028e42b4c3509a


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/945c0958
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/945c0958
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/945c0958

Branch: refs/heads/HDFS-7240
Commit: 945c0958bb8df3dd9d5f1467f1216d2e6b0ee3d8
Parents: b61ab85
Author: Wangda Tan 
Authored: Thu Jul 13 10:30:15 2017 -0700
Committer: Wangda Tan 
Committed: Thu Jul 13 10:30:15 2017 -0700

--
 .../scheduler/activities/ActivitiesLogger.java  |  33 +++--
 .../scheduler/capacity/LeafQueue.java   |  83 ---
 .../capacity/TestCapacityScheduler.java | 146 ++-
 .../scheduler/capacity/TestLeafQueue.java   |  10 +-
 4 files changed, 231 insertions(+), 41 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/945c0958/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesLogger.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesLogger.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesLogger.java
index 3f8ed55..12aff02 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesLogger.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesLogger.java
@@ -63,9 +63,14 @@ public class ActivitiesLogger {
 SchedulerApplicationAttempt application, Priority priority,
 String diagnostic) {
   String type = "app";
-  recordActivity(activitiesManager, node, application.getQueueName(),
-  application.getApplicationId().toString(), priority,
-  ActivityState.REJECTED, diagnostic, type);
+  if (activitiesManager == null) {
+return;
+  }
+  if (activitiesManager.shouldRecordThisNode(node.getNodeID())) {
+recordActivity(activitiesManager, node, application.getQueueName(),
+application.getApplicationId().toString(), priority,
+ActivityState.REJECTED, diagnostic, type);
+  }
   finishSkippedAppAllocationRecording(activitiesManager,
   application.getApplicationId(), ActivityState.REJECTED, diagnostic);
 }
@@ -203,8 +208,13 @@ public class ActivitiesLogger {
 public static void recordQueueActivity(ActivitiesManager activitiesManager,
 SchedulerNode node, String parentQueueName, String queueName,
 ActivityState state, String diagnostic) {
-  recordActivity(activitiesManager, node, parentQueueName, queueName, null,
-  state, diagnostic, null);
+  if (activitiesManager == null) {
+return;
+  }
+  if (activitiesManager.shouldRecordThisNode(node.getNodeID())) {
+recordActivity(activitiesManager, node, parentQueueName, queueName,
+null, state, diagnostic, null);
+  }
 }
   }
 
@@ -266,13 +276,10 @@ public class ActivitiesLogger {
   private static void recordActivity(ActivitiesManager activitiesManager,
   SchedulerNode node, String parentName, String childName,
   Priority priority, ActivityState state, String diagnostic, String type) {
-if (activitiesManager == null) {
-  return;
-}
-if (activitiesManager.shouldRecordThisNode(node.getNodeID())) {
-  activitiesManager.addSchedulingActivityForNode(node.getNodeID(),
-  parentName, childName, priority != null ? priority.toString() : null,
-  state, diagnostic, type);
-}
+
+activitiesManager.addSchedulingActivityForNode(node.getNodeID(), parentName,
+childName, priority != null ? priority.toString() : null, state,
+diagnostic, type);
+
   }
 }

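The shape of that refactoring, reduced to a toy example: the cheap null and should-record guards move out to the public entry points, and the shared private helper now assumes they already passed. The Recorder interface and names below are illustrative, not the YARN classes.

import java.util.function.Predicate;

public class GuardHoistingDemo {
  interface Recorder { void add(String node, String detail); }

  // Public entry point: guards hoisted here, as in recordQueueActivity.
  static void recordQueueActivity(Recorder recorder,
      Predicate<String> shouldRecord, String node, String detail) {
    if (recorder == null) {
      return;
    }
    if (shouldRecord.test(node)) {
      recordActivity(recorder, node, detail);
    }
  }

  // Private helper: may now assume a non-null, already-filtered recorder.
  private static void recordActivity(Recorder recorder, String node,
      String detail) {
    recorder.add(node, detail);
  }

  public static void main(String[] args) {
    recordQueueActivity((n, d) -> System.out.println(n + " -> " + d),
        n -> n.startsWith("node"), "node1", "ACCEPTED");
    recordQueueActivity(null, n -> true, "node2", "ignored"); // safe no-op
  }
}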
http://git-wip-us.apache.org/repos/asf/hadoop/blob/945c0958/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java
--
diff --git 

[29/50] [abbrv] hadoop git commit: YARN-6706. Refactor ContainerScheduler to make oversubscription change easier. (Haibo Chen via asuresh)

2017-07-19 Thread xyao
YARN-6706. Refactor ContainerScheduler to make oversubscription change easier. 
(Haibo Chen via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5b007921
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5b007921
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5b007921

Branch: refs/heads/HDFS-7240
Commit: 5b007921cdf01ecc8ed97c164b7d327b8304c529
Parents: ed27f2b
Author: Arun Suresh 
Authored: Mon Jul 17 14:07:23 2017 -0700
Committer: Arun Suresh 
Committed: Mon Jul 17 14:11:14 2017 -0700

--
 .../scheduler/ContainerScheduler.java   | 135 +--
 .../TestContainerManagerRecovery.java   |   2 +-
 .../TestContainerSchedulerQueuing.java  |  85 
 3 files changed, 177 insertions(+), 45 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b007921/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
index 24530b3..19243ac 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/scheduler/ContainerScheduler.java
@@ -192,7 +192,9 @@ public class ContainerScheduler extends AbstractService implements
 // decrement only if it was a running container
 Container completedContainer = runningContainers.remove(container
 .getContainerId());
-if (completedContainer != null) {
+// only a running container releases resources upon completion
+boolean resourceReleased = completedContainer != null;
+if (resourceReleased) {
   this.utilizationTracker.subtractContainerResource(container);
   if (container.getContainerTokenIdentifier().getExecutionType() ==
   ExecutionType.OPPORTUNISTIC) {
@@ -218,8 +220,7 @@ public class ContainerScheduler extends AbstractService implements
 boolean resourcesAvailable = true;
 while (cIter.hasNext() && resourcesAvailable) {
   Container container = cIter.next();
-  if (this.utilizationTracker.hasResourcesAvailable(container)) {
-startAllocatedContainer(container);
+  if (tryStartContainer(container)) {
 cIter.remove();
   } else {
 resourcesAvailable = false;
@@ -228,50 +229,95 @@ public class ContainerScheduler extends AbstractService implements
 return resourcesAvailable;
   }
 
-  @VisibleForTesting
-  protected void scheduleContainer(Container container) {
-if (maxOppQueueLength <= 0) {
-  startAllocatedContainer(container);
-  return;
+  private boolean tryStartContainer(Container container) {
+boolean containerStarted = false;
+if (resourceAvailableToStartContainer(container)) {
+  startContainer(container);
+  containerStarted = true;
 }
-if (queuedGuaranteedContainers.isEmpty() &&
-queuedOpportunisticContainers.isEmpty() &&
-this.utilizationTracker.hasResourcesAvailable(container)) {
-  startAllocatedContainer(container);
+return containerStarted;
+  }
+
+  /**
+   * Check if there is resource available to start a given container
+   * immediately. (This can be extended to include overallocated resources)
+   * @param container the container to start
+   * @return true if container can be launched directly
+   */
+  private boolean resourceAvailableToStartContainer(Container container) {
+return this.utilizationTracker.hasResourcesAvailable(container);
+  }
+
+  private boolean enqueueContainer(Container container) {
+boolean isGuaranteedContainer = container.getContainerTokenIdentifier().
+getExecutionType() == ExecutionType.GUARANTEED;
+
+boolean isQueued;
+if (isGuaranteedContainer) {
+  queuedGuaranteedContainers.put(container.getContainerId(), container);
+  isQueued = true;
 } else {
-  LOG.info("No available resources for container {} to start its execution "
-  + "immediately.", container.getContainerId());
-  boolean isQueued = true;
- 

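The enqueueContainer hunk is cut off above, but the visible half already shows the split: guaranteed containers are always queued, while (by the maxOppQueueLength field seen earlier in scheduleContainer) opportunistic ones are presumably only queued while a configured cap is not exceeded. A toy sketch of that decision, with the cap behavior assumed rather than taken from the truncated diff:

import java.util.LinkedHashMap;
import java.util.Map;

public class EnqueueDemo {
  enum ExecutionType { GUARANTEED, OPPORTUNISTIC }

  private final Map<Integer, ExecutionType> queuedGuaranteed = new LinkedHashMap<>();
  private final Map<Integer, ExecutionType> queuedOpportunistic = new LinkedHashMap<>();
  private final int maxOppQueueLength = 2; // assumed cap, normally from config

  boolean enqueueContainer(int id, ExecutionType type) {
    if (type == ExecutionType.GUARANTEED) {
      queuedGuaranteed.put(id, type); // guaranteed work always waits its turn
      return true;
    }
    if (queuedOpportunistic.size() < maxOppQueueLength) {
      queuedOpportunistic.put(id, type);
      return true;
    }
    return false; // over the cap: caller rejects the opportunistic container
  }

  public static void main(String[] args) {
    EnqueueDemo demo = new EnqueueDemo();
    System.out.println(demo.enqueueContainer(0, ExecutionType.GUARANTEED));    // true
    System.out.println(demo.enqueueContainer(1, ExecutionType.OPPORTUNISTIC)); // true
    System.out.println(demo.enqueueContainer(2, ExecutionType.OPPORTUNISTIC)); // true
    System.out.println(demo.enqueueContainer(3, ExecutionType.OPPORTUNISTIC)); // false
  }
}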
[15/50] [abbrv] hadoop git commit: YARN-3260. AM attempt fails to register before RM processes launch event. Contributed by Bibin A Chundatt

2017-07-19 Thread xyao
YARN-3260. AM attempt fails to register before RM processes launch event.
Contributed by Bibin A Chundatt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a5ae5ac5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a5ae5ac5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a5ae5ac5

Branch: refs/heads/HDFS-7240
Commit: a5ae5ac50e97cf829c41dcf01655cd9bd4d36a00
Parents: 75c0220
Author: Jason Lowe 
Authored: Fri Jul 14 14:56:00 2017 -0500
Committer: Jason Lowe 
Committed: Fri Jul 14 14:56:00 2017 -0500

--
 .../rmapp/attempt/RMAppAttemptImpl.java | 20 +++-
 .../attempt/TestRMAppAttemptTransitions.java| 33 ++--
 2 files changed, 28 insertions(+), 25 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5ae5ac5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
index d66a97d..4210c54 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java
@@ -1205,6 +1205,8 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable {
 @Override
 public void transition(RMAppAttemptImpl appAttempt,
 RMAppAttemptEvent event) {
+
+  appAttempt.registerClientToken();
   appAttempt.launchAttempt();
 }
   }
@@ -1525,13 +1527,6 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable {
   // Register with AMLivelinessMonitor
   appAttempt.attemptLaunched();
 
-  // register the ClientTokenMasterKey after it is saved in the store,
-  // otherwise client may hold an invalid ClientToken after RM restarts.
-  if (UserGroupInformation.isSecurityEnabled()) {
-appAttempt.rmContext.getClientToAMTokenSecretManager()
-.registerApplication(appAttempt.getAppAttemptId(),
-appAttempt.getClientTokenMasterKey());
-  }
 }
   }
 
@@ -1598,11 +1593,20 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable {
   appAttempt.amrmToken =
       appAttempt.rmContext.getAMRMTokenSecretManager().createAndGetAMRMToken(
 appAttempt.applicationAttemptId);
-
+  appAttempt.registerClientToken();
   super.transition(appAttempt, event);
 }
   }
 
+  private void registerClientToken() {
+// register the ClientTokenMasterKey after it is saved in the store,
+// otherwise client may hold an invalid ClientToken after RM restarts.
+if (UserGroupInformation.isSecurityEnabled()) {
+  rmContext.getClientToAMTokenSecretManager()
+  .registerApplication(getAppAttemptId(), getClientTokenMasterKey());
+}
+  }
+
  private static final class LaunchFailedTransition extends BaseFinalTransition {
 
 public LaunchFailedTransition() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5ae5ac5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
index 9a4b6dc..7702ab1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java

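The ordering constraint behind this refactoring, as the moved comment states: the key must be registered only after the attempt is saved in the store, or a client could hold a token that becomes invalid after an RM restart; extracting registerClientToken() lets both the normal launch path and the token-regeneration path share that step. A stripped-down sketch (names invented, not the YARN state machine):

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class ClientTokenOrderingDemo {
  private final Set<String> stateStore = new HashSet<>();      // persisted attempts
  private final Map<String, byte[]> secretManager = new HashMap<>();
  private final boolean securityEnabled = true;

  void onAttemptStored(String attemptId, byte[] clientMasterKey) {
    stateStore.add(attemptId);                       // 1. persist first
    registerClientToken(attemptId, clientMasterKey); // 2. then register
    launchAttempt(attemptId);                        // 3. then launch
  }

  // Shared helper, like the extracted registerClientToken() in the patch:
  // registering before the save could leave clients with a key the RM
  // cannot recover after a restart.
  private void registerClientToken(String attemptId, byte[] key) {
    if (securityEnabled) {
      secretManager.put(attemptId, key);
    }
  }

  private void launchAttempt(String attemptId) {
    System.out.println("launch " + attemptId + ", token ready="
        + secretManager.containsKey(attemptId));
  }

  public static void main(String[] args) {
    new ClientTokenOrderingDemo().onAttemptStored("appattempt_1", new byte[] {1});
  }
}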
[24/50] [abbrv] hadoop git commit: HADOOP-14662. Update azure-storage sdk to version 5.4.0. Contributed by Thomas Marquardt.

2017-07-19 Thread xyao
HADOOP-14662. Update azure-storage sdk to version 5.4.0.
Contributed by Thomas Marquardt.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/06ece483
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/06ece483
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/06ece483

Branch: refs/heads/HDFS-7240
Commit: 06ece483222b82404ee198159c6866db89043459
Parents: 0a6d5c0
Author: Steve Loughran 
Authored: Sat Jul 15 16:27:17 2017 +0100
Committer: Steve Loughran 
Committed: Sat Jul 15 16:27:17 2017 +0100

--
 hadoop-project/pom.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/06ece483/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 3969474..b9819b4 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -1118,7 +1118,7 @@
     <dependency>
       <groupId>com.microsoft.azure</groupId>
       <artifactId>azure-storage</artifactId>
-      <version>5.3.0</version>
+      <version>5.4.0</version>
     </dependency>





[16/50] [abbrv] hadoop git commit: HDFS-12130. Optimizing permission check for getContentSummary.

2017-07-19 Thread xyao
HDFS-12130. Optimizing permission check for getContentSummary.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a29fe100
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a29fe100
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a29fe100

Branch: refs/heads/HDFS-7240
Commit: a29fe100b3c671954b759add5923a2b44af9e6a4
Parents: a5ae5ac
Author: Tsz-Wo Nicholas Sze 
Authored: Fri Jul 14 11:53:00 2017 -0700
Committer: Tsz-Wo Nicholas Sze 
Committed: Fri Jul 14 13:36:27 2017 -0700

--
 .../server/blockmanagement/BlockCollection.java |   4 +-
 .../ContentSummaryComputationContext.java   |  20 ++
 .../namenode/DirectoryWithQuotaFeature.java |   4 +-
 .../server/namenode/FSDirStatAndListingOp.java  |   9 +-
 .../server/namenode/FSPermissionChecker.java|  32 +++
 .../hadoop/hdfs/server/namenode/INode.java  |   9 +-
 .../hdfs/server/namenode/INodeDirectory.java|   9 +-
 .../hdfs/server/namenode/INodeReference.java|   3 +-
 .../snapshot/DirectorySnapshottableFeature.java |   3 +-
 .../snapshot/DirectoryWithSnapshotFeature.java  |   3 +-
 .../hdfs/server/namenode/snapshot/Snapshot.java |   4 +-
 .../TestGetContentSummaryWithPermission.java| 201 +++
 12 files changed, 285 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a29fe100/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
index 2f214be..b880590 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
@@ -21,6 +21,7 @@ import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.security.AccessControlException;
 
 /** 
  * This interface is used by the block manager to expose a
@@ -36,7 +37,8 @@ public interface BlockCollection {
   /** 
* Get content summary.
*/
-  public ContentSummary computeContentSummary(BlockStoragePolicySuite bsps);
+  public ContentSummary computeContentSummary(BlockStoragePolicySuite bsps)
+  throws AccessControlException;
 
   /**
* @return the number of blocks or block groups

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a29fe100/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
index 8d5aa0d..43e6f0d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
@@ -20,11 +20,14 @@ package org.apache.hadoop.hdfs.server.namenode;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.security.AccessControlException;
+
 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
 import java.io.IOException;
@@ -46,6 +49,8 @@ public class ContentSummaryComputationContext {
 
   public static final String REPLICATED = "Replicated";
   public static final Log LOG = LogFactory.getLog(INode.class);
+
+  private FSPermissionChecker pc;
   /**
* Constructor
*
@@ -57,6 +62,12 @@ public class ContentSummaryComputationContext {
*/
   public ContentSummaryComputationContext(FSDirectory dir,
   FSNamesystem fsn, long limitPerRun, long sleepMicroSec) {
+this(dir, fsn, limitPerRun, sleepMicroSec, null);
+  }
+
+  public ContentSummaryComputationContext(FSDirectory dir,

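The diff is cut short here, but the visible pieces (a FSPermissionChecker field, a new constructor overload defaulting it to null, and computeContentSummary now declaring AccessControlException) outline the optimization: permissions are checked while the subtree is walked rather than in a separate upfront pass. A toy version of that idea, where a null checker means "skip checks", mirroring the delegating constructor; all names below are illustrative:

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class SummaryWithPermissionDemo {
  static class Node {
    final String path; final boolean readable; final long size;
    final List<Node> children;
    Node(String path, boolean readable, long size, List<Node> children) {
      this.path = path; this.readable = readable;
      this.size = size; this.children = children;
    }
  }

  static class AccessControlException extends RuntimeException {
    AccessControlException(String path) { super("Permission denied: " + path); }
  }

  // pc == null disables checking, like the two-arg constructor that
  // delegates with a null permission checker.
  static long computeSummary(Node dir, Object pc) {
    if (pc != null && !dir.readable) {
      throw new AccessControlException(dir.path); // fail during the walk
    }
    long total = dir.size;
    for (Node child : dir.children) {
      total += computeSummary(child, pc);
    }
    return total;
  }

  public static void main(String[] args) {
    Node tree = new Node("/", true, 0, Arrays.asList(
        new Node("/a", true, 10, Collections.emptyList()),
        new Node("/b", true, 5, Collections.emptyList())));
    System.out.println(computeSummary(tree, new Object())); // 15
  }
}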
[12/50] [abbrv] hadoop git commit: YARN-6792. Incorrect XML convertion in NodeIDsInfo and LabelsToNodesInfo. Contributed by Giovanni Matteo Fumarola.

2017-07-19 Thread xyao
YARN-6792. Incorrect XML convertion in NodeIDsInfo and LabelsToNodesInfo. 
Contributed by Giovanni Matteo Fumarola.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/228ddaa3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/228ddaa3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/228ddaa3

Branch: refs/heads/HDFS-7240
Commit: 228ddaa31d812533b862576445494bc2cd8a2884
Parents: 43f0503
Author: Sunil G 
Authored: Fri Jul 14 08:07:05 2017 +0530
Committer: Sunil G 
Committed: Fri Jul 14 08:07:05 2017 +0530

--
 .../hadoop/yarn/server/resourcemanager/webapp/NodeIDsInfo.java | 5 -
 .../server/resourcemanager/webapp/dao/LabelsToNodesInfo.java   | 6 +-
 2 files changed, 9 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/228ddaa3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeIDsInfo.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeIDsInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeIDsInfo.java
index c23b02a..5f45b96 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeIDsInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodeIDsInfo.java
@@ -26,7 +26,10 @@ import javax.xml.bind.annotation.XmlAccessorType;
 import javax.xml.bind.annotation.XmlElement;
 import javax.xml.bind.annotation.XmlRootElement;
 
-@XmlRootElement(name = "labelsToNodesInfo")
+/**
+ * XML element used to represent a list of NodeIds.
+ */
+@XmlRootElement(name = "nodeIDsInfo")
 @XmlAccessorType(XmlAccessType.FIELD)
 public class NodeIDsInfo {
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/228ddaa3/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/LabelsToNodesInfo.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/LabelsToNodesInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/LabelsToNodesInfo.java
index 41dd410..e842d42 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/LabelsToNodesInfo.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/LabelsToNodesInfo.java
@@ -37,7 +37,11 @@ public class LabelsToNodesInfo {
   public LabelsToNodesInfo() {
   } // JAXB needs this
 
+  public LabelsToNodesInfo(Map labelsToNodes) {
+this.labelsToNodes = labelsToNodes;
+  }
+
   public Map getLabelsToNodes() {
-   return labelsToNodes;
+return labelsToNodes;
   }
 }
\ No newline at end of file

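A quick way to see what the one-line @XmlRootElement fix changes: the root tag of the marshalled XML comes from that annotation, so the copy-pasted "labelsToNodesInfo" name produced a wrong root element for node-ID payloads. A minimal sketch marshalling an invented class, assuming javax.xml.bind as bundled with JDK 8:

import java.io.StringWriter;
import javax.xml.bind.JAXBContext;
import javax.xml.bind.Marshaller;
import javax.xml.bind.annotation.XmlRootElement;

public class RootElementDemo {
  // Invented payload class; the annotation decides the XML root tag.
  @XmlRootElement(name = "nodeIDsInfo")
  public static class NodeIds {
    public String node = "host1:8041";
  }

  public static void main(String[] args) throws Exception {
    Marshaller m = JAXBContext.newInstance(NodeIds.class).createMarshaller();
    m.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, true);
    StringWriter out = new StringWriter();
    m.marshal(new NodeIds(), out);
    // Before the fix the root tag would have read <labelsToNodesInfo>
    // even for a list of node IDs; now it is <nodeIDsInfo>.
    System.out.println(out);
  }
}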




[20/50] [abbrv] hadoop git commit: Revert "HDFS-12130. Optimizing permission check for getContentSummary." to fix commit message.

2017-07-19 Thread xyao
Revert "HDFS-12130. Optimizing permission check for getContentSummary." to fix 
commit message.

This reverts commit a29fe100b3c671954b759add5923a2b44af9e6a4.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a1f12bb5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a1f12bb5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a1f12bb5

Branch: refs/heads/HDFS-7240
Commit: a1f12bb543778ddc243205eaa962e99da4d8f135
Parents: 9e0cde1
Author: Tsz-Wo Nicholas Sze 
Authored: Fri Jul 14 14:34:01 2017 -0700
Committer: Tsz-Wo Nicholas Sze 
Committed: Fri Jul 14 14:34:01 2017 -0700

--
 .../server/blockmanagement/BlockCollection.java |   4 +-
 .../ContentSummaryComputationContext.java   |  20 --
 .../namenode/DirectoryWithQuotaFeature.java |   4 +-
 .../server/namenode/FSDirStatAndListingOp.java  |   9 +-
 .../server/namenode/FSPermissionChecker.java|  32 ---
 .../hadoop/hdfs/server/namenode/INode.java  |   9 +-
 .../hdfs/server/namenode/INodeDirectory.java|   9 +-
 .../hdfs/server/namenode/INodeReference.java|   3 +-
 .../snapshot/DirectorySnapshottableFeature.java |   3 +-
 .../snapshot/DirectoryWithSnapshotFeature.java  |   3 +-
 .../hdfs/server/namenode/snapshot/Snapshot.java |   4 +-
 .../TestGetContentSummaryWithPermission.java| 201 ---
 12 files changed, 16 insertions(+), 285 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1f12bb5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
index b880590..2f214be 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
@@ -21,7 +21,6 @@ import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.ContentSummary;
-import org.apache.hadoop.security.AccessControlException;
 
 /** 
  * This interface is used by the block manager to expose a
@@ -37,8 +36,7 @@ public interface BlockCollection {
   /** 
* Get content summary.
*/
-  public ContentSummary computeContentSummary(BlockStoragePolicySuite bsps)
-  throws AccessControlException;
+  public ContentSummary computeContentSummary(BlockStoragePolicySuite bsps);
 
   /**
* @return the number of blocks or block groups

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1f12bb5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
index 43e6f0d..8d5aa0d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
@@ -20,14 +20,11 @@ package org.apache.hadoop.hdfs.server.namenode;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.io.WritableUtils;
-import org.apache.hadoop.security.AccessControlException;
-
 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
 import java.io.IOException;
@@ -49,8 +46,6 @@ public class ContentSummaryComputationContext {
 
   public static final String REPLICATED = "Replicated";
   public static final Log LOG = LogFactory.getLog(INode.class);
-
-  private FSPermissionChecker pc;
   /**
* Constructor
*
@@ -62,12 +57,6 @@ public class ContentSummaryComputationContext {
*/
   public ContentSummaryComputationContext(FSDirectory dir,
   FSNamesystem fsn, long limitPerRun, long sleepMicroSec) {
-this(dir, fsn, 

[04/50] [abbrv] hadoop git commit: YARN-5731. Preemption calculation is not accurate when reserved containers are present in queue. Contributed by Wangda Tan.

2017-07-19 Thread xyao
YARN-5731. Preemption calculation is not accurate when reserved containers are 
present in queue. Contributed by Wangda Tan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cf0d0844
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cf0d0844
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cf0d0844

Branch: refs/heads/HDFS-7240
Commit: cf0d0844d6ae25d537391edb9b65fca05d1848e6
Parents: e15e271
Author: Sunil G 
Authored: Thu Jul 13 16:48:29 2017 +0530
Committer: Sunil G 
Committed: Thu Jul 13 16:48:29 2017 +0530

--
 .../capacity/FifoCandidatesSelector.java|  6 +-
 .../ProportionalCapacityPreemptionPolicy.java   | 22 -
 .../CapacitySchedulerPreemptionTestBase.java|  7 +-
 ...TestCapacitySchedulerSurgicalPreemption.java | 97 +++-
 4 files changed, 125 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf0d0844/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java
index f4d7e92..f843db4 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/FifoCandidatesSelector.java
@@ -43,12 +43,12 @@ public class FifoCandidatesSelector
   LogFactory.getLog(FifoCandidatesSelector.class);
   private PreemptableResourceCalculator preemptableAmountCalculator;
 
-  FifoCandidatesSelector(
-  CapacitySchedulerPreemptionContext preemptionContext) {
+  FifoCandidatesSelector(CapacitySchedulerPreemptionContext preemptionContext,
+  boolean includeReservedResource) {
 super(preemptionContext);
 
 preemptableAmountCalculator = new PreemptableResourceCalculator(
-preemptionContext, false);
+preemptionContext, includeReservedResource);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cf0d0844/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
index 76d6637..719d2eb 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
@@ -232,7 +232,27 @@ public class ProportionalCapacityPreemptionPolicy
 }
 
 // initialize candidates preemption selection policies
-candidatesSelectionPolicies.add(new FifoCandidatesSelector(this));
+// When select candidates for reserved containers is enabled, exclude reserved
+// resource in fifo policy (less aggressive). Otherwise include reserved
+// resource.
+//
+// Why doing this? In YARN-4390, we added preemption-based-on-reserved-container
+// Support. To reduce unnecessary preemption for large containers. We will
+// not include reserved resources while calculating ideal-allocation in
+// FifoCandidatesSelector.
+//
+// Changes in YARN-4390 will significantly reduce number of containers preempted
+// When cluster has heterogeneous container requests. (Please check test
+// report: 

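A toy rendering of the knob that the new FifoCandidatesSelector constructor argument controls: whether a queue's reservations count toward the usage that preemption tries to claw back. The numbers and formula below are illustrative only, not the real ideal-allocation computation:

public class ReservedPreemptionDemo {
  static long toPreempt(long used, long reserved, long idealAssigned,
      boolean includeReservedResource) {
    long consumption = includeReservedResource ? used + reserved : used;
    return Math.max(0, consumption - idealAssigned);
  }

  public static void main(String[] args) {
    // Excluding reservations is the less aggressive mode: the reserved 4
    // units are not treated as over-use, so nothing is preempted.
    System.out.println(toPreempt(8, 4, 10, false)); // 0
    System.out.println(toPreempt(8, 4, 10, true));  // 2
  }
}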
hadoop git commit: YARN-6775. CapacityScheduler: Improvements to assignContainers, avoid unnecessary canAssignToUser/Queue calls. (Nathan Roberts via wangda)

2017-07-19 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 1d629c95a -> 5070c9ba3


YARN-6775. CapacityScheduler: Improvements to assignContainers, avoid 
unnecessary canAssignToUser/Queue calls. (Nathan Roberts via wangda)

Change-Id: Iaf9bb7e5ed3aa1300abdccf87ae6fcbddbd25e3e


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5070c9ba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5070c9ba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5070c9ba

Branch: refs/heads/branch-2
Commit: 5070c9ba39d1f9c6b46ce9df299584ea3ed5f506
Parents: 1d629c9
Author: Wangda Tan 
Authored: Wed Jul 19 15:25:21 2017 -0700
Committer: Wangda Tan 
Committed: Wed Jul 19 15:25:21 2017 -0700

--
 .../scheduler/activities/ActivitiesLogger.java  |  33 +++--
 .../scheduler/capacity/LeafQueue.java   |  83 ---
 .../capacity/TestCapacityScheduler.java | 145 +++
 .../scheduler/capacity/TestLeafQueue.java   |  10 +-
 4 files changed, 231 insertions(+), 40 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5070c9ba/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesLogger.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesLogger.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesLogger.java
index 3f8ed55..12aff02 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesLogger.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/activities/ActivitiesLogger.java
@@ -63,9 +63,14 @@ public class ActivitiesLogger {
 SchedulerApplicationAttempt application, Priority priority,
 String diagnostic) {
   String type = "app";
-  recordActivity(activitiesManager, node, application.getQueueName(),
-  application.getApplicationId().toString(), priority,
-  ActivityState.REJECTED, diagnostic, type);
+  if (activitiesManager == null) {
+return;
+  }
+  if (activitiesManager.shouldRecordThisNode(node.getNodeID())) {
+recordActivity(activitiesManager, node, application.getQueueName(),
+application.getApplicationId().toString(), priority,
+ActivityState.REJECTED, diagnostic, type);
+  }
   finishSkippedAppAllocationRecording(activitiesManager,
   application.getApplicationId(), ActivityState.REJECTED, diagnostic);
 }
@@ -203,8 +208,13 @@ public class ActivitiesLogger {
 public static void recordQueueActivity(ActivitiesManager activitiesManager,
 SchedulerNode node, String parentQueueName, String queueName,
 ActivityState state, String diagnostic) {
-  recordActivity(activitiesManager, node, parentQueueName, queueName, null,
-  state, diagnostic, null);
+  if (activitiesManager == null) {
+return;
+  }
+  if (activitiesManager.shouldRecordThisNode(node.getNodeID())) {
+recordActivity(activitiesManager, node, parentQueueName, queueName,
+null, state, diagnostic, null);
+  }
 }
   }
 
@@ -266,13 +276,10 @@ public class ActivitiesLogger {
   private static void recordActivity(ActivitiesManager activitiesManager,
   SchedulerNode node, String parentName, String childName,
   Priority priority, ActivityState state, String diagnostic, String type) {
-if (activitiesManager == null) {
-  return;
-}
-if (activitiesManager.shouldRecordThisNode(node.getNodeID())) {
-  activitiesManager.addSchedulingActivityForNode(node.getNodeID(),
-  parentName, childName, priority != null ? priority.toString() : null,
-  state, diagnostic, type);
-}
+
+activitiesManager.addSchedulingActivityForNode(node.getNodeID(), parentName,
+childName, priority != null ? priority.toString() : null, state,
+diagnostic, type);
+
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5070c9ba/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java

hadoop git commit: HADOOP-14666. Tests use assertTrue(....equals(...)) instead of assertEquals()

2017-07-19 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/trunk 077fcf6a9 -> c21c26039


HADOOP-14666. Tests use assertTrue(equals(...)) instead of assertEquals()


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c21c2603
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c21c2603
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c21c2603

Branch: refs/heads/trunk
Commit: c21c26039238f633a0d2df9670f636d026c35649
Parents: 077fcf6
Author: Daniel Templeton 
Authored: Wed Jul 19 13:58:55 2017 -0700
Committer: Daniel Templeton 
Committed: Wed Jul 19 13:58:55 2017 -0700

--
 .../authentication/util/TestCertificateUtil.java |  6 --
 .../java/org/apache/hadoop/conf/TestDeprecatedKeys.java  |  2 +-
 .../apache/hadoop/crypto/key/TestKeyProviderFactory.java | 11 +++
 .../src/test/java/org/apache/hadoop/fs/TestHardLink.java |  2 +-
 .../security/alias/TestCredentialProviderFactory.java| 10 +-
 .../hadoop/security/authorize/TestAccessControlList.java |  8 
 .../apache/hadoop/util/TestReadWriteDiskValidator.java   |  5 +++--
 7 files changed, 25 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c21c2603/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestCertificateUtil.java
--
diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestCertificateUtil.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestCertificateUtil.java
index ce4176c..5794eb6 100644
--- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestCertificateUtil.java
+++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestCertificateUtil.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.security.authentication.util;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -86,8 +88,8 @@ public class TestCertificateUtil {
 + "Mzc1xA==";
 try {
   RSAPublicKey pk = CertificateUtil.parseRSAPublicKey(pem);
-  assertTrue(pk != null);
-  assertTrue(pk.getAlgorithm().equals("RSA"));
+  assertNotNull(pk);
+  assertEquals("RSA", pk.getAlgorithm());
 } catch (ServletException se) {
   fail("Should not have thrown ServletException");
 }

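Why the mechanical rewrite is worth doing: assertEquals reports the expected and actual values on failure, while assertTrue(x.equals(y)) can only say "expected true". A small demonstration, assuming JUnit 4 on the classpath:

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;

public class AssertStyleDemo {
  public static void main(String[] args) {
    String algorithm = "DSA";
    try {
      assertTrue(algorithm.equals("RSA"));
    } catch (AssertionError e) {
      System.out.println(e); // java.lang.AssertionError, no detail at all
    }
    try {
      assertEquals("RSA", algorithm);
    } catch (AssertionError e) {
      System.out.println(e); // ...expected:<[RSA]> but was:<[DSA]>
    }
  }
}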
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c21c2603/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestDeprecatedKeys.java
--
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestDeprecatedKeys.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestDeprecatedKeys.java
index 3036d0c..167daa5 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestDeprecatedKeys.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestDeprecatedKeys.java
@@ -35,7 +35,7 @@ public class TestDeprecatedKeys extends TestCase {
 conf.set("topology.script.file.name", "xyz");
 conf.set("topology.script.file.name", "xyz");
 String scriptFile = conf.get(CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY);
-assertTrue(scriptFile.equals("xyz")) ;
+assertEquals("xyz", scriptFile) ;
   }
   
   //Tests reading / writing a conf file with deprecation after setting

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c21c2603/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderFactory.java
--
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderFactory.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderFactory.java
index 53785bc..db30eb0 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderFactory.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderFactory.java
@@ -189,8 +189,10 @@ public class TestKeyProviderFactory {
 assertTrue("Returned Keys should have included key4.", 
keys.contains("key4"));
 
 List kvl = provider.getKeyVersions("key3");
-assertTrue("KeyVersions should have been returned for key3.", kvl.size() 
== 1);
-assertTrue("KeyVersions should 

hadoop git commit: HADOOP-14666. Tests use assertTrue(....equals(...)) instead of assertEquals()

2017-07-19 Thread templedf
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 10a6b6b12 -> 1d629c95a


HADOOP-14666. Tests use assertTrue(equals(...)) instead of assertEquals()

(cherry picked from commit c21c26039238f633a0d2df9670f636d026c35649)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1d629c95
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1d629c95
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1d629c95

Branch: refs/heads/branch-2
Commit: 1d629c95af65a9ae831e485411824e108b54a1fb
Parents: 10a6b6b
Author: Daniel Templeton 
Authored: Wed Jul 19 13:58:55 2017 -0700
Committer: Daniel Templeton 
Committed: Wed Jul 19 14:01:10 2017 -0700

--
 .../authentication/util/TestCertificateUtil.java |  6 --
 .../java/org/apache/hadoop/conf/TestDeprecatedKeys.java  |  2 +-
 .../apache/hadoop/crypto/key/TestKeyProviderFactory.java | 11 +++
 .../src/test/java/org/apache/hadoop/fs/TestHardLink.java |  2 +-
 .../security/alias/TestCredentialProviderFactory.java| 10 +-
 .../hadoop/security/authorize/TestAccessControlList.java |  8 
 .../apache/hadoop/util/TestReadWriteDiskValidator.java   |  5 +++--
 7 files changed, 25 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1d629c95/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestCertificateUtil.java
--
diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestCertificateUtil.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestCertificateUtil.java
index ce4176c..5794eb6 100644
--- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestCertificateUtil.java
+++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestCertificateUtil.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.security.authentication.util;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -86,8 +88,8 @@ public class TestCertificateUtil {
 + "Mzc1xA==";
 try {
   RSAPublicKey pk = CertificateUtil.parseRSAPublicKey(pem);
-  assertTrue(pk != null);
-  assertTrue(pk.getAlgorithm().equals("RSA"));
+  assertNotNull(pk);
+  assertEquals("RSA", pk.getAlgorithm());
 } catch (ServletException se) {
   fail("Should not have thrown ServletException");
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1d629c95/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestDeprecatedKeys.java
--
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestDeprecatedKeys.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestDeprecatedKeys.java
index 3036d0c..167daa5 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestDeprecatedKeys.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestDeprecatedKeys.java
@@ -35,7 +35,7 @@ public class TestDeprecatedKeys extends TestCase {
 conf.set("topology.script.file.name", "xyz");
 conf.set("topology.script.file.name", "xyz");
 String scriptFile = conf.get(CommonConfigurationKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY);
-assertTrue(scriptFile.equals("xyz")) ;
+assertEquals("xyz", scriptFile) ;
   }
   
   //Tests reading / writing a conf file with deprecation after setting

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1d629c95/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderFactory.java
--
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderFactory.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderFactory.java
index 53785bc..db30eb0 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderFactory.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/crypto/key/TestKeyProviderFactory.java
@@ -189,8 +189,10 @@ public class TestKeyProviderFactory {
 assertTrue("Returned Keys should have included key4.", 
keys.contains("key4"));
 
 List kvl = provider.getKeyVersions("key3");
-assertTrue("KeyVersions should have been 

svn commit: r1802426 [2/2] - in /hadoop/common/site/main: author/src/documentation/content/xdocs/ publish/ publish/docs/r2.8.1/ publish/docs/r2.8.1/api/ publish/docs/r2.8.1/api/org/ publish/docs/r2.8.

2017-07-19 Thread vinodkv

Modified: hadoop/common/site/main/author/src/documentation/content/xdocs/index.xml
URL: http://svn.apache.org/viewvc/hadoop/common/site/main/author/src/documentation/content/xdocs/index.xml?rev=1802426&r1=1802425&r2=1802426&view=diff
==
--- hadoop/common/site/main/author/src/documentation/content/xdocs/index.xml (original)
+++ hadoop/common/site/main/author/src/documentation/content/xdocs/index.xml Wed Jul 19 20:02:27 2017
@@ -152,7 +152,18 @@
 
   
   
-26 May 2017: Release 3.0.0-alpha3 available 
+08 June, 2017: Release 2.8.1 available 
+
+  This is a security release in the 2.8.0 release line. It consists of 2.8.0 plus security fixes. Users on 2.8.0 are encouraged to upgrade to 2.8.1.
+
+
+  Please note that 2.8.x release line continues to be not yet ready for production use. Critical issues are being ironed out via testing and downstream
+  adoption. Production users should wait for a subsequent release in the 2.8.x line.
+
+  
+
+  
+26 May, 2017: Release 3.0.0-alpha3 available 
 
   This is a security release in the 3.0.0 release line. It consists of alpha2 plus security fixes, along with necessary build-related fixes. Users on 3.0.0-alpha1 and 3.0.0-alpha2 are encouraged to upgrade to 3.0.0-alpha3.
 

Modified: hadoop/common/site/main/author/src/documentation/content/xdocs/releases.xml
URL: http://svn.apache.org/viewvc/hadoop/common/site/main/author/src/documentation/content/xdocs/releases.xml?rev=1802426&r1=1802425&r2=1802426&view=diff
==
--- hadoop/common/site/main/author/src/documentation/content/xdocs/releases.xml (original)
+++ hadoop/common/site/main/author/src/documentation/content/xdocs/releases.xml Wed Jul 19 20:02:27 2017
@@ -44,18 +44,18 @@
 <a href="https://dist.apache.org/repos/dist/release/hadoop/common/hadoop-3.0.0-alpha4/hadoop-3.0.0-alpha4.tar.gz.mds">checksum file</a>


- 2.8.0
- 22 March, 2017
- <a href="http://www.apache.org/dyn/closer.cgi/hadoop/common/hadoop-2.8.0/hadoop-2.8.0-src.tar.gz">source</a>
- <a href="https://dist.apache.org/repos/dist/release/hadoop/common/hadoop-2.8.0/hadoop-2.8.0-src.tar.gz.asc">signature</a>
- <a href="https://dist.apache.org/repos/dist/release/hadoop/common/hadoop-2.8.0/hadoop-2.8.0-src.tar.gz.mds">77B6A9A5 F1324A00..</a>
+ 2.8.1
+ 08 June, 2017
+ <a href="http://www.apache.org/dyn/closer.cgi/hadoop/common/hadoop-2.8.1/hadoop-2.8.1-src.tar.gz">source</a>
+ <a href="https://dist.apache.org/repos/dist/release/hadoop/common/hadoop-2.8.1/hadoop-2.8.1-src.tar.gz.asc">signature</a>
+ <a href="https://dist.apache.org/repos/dist/release/hadoop/common/hadoop-2.8.1/hadoop-2.8.1-src.tar.gz.mds">0748C0E2 519382F2..</a>


 
 
- <a href="http://www.apache.org/dyn/closer.cgi/hadoop/common/hadoop-2.8.0/hadoop-2.8.0.tar.gz">binary</a>
- <a href="https://dist.apache.org/repos/dist/release/hadoop/common/hadoop-2.8.0/hadoop-2.8.0.tar.gz.asc">signature</a>
- <a href="https://dist.apache.org/repos/dist/release/hadoop/common/hadoop-2.8.0/hadoop-2.8.0.tar.gz.mds">3C0C6053 651970C3..</a>
+ <a href="http://www.apache.org/dyn/closer.cgi/hadoop/common/hadoop-2.8.1/hadoop-2.8.1.tar.gz">binary</a>
+ <a href="https://dist.apache.org/repos/dist/release/hadoop/common/hadoop-2.8.1/hadoop-2.8.1.tar.gz.asc">signature</a>
+ <a href="https://dist.apache.org/repos/dist/release/hadoop/common/hadoop-2.8.1/hadoop-2.8.1.tar.gz.mds">B5BE5275 78EF2C85..</a>


  2.7.3
@@ -142,6 +142,18 @@
   The alpha4 <a href="http://hadoop.apache.org/docs/r3.0.0-alpha4/hadoop-project-dist/hadoop-common/release/3.0.0-alpha4/RELEASENOTES.3.0.0-alpha4.html">release notes</a> and <a href="http://hadoop.apache.org/docs/r3.0.0-alpha4/hadoop-project-dist/hadoop-common/release/3.0.0-alpha4/CHANGES.3.0.0-alpha4.html">changelog</a> detail the changes since 3.0.0-alpha3.
 
   
+
+  
+08 June, 2017: Release 2.8.1 available 
+
+  This is a security release in the 2.8.0 release line. It consists of 2.8.0 plus security fixes. Users on 2.8.0 are encouraged to upgrade to 2.8.1.
+
+
+  Please note that 2.8.x release line continues to be not yet ready for production use. Critical issues are being ironed out via testing and downstream
+  adoption. Production users should wait for a subsequent release in the 2.8.x line.
+
+  
+
   
 26 May 2017: Release 3.0.0-alpha3 available 
 

Modified: hadoop/common/site/main/author/src/documentation/content/xdocs/site.xml
URL: http://svn.apache.org/viewvc/hadoop/common/site/main/author/src/documentation/content/xdocs/site.xml?rev=1802426&r1=1802425&r2=1802426&view=diff
==
--- 

svn commit: r1802426 [1/2] - in /hadoop/common/site/main: author/src/documentation/content/xdocs/ publish/ publish/docs/r2.8.1/ publish/docs/r2.8.1/api/ publish/docs/r2.8.1/api/org/ publish/docs/r2.8.

2017-07-19 Thread vinodkv
Author: vinodkv
Date: Wed Jul 19 20:02:27 2017
New Revision: 1802426

URL: http://svn.apache.org/viewvc?rev=1802426&view=rev
Log:
Updating site for release 2.8.1.

Added:
hadoop/common/site/main/publish/docs/r2.8.1/
hadoop/common/site/main/publish/docs/r2.8.1/api/
hadoop/common/site/main/publish/docs/r2.8.1/api/org/
hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/
hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/
hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/ant/

hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/ant/condition/
hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/class-use/

hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/classification/

hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/classification/class-use/
hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/conf/

hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/conf/class-use/
hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/contrib/

hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/contrib/bkjournal/

hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/contrib/utils/

hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/contrib/utils/join/
hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/crypto/

hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/crypto/key/

hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/crypto/key/class-use/

hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/crypto/key/kms/

hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/crypto/key/kms/server/

hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/crypto/random/
hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/examples/

hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/examples/dancing/

hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/examples/pi/

hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/examples/pi/math/

hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/examples/terasort/
hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/filecache/

hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/filecache/class-use/
hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/fs/
hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/fs/adl/

hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/fs/adl/class-use/

hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/fs/adl/oauth2/

hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/fs/adl/oauth2/class-use/
hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/fs/azure/

hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/fs/azure/class-use/

hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/fs/azure/metrics/

hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/fs/azure/metrics/class-use/

hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/fs/class-use/
hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/fs/crypto/
hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/fs/ftp/

hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/fs/ftp/class-use/
hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/fs/http/

hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/fs/http/client/

hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/fs/http/server/

hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/fs/permission/

hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/fs/permission/class-use/
hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/fs/s3/

hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/fs/s3/class-use/
hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/fs/s3a/

hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/fs/s3a/class-use/

hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/fs/s3native/

hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/fs/s3native/class-use/
hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/fs/sftp/
hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/fs/shell/

hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/fs/shell/find/
hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/fs/swift/

hadoop/common/site/main/publish/docs/r2.8.1/api/org/apache/hadoop/fs/swift/auth/


hadoop git commit: YARN-6777. Support for ApplicationMasterService processing chain of interceptors. (asuresh)

2017-07-19 Thread asuresh
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3556e36be -> 077fcf6a9


YARN-6777. Support for ApplicationMasterService processing chain of 
interceptors. (asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/077fcf6a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/077fcf6a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/077fcf6a

Branch: refs/heads/trunk
Commit: 077fcf6a96e420e7f36350931722b8603d010cf1
Parents: 3556e36
Author: Arun Suresh 
Authored: Mon Jul 17 17:02:22 2017 -0700
Committer: Arun Suresh 
Committed: Wed Jul 19 12:26:40 2017 -0700

--
 .../ams/ApplicationMasterServiceContext.java|  29 
 .../ams/ApplicationMasterServiceProcessor.java  |  30 ++--
 .../hadoop/yarn/conf/YarnConfiguration.java |   5 +-
 .../src/main/resources/yarn-default.xml |  10 ++
 .../resourcemanager/AMSProcessingChain.java | 102 
 .../ApplicationMasterService.java   |  49 --
 .../resourcemanager/DefaultAMSProcessor.java|  69 
 ...pportunisticContainerAllocatorAMService.java |  67 +---
 .../yarn/server/resourcemanager/RMContext.java  |   3 +-
 .../TestApplicationMasterService.java   | 163 ++-
 10 files changed, 446 insertions(+), 81 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/077fcf6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceContext.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceContext.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceContext.java
new file mode 100644
index 000..988c727
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceContext.java
@@ -0,0 +1,29 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.ams;
+
+/**
+ * This is a marker interface for a context object that is injected into
+ * the ApplicationMasterService processor. The processor implementation
+ * is free to type cast this based on the availability of the context's
+ * implementation in the classpath.
+ */
+public interface ApplicationMasterServiceContext {
+
+}
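
The javadoc above says a processor may type-cast the injected context when the
implementation is on the classpath. A minimal sketch of that cast (hypothetical
helper method; it assumes the RM-side RMContext, which this commit's RMContext
change exposes as the context's concrete type):

    // Hypothetical helper, not part of the patch.
    static void useRMState(ApplicationMasterServiceContext amsContext) {
      if (amsContext instanceof RMContext) {
        RMContext rmContext = (RMContext) amsContext;
        rmContext.getRMApps(); // RM-specific state becomes reachable here
      }
    }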

http://git-wip-us.apache.org/repos/asf/hadoop/blob/077fcf6a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceProcessor.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceProcessor.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceProcessor.java
index b426f48..b7d925a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceProcessor.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceProcessor.java
@@ -38,34 +38,44 @@ import java.io.IOException;
 public interface ApplicationMasterServiceProcessor {
 
   /**
+   * Initialize with an ApplicationMasterServiceContext as well as the
+   * next processor in the chain.
+   * @param amsContext AMSContext.
+   * @param nextProcessor next ApplicationMasterServiceProcessor
+   */
+  void init(ApplicationMasterServiceContext amsContext,
+  ApplicationMasterServiceProcessor nextProcessor);
+
+  /**
* Register AM attempt.
* @param applicationAttemptId applicationAttemptId.
* @param request Register Request.
-   * @return Register Response.
+   * @param response Register Response.
* @throws IOException IOException.
*/
-  
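
The init(amsContext, nextProcessor) hook above is what turns the processors
into a chain: each interceptor keeps the next processor and delegates after
doing its own work. A sketch under that assumption (hypothetical class,
declared abstract so the interface's remaining methods can stay elided;
signatures abridged from the diff above):

    public abstract class LoggingAMSProcessor
        implements ApplicationMasterServiceProcessor {
      private ApplicationMasterServiceProcessor next;

      @Override
      public void init(ApplicationMasterServiceContext amsContext,
          ApplicationMasterServiceProcessor nextProcessor) {
        this.next = nextProcessor; // remember the tail of the chain
      }

      @Override
      public void registerApplicationMaster(
          ApplicationAttemptId applicationAttemptId,
          RegisterApplicationMasterRequest request,
          RegisterApplicationMasterResponse response) throws IOException {
        // interceptor work goes here, then hand off to the rest of the chain
        next.registerApplicationMaster(applicationAttemptId, request, response);
      }
    }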

hadoop git commit: HDFS-12139. HTTPFS liststatus returns incorrect pathSuffix for path of file. Contributed by Yongjun Zhang.

2017-07-19 Thread yjzhangal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 23b920cd7 -> 10a6b6b12


HDFS-12139. HTTPFS liststatus returns incorrect pathSuffix for path of file. 
Contributed by Yongjun Zhang.

(cherry picked from commit 3556e36be30211f46ac38899ce11a4d4cac6d635)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/10a6b6b1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/10a6b6b1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/10a6b6b1

Branch: refs/heads/branch-2
Commit: 10a6b6b12c2ba4905c399697cb974525994b6e9d
Parents: 23b920c
Author: Yongjun Zhang 
Authored: Wed Jul 19 10:54:13 2017 -0700
Committer: Yongjun Zhang 
Committed: Wed Jul 19 11:44:35 2017 -0700

--
 .../hadoop/fs/http/server/FSOperations.java | 15 ++-
 .../fs/http/client/BaseTestHttpFSWith.java  | 26 +++-
 2 files changed, 34 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/10a6b6b1/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
index 7a8634e..9471c0f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
@@ -75,15 +75,17 @@ public class FSOperations {
 
   /**
* @param fileStatuses list of FileStatus objects
+   * @param isFile is the fileStatuses from a file path
* @return JSON map suitable for wire transport
*/
   @SuppressWarnings({"unchecked"})
-  private static Map toJson(FileStatus[] fileStatuses) {
+  private static Map toJson(FileStatus[] fileStatuses,
+  boolean isFile) {
 Map json = new LinkedHashMap<>();
 Map inner = new LinkedHashMap<>();
 JSONArray statuses = new JSONArray();
 for (FileStatus f : fileStatuses) {
-  statuses.add(toJsonInner(f, false));
+  statuses.add(toJsonInner(f, isFile));
 }
 inner.put(HttpFSFileSystem.FILE_STATUS_JSON, statuses);
 json.put(HttpFSFileSystem.FILE_STATUSES_JSON, inner);
@@ -126,13 +128,14 @@ public class FSOperations {
* These two classes are slightly different, due to the impedance
* mismatches between the WebHDFS and FileSystem APIs.
* @param entries
+   * @param isFile is the entries from a file path
* @return json
*/
   private static Map toJson(FileSystem.DirectoryEntries
-  entries) {
+  entries, boolean isFile) {
 Map json = new LinkedHashMap<>();
 Map inner = new LinkedHashMap<>();
-Map fileStatuses = toJson(entries.getEntries());
+Map fileStatuses = toJson(entries.getEntries(), isFile);
 inner.put(HttpFSFileSystem.PARTIAL_LISTING_JSON, fileStatuses);
 inner.put(HttpFSFileSystem.REMAINING_ENTRIES_JSON, entries.hasMore() ? 1
 : 0);
@@ -687,7 +690,7 @@ public class FSOperations {
 @Override
 public Map execute(FileSystem fs) throws IOException {
   FileStatus[] fileStatuses = fs.listStatus(path, filter);
-  return toJson(fileStatuses);
+  return toJson(fileStatuses, fs.getFileStatus(path).isFile());
 }
 
 @Override
@@ -732,7 +735,7 @@ public class FSOperations {
   WrappedFileSystem wrappedFS = new WrappedFileSystem(fs);
   FileSystem.DirectoryEntries entries =
   wrappedFS.listStatusBatch(path, token);
-  return toJson(entries);
+  return toJson(entries, wrappedFS.getFileStatus(path).isFile());
 }
   }
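
The point of the new isFile flag: when the listed path is itself a file, the
inner status should carry an empty pathSuffix, as WebHDFS does, so a client
resolves the entry to the file rather than to a bogus child such as
/tmp/sample.txt/sample.txt. A test-style sketch of that guarantee (paths
hypothetical; httpFs is assumed to be an HttpFS-backed FileSystem):

    static void assertFileListStatus(FileSystem httpFs) throws IOException {
      Path file = new Path("/tmp/sample.txt"); // assumed to exist
      FileStatus[] stati = httpFs.listStatus(file);
      Assert.assertEquals(1, stati.length);
      Assert.assertEquals("/tmp/sample.txt",
          stati[0].getPath().toUri().getPath());
    }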
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/10a6b6b1/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
index 8dd0116..d794143 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
@@ -363,8 +363,15 @@ public abstract class BaseTestHttpFSWith extends 
HFSTestCase {
 

hadoop git commit: HADOOP-14642. wasb: add support for caching Authorization and SASKeys. Contributed by Sivaguru Sankaridurg.

2017-07-19 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 e0297ffbc -> 23b920cd7


HADOOP-14642. wasb: add support for caching Authorization and SASKeys. 
Contributed by Sivaguru Sankaridurg.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/23b920cd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/23b920cd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/23b920cd

Branch: refs/heads/branch-2
Commit: 23b920cd7ab23ad71adc75439e8bb6ec5a7924bd
Parents: e0297ff
Author: Jitendra Pandey 
Authored: Wed Jul 19 00:38:45 2017 -0700
Committer: Jitendra Pandey 
Committed: Wed Jul 19 11:37:36 2017 -0700

--
 .../src/main/resources/core-default.xml |   9 +
 .../conf/TestCommonConfigurationFields.java |   1 +
 .../hadoop/fs/azure/CachingAuthorizer.java  | 232 +++
 .../fs/azure/LocalSASKeyGeneratorImpl.java  |  28 ++-
 .../hadoop/fs/azure/NativeAzureFileSystem.java  |   3 -
 .../fs/azure/RemoteSASKeyGeneratorImpl.java |  46 +++-
 .../fs/azure/RemoteWasbAuthorizerImpl.java  |  38 ++-
 .../hadoop/fs/azure/SASKeyGeneratorImpl.java|   4 +-
 .../hadoop-azure/src/site/markdown/index.md |  38 +++
 .../hadoop/fs/azure/AbstractWasbTestBase.java   |   5 +
 .../hadoop/fs/azure/MockWasbAuthorizerImpl.java |  22 +-
 .../TestNativeAzureFSAuthorizationCaching.java  |  60 +
 .../TestNativeAzureFileSystemAuthorization.java |  86 ++-
 ...veAzureFileSystemAuthorizationWithOwner.java |   2 +-
 .../fs/azure/TestWasbRemoteCallHelper.java  |   6 +-
 .../src/test/resources/azure-test.xml   |   3 +-
 16 files changed, 500 insertions(+), 83 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/23b920cd/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml 
b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 0ea607f..4d6b19e 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -1347,6 +1347,15 @@
   
 
 
+<property>
+  <name>fs.azure.authorization.caching.enable</name>
+  <value>true</value>
+  <description>
+    Config flag to enable caching of authorization results and saskeys in WASB.
+    This flag is relevant only when fs.azure.authorization is enabled.
+  </description>
+</property>
+
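
A short client-side sketch of the new switch (assumed usage; the account and
container names are made up). Caching only has an effect once WASB
authorization itself is enabled:

    Configuration conf = new Configuration();
    conf.setBoolean("fs.azure.authorization", true);
    conf.setBoolean("fs.azure.authorization.caching.enable", true);
    FileSystem wasb = FileSystem.get(
        URI.create("wasb://container@account.blob.core.windows.net/"), conf);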
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/23b920cd/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
index 30e08d5..65e452e 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
@@ -111,6 +111,7 @@ public class TestCommonConfigurationFields extends 
TestConfigurationFieldsBase {
 xmlPropsToSkipCompare.add("fs.azure.local.sas.key.mode");
 xmlPropsToSkipCompare.add("fs.azure.secure.mode");
 xmlPropsToSkipCompare.add("fs.azure.authorization");
+xmlPropsToSkipCompare.add("fs.azure.authorization.caching.enable");
 
 // ADL properties are in a different subtree
 // - org.apache.hadoop.hdfs.web.ADLConfKeys

http://git-wip-us.apache.org/repos/asf/hadoop/blob/23b920cd/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/CachingAuthorizer.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/CachingAuthorizer.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/CachingAuthorizer.java
new file mode 100644
index 000..016ae74
--- /dev/null
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/CachingAuthorizer.java
@@ -0,0 +1,232 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable 

hadoop git commit: HDFS-12139. HTTPFS liststatus returns incorrect pathSuffix for path of file. Contributed by Yongjun Zhang.

2017-07-19 Thread yjzhangal
Repository: hadoop
Updated Branches:
  refs/heads/trunk 413b23eb0 -> 3556e36be


HDFS-12139. HTTPFS liststatus returns incorrect pathSuffix for path of file. 
Contributed by Yongjun Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3556e36b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3556e36b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3556e36b

Branch: refs/heads/trunk
Commit: 3556e36be30211f46ac38899ce11a4d4cac6d635
Parents: 413b23e
Author: Yongjun Zhang 
Authored: Wed Jul 19 10:54:13 2017 -0700
Committer: Yongjun Zhang 
Committed: Wed Jul 19 10:56:50 2017 -0700

--
 .../hadoop/fs/http/server/FSOperations.java | 15 ++-
 .../fs/http/client/BaseTestHttpFSWith.java  | 26 +++-
 2 files changed, 34 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3556e36b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
index 0fb665a..f1615c3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
@@ -75,15 +75,17 @@ public class FSOperations {
 
   /**
* @param fileStatuses list of FileStatus objects
+   * @param isFile is the fileStatuses from a file path
* @return JSON map suitable for wire transport
*/
   @SuppressWarnings({"unchecked"})
-  private static Map toJson(FileStatus[] fileStatuses) {
+  private static Map toJson(FileStatus[] fileStatuses,
+  boolean isFile) {
 Map json = new LinkedHashMap<>();
 Map inner = new LinkedHashMap<>();
 JSONArray statuses = new JSONArray();
 for (FileStatus f : fileStatuses) {
-  statuses.add(toJsonInner(f, false));
+  statuses.add(toJsonInner(f, isFile));
 }
 inner.put(HttpFSFileSystem.FILE_STATUS_JSON, statuses);
 json.put(HttpFSFileSystem.FILE_STATUSES_JSON, inner);
@@ -129,13 +131,14 @@ public class FSOperations {
* These two classes are slightly different, due to the impedance
* mismatches between the WebHDFS and FileSystem APIs.
* @param entries
+   * @param isFile is the entries from a file path
* @return json
*/
   private static Map toJson(FileSystem.DirectoryEntries
-  entries) {
+  entries, boolean isFile) {
 Map json = new LinkedHashMap<>();
 Map inner = new LinkedHashMap<>();
-Map fileStatuses = toJson(entries.getEntries());
+Map fileStatuses = toJson(entries.getEntries(), isFile);
 inner.put(HttpFSFileSystem.PARTIAL_LISTING_JSON, fileStatuses);
 inner.put(HttpFSFileSystem.REMAINING_ENTRIES_JSON, entries.hasMore() ? 1
 : 0);
@@ -690,7 +693,7 @@ public class FSOperations {
 @Override
 public Map execute(FileSystem fs) throws IOException {
   FileStatus[] fileStatuses = fs.listStatus(path, filter);
-  return toJson(fileStatuses);
+  return toJson(fileStatuses, fs.getFileStatus(path).isFile());
 }
 
 @Override
@@ -735,7 +738,7 @@ public class FSOperations {
   WrappedFileSystem wrappedFS = new WrappedFileSystem(fs);
   FileSystem.DirectoryEntries entries =
   wrappedFS.listStatusBatch(path, token);
-  return toJson(entries);
+  return toJson(entries, wrappedFS.getFileStatus(path).isFile());
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3556e36b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
index 0fd3f91..e23093e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
@@ -364,8 +364,15 @@ public abstract class BaseTestHttpFSWith extends 
HFSTestCase {
 assertEquals(status2.getLen(), status1.getLen());
 
 FileStatus[] stati = 

hadoop git commit: HDFS-12158. Secondary Namenode's web interface lack configs for X-FRAME-OPTIONS protection. Contributed by Mukul Kumar Singh.

2017-07-19 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8.2 a389a4f26 -> 38521af08


HDFS-12158. Secondary Namenode's web interface lack configs for X-FRAME-OPTIONS 
protection. Contributed by Mukul Kumar Singh.

(cherry picked from commit 413b23eb04eee24275257ab462133e0818f87449)
(cherry picked from commit e0297ffbc89e9f037d5f6a8c5874ce8794656e0c)
(cherry picked from commit 6ed569df217a2c0d0e23661d2353c1fac428ee80)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/38521af0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/38521af0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/38521af0

Branch: refs/heads/branch-2.8.2
Commit: 38521af0880c4e19a16571c3adbfe668ba4f79c2
Parents: a389a4f
Author: Anu Engineer 
Authored: Wed Jul 19 10:29:06 2017 -0700
Committer: Anu Engineer 
Committed: Wed Jul 19 10:50:54 2017 -0700

--
 .../hdfs/server/namenode/SecondaryNameNode.java | 10 +
 .../namenode/TestNameNodeHttpServerXFrame.java  | 22 
 2 files changed, 32 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/38521af0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
index acb2c8a..175d138 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
@@ -479,6 +479,16 @@ public class SecondaryNameNode implements Runnable,
 DFS_SECONDARY_NAMENODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,
 DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY);
 
+final boolean xFrameEnabled = conf.getBoolean(
+DFSConfigKeys.DFS_XFRAME_OPTION_ENABLED,
+DFSConfigKeys.DFS_XFRAME_OPTION_ENABLED_DEFAULT);
+
+final String xFrameOptionValue = conf.getTrimmed(
+DFSConfigKeys.DFS_XFRAME_OPTION_VALUE,
+DFSConfigKeys.DFS_XFRAME_OPTION_VALUE_DEFAULT);
+
+builder.configureXFrame(xFrameEnabled).setXFrameOption(xFrameOptionValue);
+
 infoServer = builder.build();
 infoServer.setAttribute("secondary.name.node", this);
 infoServer.setAttribute("name.system.image", checkpointImage);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/38521af0/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeHttpServerXFrame.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeHttpServerXFrame.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeHttpServerXFrame.java
index 947e951..aaa713e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeHttpServerXFrame.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeHttpServerXFrame.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.http.HttpServer2;
@@ -32,6 +33,7 @@ import java.net.HttpURLConnection;
 import java.net.InetSocketAddress;
 import java.net.MalformedURLException;
 import java.net.URL;
+import java.net.URI;
 
 /**
  * A class to test the XFrameoptions of Namenode HTTP Server. We are not 
reusing
@@ -94,4 +96,24 @@ public class TestNameNodeHttpServerXFrame {
 conn.connect();
 return conn;
   }
+
+  @Test
+  public void testSecondaryNameNodeXFrame() throws IOException {
+Configuration conf = new HdfsConfiguration();
+FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
+
+SecondaryNameNode sn = new SecondaryNameNode(conf);
+sn.startInfoServer();
+InetSocketAddress httpAddress = SecondaryNameNode.getHttpAddress(conf);
+
+URL url = URI.create("http://; + httpAddress.getHostName()
++ ":" + httpAddress.getPort()).toURL();
+HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+conn.connect();
+String xfoHeader = conn.getHeaderField("X-FRAME-OPTIONS");
+Assert.assertTrue("X-FRAME-OPTIONS is absent in the header",
+xfoHeader != null);
+

hadoop git commit: HDFS-12158. Secondary Namenode's web interface lack configs for X-FRAME-OPTIONS protection. Contributed by Mukul Kumar Singh.

2017-07-19 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 4daf5741e -> 6ed569df2


HDFS-12158. Secondary Namenode's web interface lack configs for X-FRAME-OPTIONS 
protection. Contributed by Mukul Kumar Singh.

(cherry picked from commit 413b23eb04eee24275257ab462133e0818f87449)
(cherry picked from commit e0297ffbc89e9f037d5f6a8c5874ce8794656e0c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6ed569df
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6ed569df
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6ed569df

Branch: refs/heads/branch-2.8
Commit: 6ed569df217a2c0d0e23661d2353c1fac428ee80
Parents: 4daf574
Author: Anu Engineer 
Authored: Wed Jul 19 10:29:06 2017 -0700
Committer: Anu Engineer 
Committed: Wed Jul 19 10:46:21 2017 -0700

--
 .../hdfs/server/namenode/SecondaryNameNode.java | 10 +
 .../namenode/TestNameNodeHttpServerXFrame.java  | 22 
 2 files changed, 32 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ed569df/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
index acb2c8a..175d138 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
@@ -479,6 +479,16 @@ public class SecondaryNameNode implements Runnable,
 DFS_SECONDARY_NAMENODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,
 DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY);
 
+final boolean xFrameEnabled = conf.getBoolean(
+DFSConfigKeys.DFS_XFRAME_OPTION_ENABLED,
+DFSConfigKeys.DFS_XFRAME_OPTION_ENABLED_DEFAULT);
+
+final String xFrameOptionValue = conf.getTrimmed(
+DFSConfigKeys.DFS_XFRAME_OPTION_VALUE,
+DFSConfigKeys.DFS_XFRAME_OPTION_VALUE_DEFAULT);
+
+builder.configureXFrame(xFrameEnabled).setXFrameOption(xFrameOptionValue);
+
 infoServer = builder.build();
 infoServer.setAttribute("secondary.name.node", this);
 infoServer.setAttribute("name.system.image", checkpointImage);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ed569df/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeHttpServerXFrame.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeHttpServerXFrame.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeHttpServerXFrame.java
index 947e951..aaa713e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeHttpServerXFrame.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeHttpServerXFrame.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.http.HttpServer2;
@@ -32,6 +33,7 @@ import java.net.HttpURLConnection;
 import java.net.InetSocketAddress;
 import java.net.MalformedURLException;
 import java.net.URL;
+import java.net.URI;
 
 /**
  * A class to test the XFrameoptions of Namenode HTTP Server. We are not 
reusing
@@ -94,4 +96,24 @@ public class TestNameNodeHttpServerXFrame {
 conn.connect();
 return conn;
   }
+
+  @Test
+  public void testSecondaryNameNodeXFrame() throws IOException {
+Configuration conf = new HdfsConfiguration();
+FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
+
+SecondaryNameNode sn = new SecondaryNameNode(conf);
+sn.startInfoServer();
+InetSocketAddress httpAddress = SecondaryNameNode.getHttpAddress(conf);
+
+URL url = URI.create("http://; + httpAddress.getHostName()
++ ":" + httpAddress.getPort()).toURL();
+HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+conn.connect();
+String xfoHeader = conn.getHeaderField("X-FRAME-OPTIONS");
+Assert.assertTrue("X-FRAME-OPTIONS is absent in the header",
+xfoHeader != null);
+Assert.assertTrue(xfoHeader.endsWith(HttpServer2.XFrameOption
+.SAMEORIGIN.toString()));
+ 

hadoop git commit: HDFS-12158. Secondary Namenode's web interface lack configs for X-FRAME-OPTIONS protection. Contributed by Mukul Kumar Singh.

2017-07-19 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 979d37ae2 -> e0297ffbc


HDFS-12158. Secondary Namenode's web interface lack configs for X-FRAME-OPTIONS 
protection. Contributed by Mukul Kumar Singh.

(cherry picked from commit 413b23eb04eee24275257ab462133e0818f87449)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e0297ffb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e0297ffb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e0297ffb

Branch: refs/heads/branch-2
Commit: e0297ffbc89e9f037d5f6a8c5874ce8794656e0c
Parents: 979d37a
Author: Anu Engineer 
Authored: Wed Jul 19 10:29:06 2017 -0700
Committer: Anu Engineer 
Committed: Wed Jul 19 10:42:28 2017 -0700

--
 .../hdfs/server/namenode/SecondaryNameNode.java | 10 +
 .../namenode/TestNameNodeHttpServerXFrame.java  | 22 
 2 files changed, 32 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0297ffb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
index 1358f46..d31f5db 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
@@ -479,6 +479,16 @@ public class SecondaryNameNode implements Runnable,
 DFS_SECONDARY_NAMENODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,
 DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY);
 
+final boolean xFrameEnabled = conf.getBoolean(
+DFSConfigKeys.DFS_XFRAME_OPTION_ENABLED,
+DFSConfigKeys.DFS_XFRAME_OPTION_ENABLED_DEFAULT);
+
+final String xFrameOptionValue = conf.getTrimmed(
+DFSConfigKeys.DFS_XFRAME_OPTION_VALUE,
+DFSConfigKeys.DFS_XFRAME_OPTION_VALUE_DEFAULT);
+
+builder.configureXFrame(xFrameEnabled).setXFrameOption(xFrameOptionValue);
+
 infoServer = builder.build();
 infoServer.setAttribute("secondary.name.node", this);
 infoServer.setAttribute("name.system.image", checkpointImage);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e0297ffb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeHttpServerXFrame.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeHttpServerXFrame.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeHttpServerXFrame.java
index 947e951..aaa713e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeHttpServerXFrame.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeHttpServerXFrame.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.http.HttpServer2;
@@ -32,6 +33,7 @@ import java.net.HttpURLConnection;
 import java.net.InetSocketAddress;
 import java.net.MalformedURLException;
 import java.net.URL;
+import java.net.URI;
 
 /**
  * A class to test the XFrameoptions of Namenode HTTP Server. We are not 
reusing
@@ -94,4 +96,24 @@ public class TestNameNodeHttpServerXFrame {
 conn.connect();
 return conn;
   }
+
+  @Test
+  public void testSecondaryNameNodeXFrame() throws IOException {
+Configuration conf = new HdfsConfiguration();
+FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
+
+SecondaryNameNode sn = new SecondaryNameNode(conf);
+sn.startInfoServer();
+InetSocketAddress httpAddress = SecondaryNameNode.getHttpAddress(conf);
+
+URL url = URI.create("http://; + httpAddress.getHostName()
++ ":" + httpAddress.getPort()).toURL();
+HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+conn.connect();
+String xfoHeader = conn.getHeaderField("X-FRAME-OPTIONS");
+Assert.assertTrue("X-FRAME-OPTIONS is absent in the header",
+xfoHeader != null);
+Assert.assertTrue(xfoHeader.endsWith(HttpServer2.XFrameOption
+.SAMEORIGIN.toString()));
+  }
 }



hadoop git commit: HDFS-12158. Secondary Namenode's web interface lack configs for X-FRAME-OPTIONS protection. Contributed by Mukul Kumar Singh.

2017-07-19 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk 04ff412da -> 413b23eb0


HDFS-12158. Secondary Namenode's web interface lack configs for X-FRAME-OPTIONS 
protection. Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/413b23eb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/413b23eb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/413b23eb

Branch: refs/heads/trunk
Commit: 413b23eb04eee24275257ab462133e0818f87449
Parents: 04ff412
Author: Anu Engineer 
Authored: Wed Jul 19 10:29:06 2017 -0700
Committer: Anu Engineer 
Committed: Wed Jul 19 10:29:06 2017 -0700

--
 .../hdfs/server/namenode/SecondaryNameNode.java | 10 +
 .../namenode/TestNameNodeHttpServerXFrame.java  | 22 
 2 files changed, 32 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/413b23eb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
index 6dd085a..ff83e34 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
@@ -479,6 +479,16 @@ public class SecondaryNameNode implements Runnable,
 DFS_SECONDARY_NAMENODE_KERBEROS_INTERNAL_SPNEGO_PRINCIPAL_KEY,
 DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY);
 
+final boolean xFrameEnabled = conf.getBoolean(
+DFSConfigKeys.DFS_XFRAME_OPTION_ENABLED,
+DFSConfigKeys.DFS_XFRAME_OPTION_ENABLED_DEFAULT);
+
+final String xFrameOptionValue = conf.getTrimmed(
+DFSConfigKeys.DFS_XFRAME_OPTION_VALUE,
+DFSConfigKeys.DFS_XFRAME_OPTION_VALUE_DEFAULT);
+
+builder.configureXFrame(xFrameEnabled).setXFrameOption(xFrameOptionValue);
+
 infoServer = builder.build();
 infoServer.setAttribute("secondary.name.node", this);
 infoServer.setAttribute("name.system.image", checkpointImage);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/413b23eb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeHttpServerXFrame.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeHttpServerXFrame.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeHttpServerXFrame.java
index 947e951..aaa713e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeHttpServerXFrame.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeHttpServerXFrame.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.http.HttpServer2;
@@ -32,6 +33,7 @@ import java.net.HttpURLConnection;
 import java.net.InetSocketAddress;
 import java.net.MalformedURLException;
 import java.net.URL;
+import java.net.URI;
 
 /**
  * A class to test the XFrameoptions of Namenode HTTP Server. We are not 
reusing
@@ -94,4 +96,24 @@ public class TestNameNodeHttpServerXFrame {
 conn.connect();
 return conn;
   }
+
+  @Test
+  public void testSecondaryNameNodeXFrame() throws IOException {
+Configuration conf = new HdfsConfiguration();
+FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
+
+SecondaryNameNode sn = new SecondaryNameNode(conf);
+sn.startInfoServer();
+InetSocketAddress httpAddress = SecondaryNameNode.getHttpAddress(conf);
+
+URL url = URI.create("http://; + httpAddress.getHostName()
++ ":" + httpAddress.getPort()).toURL();
+HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+conn.connect();
+String xfoHeader = conn.getHeaderField("X-FRAME-OPTIONS");
+Assert.assertTrue("X-FRAME-OPTIONS is absent in the header",
+xfoHeader != null);
+Assert.assertTrue(xfoHeader.endsWith(HttpServer2.XFrameOption
+.SAMEORIGIN.toString()));
+  }
 }



hadoop git commit: HDFS-12133. Correct ContentSummaryComputationContext Logger class name.. Contributed by Surendra Singh Lilhore.

2017-07-19 Thread brahma
Repository: hadoop
Updated Branches:
  refs/heads/trunk f8cd55fe3 -> 04ff412da


HDFS-12133. Correct ContentSummaryComputationContext Logger class name.. 
Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/04ff412d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/04ff412d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/04ff412d

Branch: refs/heads/trunk
Commit: 04ff412dabf3f6b9d884171c4140adbc636d5387
Parents: f8cd55f
Author: Brahma Reddy Battula 
Authored: Wed Jul 19 23:43:10 2017 +0800
Committer: Brahma Reddy Battula 
Committed: Wed Jul 19 23:43:10 2017 +0800

--
 .../hdfs/server/namenode/ContentSummaryComputationContext.java| 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/04ff412d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
index 43e6f0d..c81f82c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryComputationContext.java
@@ -48,7 +48,8 @@ public class ContentSummaryComputationContext {
   private int sleepNanoSec = 0;
 
   public static final String REPLICATED = "Replicated";
-  public static final Log LOG = LogFactory.getLog(INode.class);
+  public static final Log LOG = LogFactory
+  .getLog(ContentSummaryComputationContext.class);
 
   private FSPermissionChecker pc;
   /**
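
The one-line fix matters because commons-logging names the log category after
the class literal, so this class's messages were previously filed under INode.
A sketch of the practical effect (log4j category override; the names follow
from the code above):

    // category is now ...namenode.ContentSummaryComputationContext, so an
    // override like this targets only this class instead of all of INode:
    //   log4j.logger.org.apache.hadoop.hdfs.server.namenode.ContentSummaryComputationContext=DEBUG
    Log log = LogFactory.getLog(ContentSummaryComputationContext.class);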





[1/2] hadoop git commit: HDFS-12067. Correct dfsadmin commands usage message to reflects IPC port. Contributed by steven-wugang.

2017-07-19 Thread brahma
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 057631c03 -> 979d37ae2
  refs/heads/trunk df180259b -> f8cd55fe3


HDFS-12067. Correct dfsadmin commands usage message to reflects IPC port. 
Contributed by steven-wugang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f8cd55fe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f8cd55fe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f8cd55fe

Branch: refs/heads/trunk
Commit: f8cd55fe33665faf2d1b14df231516fc891118fc
Parents: df18025
Author: Brahma Reddy Battula 
Authored: Wed Jul 19 23:21:43 2017 +0800
Committer: Brahma Reddy Battula 
Committed: Wed Jul 19 23:21:43 2017 +0800

--
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  | 57 
 1 file changed, 34 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8cd55fe/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index 06f408d..ea76093 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -1113,29 +1113,39 @@ public class DFSAdmin extends FsShell {
 "\tor gets a list of reconfigurable properties.\n" +
 
 "\tThe second parameter specifies the node type\n";
-String genericRefresh = "-refresh: Arguments are <hostname:port> <resource_identifier> [arg1..argn]\n" +
-  "\tTriggers a runtime-refresh of the resource specified by <resource_identifier>\n" +
-  "\ton <hostname:port>. All other args after are sent to the host.\n";
+String genericRefresh = "-refresh: Arguments are " +
+"<hostname:ipc_port> <resource_identifier> [arg1..argn]\n" +
+"\tTriggers a runtime-refresh of the resource specified by " +
+"<resource_identifier> on <hostname:ipc_port>.\n" +
+"\tAll other args after are sent to the host.\n" +
+"\tThe ipc_port is determined by 'dfs.datanode.ipc.address'," +
+"default is DFS_DATANODE_IPC_DEFAULT_PORT.\n";
 
 String printTopology = "-printTopology: Print a tree of the racks and 
their\n" +
"\t\tnodes as reported by the Namenode\n";
 
-String refreshNamenodes = "-refreshNamenodes: Takes a datanodehost:port as 
argument,\n"+
-  "\t\tFor the given datanode, reloads the 
configuration files,\n" +
-  "\t\tstops serving the removed block-pools\n"+
-  "\t\tand starts serving new block-pools\n";
+String refreshNamenodes = "-refreshNamenodes: Takes a " +
+"datanodehost:ipc_port as argument,For the given datanode\n" +
+"\t\treloads the configuration files,stops serving the removed\n" +
+"\t\tblock-pools and starts serving new block-pools.\n" +
+"\t\tThe ipc_port is determined by 'dfs.datanode.ipc.address'," +
+"default is DFS_DATANODE_IPC_DEFAULT_PORT.\n";
 
-String getVolumeReport = "-getVolumeReport: Takes a datanodehost:port as "
-+ "argument,\n\t\tFor the given datanode, get the volume report\n";
-
-String deleteBlockPool = "-deleteBlockPool: Arguments are 
datanodehost:port, blockpool id\n"+
- "\t\t and an optional argument \"force\". If 
force is passed,\n"+
- "\t\t block pool directory for the given 
blockpool id on the given\n"+
- "\t\t datanode is deleted along with its 
contents, otherwise\n"+
- "\t\t the directory is deleted only if it is 
empty. The command\n" +
- "\t\t will fail if datanode is still serving the 
block pool.\n" +
- "\t\t   Refer to refreshNamenodes to shutdown a 
block pool\n" +
- "\t\t service on a datanode.\n";
+String getVolumeReport = "-getVolumeReport: Takes a datanodehost:ipc_port"+
+" as argument,For the given datanode,get the volume report.\n" +
+"\t\tThe ipc_port is determined by 'dfs.datanode.ipc.address'," +
+"default is DFS_DATANODE_IPC_DEFAULT_PORT.\n";
+
+String deleteBlockPool = "-deleteBlockPool: Arguments are " +
+"datanodehost:ipc_port, blockpool id and an optional argument\n" +
+"\t\t\"force\". If force is passed,block pool directory for\n" +
+"\t\tthe given blockpool id on the given datanode is deleted\n" +
+"\t\talong with its contents,otherwise the 

[2/2] hadoop git commit: HDFS-12067. Correct dfsadmin commands usage message to reflects IPC port. Contributed by steven-wugang.

2017-07-19 Thread brahma
HDFS-12067. Correct dfsadmin commands usage message to reflects IPC port. 
Contributed by steven-wugang.

(cherry picked from commit f8cd55fe33665faf2d1b14df231516fc891118fc)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/979d37ae
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/979d37ae
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/979d37ae

Branch: refs/heads/branch-2
Commit: 979d37ae21413b97170a89631f0879b9d2d280ff
Parents: 057631c
Author: Brahma Reddy Battula 
Authored: Wed Jul 19 23:21:43 2017 +0800
Committer: Brahma Reddy Battula 
Committed: Wed Jul 19 23:22:40 2017 +0800

--
 .../org/apache/hadoop/hdfs/tools/DFSAdmin.java  | 57 
 1 file changed, 34 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/979d37ae/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index bea3441..fb8908c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -1082,29 +1082,39 @@ public class DFSAdmin extends FsShell {
 "\tor gets a list of reconfigurable properties.\n" +
 
 "\tThe second parameter specifies the node type\n";
-String genericRefresh = "-refresh: Arguments are <hostname:port> <resource_identifier> [arg1..argn]\n" +
-  "\tTriggers a runtime-refresh of the resource specified by <resource_identifier>\n" +
-  "\ton <hostname:port>. All other args after are sent to the host.\n";
+String genericRefresh = "-refresh: Arguments are " +
+"<hostname:ipc_port> <resource_identifier> [arg1..argn]\n" +
+"\tTriggers a runtime-refresh of the resource specified by " +
+"<resource_identifier> on <hostname:ipc_port>.\n" +
+"\tAll other args after are sent to the host.\n" +
+"\tThe ipc_port is determined by 'dfs.datanode.ipc.address'," +
+"default is DFS_DATANODE_IPC_DEFAULT_PORT.\n";
 
 String printTopology = "-printTopology: Print a tree of the racks and 
their\n" +
"\t\tnodes as reported by the Namenode\n";
 
-String refreshNamenodes = "-refreshNamenodes: Takes a datanodehost:port as 
argument,\n"+
-  "\t\tFor the given datanode, reloads the 
configuration files,\n" +
-  "\t\tstops serving the removed block-pools\n"+
-  "\t\tand starts serving new block-pools\n";
+String refreshNamenodes = "-refreshNamenodes: Takes a " +
+"datanodehost:ipc_port as argument,For the given datanode\n" +
+"\t\treloads the configuration files,stops serving the removed\n" +
+"\t\tblock-pools and starts serving new block-pools.\n" +
+"\t\tThe ipc_port is determined by 'dfs.datanode.ipc.address'," +
+"default is DFS_DATANODE_IPC_DEFAULT_PORT.\n";
 
-String getVolumeReport = "-getVolumeReport: Takes a datanodehost:port as "
-+ "argument,\n\t\tFor the given datanode, get the volume report\n";
-
-String deleteBlockPool = "-deleteBlockPool: Arguments are 
datanodehost:port, blockpool id\n"+
- "\t\t and an optional argument \"force\". If 
force is passed,\n"+
- "\t\t block pool directory for the given 
blockpool id on the given\n"+
- "\t\t datanode is deleted along with its 
contents, otherwise\n"+
- "\t\t the directory is deleted only if it is 
empty. The command\n" +
- "\t\t will fail if datanode is still serving the 
block pool.\n" +
- "\t\t   Refer to refreshNamenodes to shutdown a 
block pool\n" +
- "\t\t service on a datanode.\n";
+String getVolumeReport = "-getVolumeReport: Takes a datanodehost:ipc_port"+
+" as argument,For the given datanode,get the volume report.\n" +
+"\t\tThe ipc_port is determined by 'dfs.datanode.ipc.address'," +
+"default is DFS_DATANODE_IPC_DEFAULT_PORT.\n";
+
+String deleteBlockPool = "-deleteBlockPool: Arguments are " +
+"datanodehost:ipc_port, blockpool id and an optional argument\n" +
+"\t\t\"force\". If force is passed,block pool directory for\n" +
+"\t\tthe given blockpool id on the given datanode is deleted\n" +
+"\t\talong with its contents,otherwise the directory is deleted\n"+
+"\t\tonly if it is 

hadoop git commit: HADOOP-14669. GenericTestUtils.waitFor should use monotonic time. Contributed by Daniel Templeton

2017-07-19 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 f806e8627 -> 057631c03


HADOOP-14669. GenericTestUtils.waitFor should use monotonic time. Contributed 
by Daniel Templeton

(cherry picked from commit df180259b0cc3660e199e85447c7193bee51751c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/057631c0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/057631c0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/057631c0

Branch: refs/heads/branch-2
Commit: 057631c031f40470480ba301d1c346f4a26fa5a5
Parents: f806e86
Author: Jason Lowe 
Authored: Wed Jul 19 09:41:22 2017 -0500
Committer: Jason Lowe 
Committed: Wed Jul 19 09:42:38 2017 -0500

--
 .../src/test/java/org/apache/hadoop/test/GenericTestUtils.java   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/057631c0/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
index 38a0c6c..9291bb0 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
@@ -356,10 +356,10 @@ public abstract class GenericTestUtils {
 Preconditions.checkArgument(waitForMillis >= checkEveryMillis,
 ERROR_INVALID_ARGUMENT);
 
-long st = Time.now();
+long st = Time.monotonicNow();
 boolean result = check.get();
 
-while (!result && (Time.now() - st < waitForMillis)) {
+while (!result && (Time.monotonicNow() - st < waitForMillis)) {
   Thread.sleep(checkEveryMillis);
   result = check.get();
 }
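
The reason for the switch: Time.now() follows the wall clock, which NTP or an
administrator can step backwards mid-wait and corrupt the elapsed-time math,
while a monotonic source (System.nanoTime, which Time.monotonicNow wraps)
cannot go backwards. A self-contained sketch of the corrected loop:

    static boolean waitFor(java.util.function.BooleanSupplier check,
        long checkEveryMillis, long waitForMillis) throws InterruptedException {
      long start = System.nanoTime(); // monotonic, like Time.monotonicNow()
      boolean result = check.getAsBoolean();
      while (!result
          && (System.nanoTime() - start) / 1_000_000 < waitForMillis) {
        Thread.sleep(checkEveryMillis);
        result = check.getAsBoolean();
      }
      return result;
    }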





hadoop git commit: HADOOP-14669. GenericTestUtils.waitFor should use monotonic time. Contributed by Daniel Templeton

2017-07-19 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 2843c688b -> df180259b


HADOOP-14669. GenericTestUtils.waitFor should use monotonic time. Contributed 
by Daniel Templeton


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/df180259
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/df180259
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/df180259

Branch: refs/heads/trunk
Commit: df180259b0cc3660e199e85447c7193bee51751c
Parents: 2843c68
Author: Jason Lowe 
Authored: Wed Jul 19 09:41:22 2017 -0500
Committer: Jason Lowe 
Committed: Wed Jul 19 09:41:22 2017 -0500

--
 .../src/test/java/org/apache/hadoop/test/GenericTestUtils.java   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/df180259/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
index 38a0c6c..9291bb0 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
@@ -356,10 +356,10 @@ public abstract class GenericTestUtils {
 Preconditions.checkArgument(waitForMillis >= checkEveryMillis,
 ERROR_INVALID_ARGUMENT);
 
-long st = Time.now();
+long st = Time.monotonicNow();
 boolean result = check.get();
 
-while (!result && (Time.now() - st < waitForMillis)) {
+while (!result && (Time.monotonicNow() - st < waitForMillis)) {
   Thread.sleep(checkEveryMillis);
   result = check.get();
 }





hadoop git commit: HDFS-12152: [SPS]: Re-arrange StoragePolicySatisfyWorker stopping sequence to improve thread cleanup time. Contributed by Rakesh R.

2017-07-19 Thread umamahesh
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-10285 50a90 -> d8122f25b


HDFS-12152: [SPS]: Re-arrange StoragePolicySatisfyWorker stopping sequence to 
improve thread cleanup time. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d8122f25
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d8122f25
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d8122f25

Branch: refs/heads/HDFS-10285
Commit: d8122f25b13a213ed3a74c0d34857ab7986ffd6e
Parents: 50a9bbb
Author: Uma Maheswara Rao G 
Authored: Wed Jul 19 00:55:26 2017 -0700
Committer: Uma Maheswara Rao G 
Committed: Wed Jul 19 00:55:26 2017 -0700

--
 .../datanode/BlockStorageMovementTracker.java   | 16 
 .../server/datanode/StoragePolicySatisfyWorker.java |  5 +++--
 2 files changed, 15 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8122f25/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
index c7e952b..f3d2bb6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
@@ -77,7 +77,8 @@ public class BlockStorageMovementTracker implements Runnable {
 moverTaskFutures.wait(2000);
   }
 } catch (InterruptedException ignore) {
-  // ignore
+  // Sets interrupt flag of this thread.
+  Thread.currentThread().interrupt();
 }
   }
   try {
@@ -102,12 +103,19 @@ public class BlockStorageMovementTracker implements Runnable {
 synchronized (moverTaskFutures) {
   moverTaskFutures.remove(trackId);
 }
-// handle completed or inprogress blocks movements per trackId.
-blksMovementsStatusHandler.handle(resultPerTrackIdList);
+if (running) {
+  // handle completed or inprogress blocks movements per trackId.
+  blksMovementsStatusHandler.handle(resultPerTrackIdList);
+}
 movementResults.remove(trackId);
   }
 }
-  } catch (ExecutionException | InterruptedException e) {
+  } catch (InterruptedException e) {
+if (running) {
+  LOG.error("Exception while moving block replica to target storage"
+  + " type", e);
+}
+  } catch (ExecutionException e) {
 // TODO: Do we need failure retries and implement the same if required.
 LOG.error("Exception while moving block replica to target storage type",
 e);
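
The first hunk replaces a silently swallowed InterruptedException with Thread.currentThread().interrupt(). A self-contained sketch of that idiom with illustrative names (only the idiom itself comes from the patch):

    public final class RestoreInterrupt {
      private final Object lock = new Object();

      void awaitWork() {
        synchronized (lock) {
          try {
            lock.wait(2000);
          } catch (InterruptedException e) {
            // Re-set the interrupt flag so that outer loops and callers
            // (e.g. a running-flag check) still observe the stop request
            // instead of the interruption being lost here.
            Thread.currentThread().interrupt();
          }
        }
      }
    }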

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8122f25/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
index 196cd58..4e57805 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
@@ -137,8 +137,8 @@ public class StoragePolicySatisfyWorker {
* thread.
*/
   void stop() {
-movementTrackerThread.interrupt();
 movementTracker.stopTracking();
+movementTrackerThread.interrupt();
   }
 
   /**
@@ -147,7 +147,8 @@ public class StoragePolicySatisfyWorker {
   void waitToFinishWorkerThread() {
 try {
   movementTrackerThread.join(3000);
-} catch (InterruptedException ie) {
+} catch (InterruptedException ignore) {
+  // ignore
 }
   }
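
The two-line swap in stop() is the heart of the patch: stopTracking() must flip the tracker's running flag before the interrupt lands, so that when the tracker thread wakes up it already sees running == false and takes the quiet shutdown path instead of logging errors and handling stale results. A hedged sketch of the ordering, with illustrative names:

    public final class OrderedShutdown {
      private volatile boolean running = true;
      private final Thread trackerThread =
          new Thread(this::trackLoop, "BlockStorageMovementTracker");

      void stop() {
        running = false;            // first: mark the stop as deliberate
        trackerThread.interrupt();  // then: wake the thread out of wait()
      }

      private void trackLoop() {
        while (running) {
          try {
            Thread.sleep(2000);     // stand-in for wait()/Future.get()
          } catch (InterruptedException e) {
            // running is already false here, so exit without error noise.
          }
        }
      }
    }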
 





hadoop git commit: HADOOP-14642. wasb: add support for caching Authorization and SASKeys. Contributed by Sivaguru Sankaridurg.

2017-07-19 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/trunk 845c4e52b -> 2843c688b


HADOOP-14642. wasb: add support for caching Authorization and SASKeys. Contributed by Sivaguru Sankaridurg.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2843c688
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2843c688
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2843c688

Branch: refs/heads/trunk
Commit: 2843c688bcc21c65eb3538ffb3caeaffe440eda8
Parents: 845c4e5
Author: Jitendra Pandey 
Authored: Wed Jul 19 00:13:06 2017 -0700
Committer: Jitendra Pandey 
Committed: Wed Jul 19 00:13:06 2017 -0700

--
 .../src/main/resources/core-default.xml |   9 +-
 .../conf/TestCommonConfigurationFields.java |   1 +
 .../hadoop/fs/azure/CachingAuthorizer.java  | 232 +++
 .../fs/azure/LocalSASKeyGeneratorImpl.java  |  28 ++-
 .../hadoop/fs/azure/NativeAzureFileSystem.java  |   3 -
 .../fs/azure/RemoteSASKeyGeneratorImpl.java |  46 +++-
 .../fs/azure/RemoteWasbAuthorizerImpl.java  |  38 ++-
 .../hadoop/fs/azure/SASKeyGeneratorImpl.java|   4 +-
 .../hadoop-azure/src/site/markdown/index.md |  38 +++
 .../hadoop/fs/azure/AbstractWasbTestBase.java   |   5 +
 .../hadoop/fs/azure/MockWasbAuthorizerImpl.java |  22 +-
 .../TestNativeAzureFSAuthorizationCaching.java  |  60 +
 .../TestNativeAzureFileSystemAuthorization.java |  86 ++-
 ...veAzureFileSystemAuthorizationWithOwner.java |   2 +-
 .../fs/azure/TestWasbRemoteCallHelper.java  |   6 +-
 .../src/test/resources/azure-test.xml   |   3 +-
 16 files changed, 499 insertions(+), 84 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2843c688/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
--
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index a705a4e..68b0a9d 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -1343,7 +1343,14 @@
 configuration
   </description>
 </property>
-
+<property>
+  <name>fs.azure.authorization.caching.enable</name>
+  <value>true</value>
+  <description>
+    Config flag to enable caching of authorization results and saskeys in WASB.
+    This flag is relevant only when fs.azure.authorization is enabled.
+  </description>
+</property>
 
 <property>
   <name>io.seqfile.compress.blocksize</name>
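
The new property defaults to on; tests or deployments that need fresh authorization decisions on every call can turn it off per-instance. A small sketch, where only the property name comes from the patch and everything else is illustrative:

    import org.apache.hadoop.conf.Configuration;

    public class WasbAuthCacheToggle {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Disable caching of authorization results and SAS keys; only
        // meaningful when fs.azure.authorization is also enabled.
        conf.setBoolean("fs.azure.authorization.caching.enable", false);
        System.out.println(conf.getBoolean(
            "fs.azure.authorization.caching.enable", true));
      }
    }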

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2843c688/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
--
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
index 8524973..593254eb 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestCommonConfigurationFields.java
@@ -115,6 +115,7 @@ public class TestCommonConfigurationFields extends TestConfigurationFieldsBase {
 xmlPropsToSkipCompare.add("fs.azure.local.sas.key.mode");
 xmlPropsToSkipCompare.add("fs.azure.secure.mode");
 xmlPropsToSkipCompare.add("fs.azure.authorization");
+xmlPropsToSkipCompare.add("fs.azure.authorization.caching.enable");
 
 // Deprecated properties.  These should eventually be removed from the
 // class.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2843c688/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/CachingAuthorizer.java
--
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/CachingAuthorizer.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/CachingAuthorizer.java
new file mode 100644
index 0000000..016ae74
--- /dev/null
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/CachingAuthorizer.java
@@ -0,0 +1,232 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * 
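
The archived diff cuts off inside the license header, so the body of CachingAuthorizer is not shown here. As a purely hypothetical sketch of the general shape such a component can take (this is not the real CachingAuthorizer; only the idea of caching authorization results and SAS keys comes from the patch), a bounded, expiring Guava cache:

    import com.google.common.cache.Cache;
    import com.google.common.cache.CacheBuilder;
    import java.util.concurrent.TimeUnit;

    public class HypotheticalResultCache {
      // Bounded so memory stays capped; expiring so revoked permissions
      // and rotated SAS keys age out instead of being served forever.
      private final Cache<String, Boolean> cache = CacheBuilder.newBuilder()
          .maximumSize(5000)
          .expireAfterWrite(30, TimeUnit.SECONDS)
          .build();

      public Boolean getIfPresent(String key) {
        return cache.getIfPresent(key);
      }

      public void put(String key, boolean allowed) {
        cache.put(key, allowed);
      }
    }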

hadoop git commit: HDFS-12126. Ozone: Ozone shell: Add more testing for bucket shell commands. Contributed by Yiqun Lin.

2017-07-19 Thread wwei
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 bb0adcb6e -> a715f60ce


HDFS-12126. Ozone: Ozone shell: Add more testing for bucket shell commands. Contributed by Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a715f60c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a715f60c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a715f60c

Branch: refs/heads/HDFS-7240
Commit: a715f60ce1a46686037a4d74a0154cdd149ffd30
Parents: bb0adcb
Author: Weiwei Yang 
Authored: Wed Jul 19 14:05:49 2017 +0800
Committer: Weiwei Yang 
Committed: Wed Jul 19 14:05:54 2017 +0800

--
 .../web/handlers/BucketProcessTemplate.java |  20 +-
 .../apache/hadoop/ozone/web/ozShell/Shell.java  |   2 +
 .../web/ozShell/bucket/ListBucketHandler.java   |   2 +
 .../web/ozShell/volume/ListVolumeHandler.java   |  13 +-
 .../hadoop/ozone/web/utils/OzoneUtils.java  |  24 +++
 .../hadoop/ozone/ozShell/TestOzoneShell.java| 207 +++
 .../hadoop/ozone/web/client/TestKeys.java   |   5 +-
 7 files changed, 254 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a715f60c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketProcessTemplate.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketProcessTemplate.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketProcessTemplate.java
index 31bddb1..5a96a84 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketProcessTemplate.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/handlers/BucketProcessTemplate.java
@@ -151,24 +151,32 @@ public abstract class BucketProcessTemplate {
  IOException fsExp) throws OzoneException {
 LOG.debug("IOException: {}", fsExp);
 
+OzoneException exp = null;
 if (fsExp instanceof FileAlreadyExistsException) {
-  throw ErrorTable
+  exp = ErrorTable
   .newError(ErrorTable.BUCKET_ALREADY_EXISTS, reqID, bucket, hostName);
 }
 
 if (fsExp instanceof DirectoryNotEmptyException) {
-  throw ErrorTable
+  exp = ErrorTable
   .newError(ErrorTable.BUCKET_NOT_EMPTY, reqID, bucket, hostName);
 }
 
 if (fsExp instanceof NoSuchFileException) {
-  throw ErrorTable
+  exp = ErrorTable
   .newError(ErrorTable.INVALID_BUCKET_NAME, reqID, bucket, hostName);
 }
 
-// default we don't handle this exception yet.
-
-throw ErrorTable.newError(ErrorTable.SERVER_ERROR, reqID, bucket, hostName);
+// Default we don't handle this exception yet,
+// report a Server Internal Error.
+if (exp == null) {
+  exp =
+  ErrorTable.newError(ErrorTable.SERVER_ERROR, reqID, bucket, hostName);
+  if (fsExp != null) {
+exp.setMessage(fsExp.getMessage());
+  }
+}
+throw exp;
   }
 
   /**
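
The hunk above replaces a chain of early throws with a single exit point, which lets the SERVER_ERROR fallback attach the original IOException's message before throwing. The same pattern with Ozone's types stripped out (exception classes and messages here are illustrative, not from the patch):

    import java.io.IOException;
    import java.nio.file.FileAlreadyExistsException;
    import java.nio.file.NoSuchFileException;

    public final class SingleExitErrorMapping {
      // Translate a low-level IO failure into one user-facing error,
      // throwing exactly once at the end so the generic fallback can
      // carry the underlying message.
      static RuntimeException toUserError(IOException fsExp) {
        RuntimeException exp = null;
        if (fsExp instanceof FileAlreadyExistsException) {
          exp = new IllegalStateException("bucket already exists");
        } else if (fsExp instanceof NoSuchFileException) {
          exp = new IllegalStateException("invalid bucket name");
        }
        if (exp == null) {
          exp = new IllegalStateException(
              "server error: " + fsExp.getMessage());
        }
        return exp;
      }
    }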

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a715f60c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/ozShell/Shell.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/ozShell/Shell.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/ozShell/Shell.java
index f846b6d..a9d46f9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/ozShell/Shell.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/ozShell/Shell.java
@@ -402,6 +402,8 @@ public class Shell extends Configured implements Tool {
   System.err.printf("Command Failed : %s%n", ex.getMessage());
 } catch (OzoneException ex) {
   System.err.printf("Command Failed : %s%n", ex.toJsonString());
+} catch (IllegalArgumentException ex) {
+  System.err.printf("Illegal argument: %s%n", ex.getMessage());
 }
 return 1;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a715f60c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/ListBucketHandler.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/ListBucketHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/ozShell/bucket/ListBucketHandler.java
index ce4a41e..a6a6e44 100644
---