[04/18] hadoop git commit: HADOOP-15756. [JDK10] Migrate from sun.net.util.IPAddressUtil to the replacement. Contributed by Akira Ajisaka.

2018-09-21 Thread shv
HADOOP-15756. [JDK10] Migrate from sun.net.util.IPAddressUtil to the 
replacement. Contributed by Akira Ajisaka.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3da94a36
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3da94a36
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3da94a36

Branch: refs/heads/HDFS-12943
Commit: 3da94a36e21a315c09ec7edb7702820fe2b524f9
Parents: 646874c
Author: Ewan Higgs 
Authored: Thu Sep 20 14:53:21 2018 +0200
Committer: Ewan Higgs 
Committed: Thu Sep 20 14:53:21 2018 +0200

--
 .../org/apache/hadoop/security/SecurityUtil.java | 15 ++-
 1 file changed, 6 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3da94a36/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
index 5f8cb29..0de334a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
@@ -54,9 +54,9 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 //this will need to be replaced someday when there is a suitable replacement
 import sun.net.dns.ResolverConfiguration;
-import sun.net.util.IPAddressUtil;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.net.InetAddresses;
 
 /**
  * Security Utils.
@@ -604,14 +604,11 @@ public final class SecurityUtil {
 public InetAddress getByName(String host) throws UnknownHostException {
   InetAddress addr = null;
 
-  if (IPAddressUtil.isIPv4LiteralAddress(host)) {
-// use ipv4 address as-is
-byte[] ip = IPAddressUtil.textToNumericFormatV4(host);
-addr = InetAddress.getByAddress(host, ip);
-  } else if (IPAddressUtil.isIPv6LiteralAddress(host)) {
-// use ipv6 address as-is
-byte[] ip = IPAddressUtil.textToNumericFormatV6(host);
-addr = InetAddress.getByAddress(host, ip);
+  if (InetAddresses.isInetAddress(host)) {
+// valid ip address. use it as-is
+addr = InetAddresses.forString(host);
+// set hostname
+addr = InetAddress.getByAddress(host, addr.getAddress());
   } else if (host.endsWith(".")) {
 // a rooted host ends with a dot, ex. "host."
 // rooted hosts never use the search path, so only try an exact lookup
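
For readers following the change: Guava's InetAddresses parses an IP literal
without touching DNS, and re-wrapping the bytes keeps the original string as
the hostname. A minimal standalone sketch (assuming Guava on the classpath;
class name and sample values are illustrative only):

    import java.net.InetAddress;
    import java.net.UnknownHostException;
    import com.google.common.net.InetAddresses;

    public class IpLiteralDemo {
      public static void main(String[] args) throws UnknownHostException {
        for (String host : new String[] {"192.168.0.1", "::1"}) {
          if (InetAddresses.isInetAddress(host)) {
            // Parses the literal directly; never performs a DNS lookup.
            InetAddress addr = InetAddresses.forString(host);
            // Re-wrap with the original string so later getHostName() calls
            // return the literal instead of triggering a reverse lookup.
            addr = InetAddress.getByAddress(host, addr.getAddress());
            System.out.println(addr.getHostName() + " -> " + addr.getHostAddress());
          }
        }
      }
    }

A single isInetAddress()/forString() pair covers both IPv4 and IPv6 literals,
which is why the separate v4/v6 branches above collapse into one.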





[10/18] hadoop git commit: HDDS-514. Clean Unregister JMX upon SCMConnectionManager#close. Contributed by Xiaoyu Yao.

2018-09-21 Thread shv
HDDS-514. Clean Unregister JMX upon SCMConnectionManager#close.
Contributed by Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/524f7cd3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/524f7cd3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/524f7cd3

Branch: refs/heads/HDFS-12943
Commit: 524f7cd354e0683c9ec61fdbce344ef79b841728
Parents: 096a716
Author: Anu Engineer 
Authored: Thu Sep 20 12:21:34 2018 -0700
Committer: Anu Engineer 
Committed: Thu Sep 20 12:21:34 2018 -0700

--
 .../container/common/statemachine/SCMConnectionManager.java   | 7 +--
 1 file changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/524f7cd3/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java
index 85fb580..775a91a 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java
@@ -59,7 +59,7 @@ public class SCMConnectionManager
 
   private final int rpcTimeout;
   private final Configuration conf;
-  private final ObjectName jmxBean;
+  private ObjectName jmxBean;
 
   public SCMConnectionManager(Configuration conf) {
 this.mapLock = new ReentrantReadWriteLock();
@@ -191,7 +191,10 @@ public class SCMConnectionManager
   public void close() throws IOException {
 getValues().forEach(endpointStateMachine
 -> IOUtils.cleanupWithLogger(LOG, endpointStateMachine));
-MBeans.unregister(jmxBean);
+if (jmxBean != null) {
+  MBeans.unregister(jmxBean);
+  jmxBean = null;
+}
   }
 
   @Override
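
The null guard above is the usual idempotent-close pattern for MBeans: without
it, a second close() would hand an already-removed name to MBeans.unregister().
A self-contained sketch of the same pattern against the platform MBean server
(all class names here are invented for illustration):

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public class JmxLifecycleDemo implements AutoCloseable {
      // Standard MBean naming: interface = implementation class + "MBean".
      public interface DemoMBean { int getValue(); }
      public static class Demo implements DemoMBean {
        @Override public int getValue() { return 42; }
      }

      private final MBeanServer server =
          ManagementFactory.getPlatformMBeanServer();
      private ObjectName jmxBean;  // non-final so close() can clear it

      public JmxLifecycleDemo() throws Exception {
        jmxBean = new ObjectName("demo:type=Demo");
        server.registerMBean(new Demo(), jmxBean);
      }

      @Override
      public void close() throws Exception {
        if (jmxBean != null) {          // makes close() safe to call twice
          server.unregisterMBean(jmxBean);
          jmxBean = null;
        }
      }

      public static void main(String[] args) throws Exception {
        JmxLifecycleDemo d = new JmxLifecycleDemo();
        d.close();
        d.close();  // no-op instead of InstanceNotFoundException
      }
    }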





[07/18] hadoop git commit: HDDS-394. Rename *Key Apis in DatanodeContainerProtocol to *Block apis. Contributed Dinesh Chitlangia.

2018-09-21 Thread shv
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestBlockData.java
--
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestBlockData.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestBlockData.java
new file mode 100644
index 0000000..9df4249
--- /dev/null
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestBlockData.java
@@ -0,0 +1,127 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.container.common.helpers;
+
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.junit.Assert;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TestRule;
+import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ThreadLocalRandom;
+
+/**
+ * Tests for BlockData.
+ */
+public class TestBlockData {
+  static final Logger LOG = LoggerFactory.getLogger(TestBlockData.class);
+  @Rule
+  public TestRule timeout = new Timeout(10000);
+
+  static ContainerProtos.ChunkInfo buildChunkInfo(String name, long offset,
+  long len) {
+return ContainerProtos.ChunkInfo.newBuilder()
+.setChunkName(name).setOffset(offset).setLen(len).build();
+  }
+
+  @Test
+  public void testAddAndRemove() {
+final BlockData computed = new BlockData(null);
+final List<ContainerProtos.ChunkInfo> expected = new ArrayList<>();
+
+assertChunks(expected, computed);
+long offset = 0;
+int n = 5;
+for(int i = 0; i < n; i++) {
+  offset += assertAddChunk(expected, computed, offset);
+}
+
+for(; !expected.isEmpty();) {
+  removeChunk(expected, computed);
+}
+  }
+
+  private static int chunkCount = 0;
+  static ContainerProtos.ChunkInfo addChunk(
+  List<ContainerProtos.ChunkInfo> expected, long offset) {
+final long length = ThreadLocalRandom.current().nextLong(1000);
+final ContainerProtos.ChunkInfo info =
+buildChunkInfo("c" + ++chunkCount, offset, length);
+expected.add(info);
+return info;
+  }
+
+  static long assertAddChunk(List<ContainerProtos.ChunkInfo> expected,
+  BlockData computed, long offset) {
+final ContainerProtos.ChunkInfo info = addChunk(expected, offset);
+LOG.info("addChunk: " + toString(info));
+computed.addChunk(info);
+assertChunks(expected, computed);
+return info.getLen();
+  }
+
+
+  static void removeChunk(List<ContainerProtos.ChunkInfo> expected,
+  BlockData computed) {
+final int i = ThreadLocalRandom.current().nextInt(expected.size());
+final ContainerProtos.ChunkInfo info = expected.remove(i);
+LOG.info("removeChunk: " + toString(info));
+computed.removeChunk(info);
+assertChunks(expected, computed);
+  }
+
+  static void assertChunks(List<ContainerProtos.ChunkInfo> expected,
+  BlockData computed) {
+final List<ContainerProtos.ChunkInfo> computedChunks = computed.getChunks();
+Assert.assertEquals("expected=" + expected + "\ncomputed=" +
+computedChunks, expected, computedChunks);
+Assert.assertEquals(expected.stream().mapToLong(i -> i.getLen()).sum(),
+computed.getSize());
+  }
+
+  static String toString(ContainerProtos.ChunkInfo info) {
+return info.getChunkName() + ":" + info.getOffset() + "," + info.getLen();
+  }
+
+  static String toString(List<ContainerProtos.ChunkInfo> infos) {
+return infos.stream().map(TestBlockData::toString)
+.reduce((left, right) -> left + ", " + right)
+.orElse("");
+  }
+
+  @Test
+  public void testSetChunks() {
+final BlockData computed = new BlockData(null);
+final List<ContainerProtos.ChunkInfo> expected = new ArrayList<>();
+
+assertChunks(expected, computed);
+long offset = 0;
+int n = 5;
+for(int i = 0; i < n; i++) {
+  offset += addChunk(expected, offset).getLen();
+  LOG.info("setChunk: " + toString(expected));
+  computed.setChunks(expected);
+  assertChunks(expected, computed);
+}
+  }
+}


[15/18] hadoop git commit: Merge commit 'eca1a4bfe952fc184fe90dde50bac9b0e5293568' into HDFS-12943

2018-09-21 Thread shv
Merge commit 'eca1a4bfe952fc184fe90dde50bac9b0e5293568' into HDFS-12943


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6c37db9d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6c37db9d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6c37db9d

Branch: refs/heads/HDFS-12943
Commit: 6c37db9da36a4fe716f616977b1d382ffa8a027a
Parents: 4b0ff03 eca1a4b
Author: Konstantin V Shvachko 
Authored: Fri Sep 21 18:22:48 2018 -0700
Committer: Konstantin V Shvachko 
Committed: Fri Sep 21 18:22:48 2018 -0700

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  4 +--
 .../org/apache/hadoop/hdfs/DFSUtilClient.java   |  2 +-
 .../java/org/apache/hadoop/hdfs/PeerCache.java  |  8 ++---
 .../hdfs/client/impl/BlockReaderFactory.java| 12 +++
 .../client/impl/BlockReaderLocalLegacy.java |  2 +-
 .../hdfs/shortcircuit/ShortCircuitCache.java|  4 +--
 .../hdfs/shortcircuit/ShortCircuitReplica.java  |  2 +-
 .../apache/hadoop/hdfs/util/IOUtilsClient.java  |  3 +-
 .../hadoop/hdfs/nfs/nfs3/OpenFileCtx.java   |  4 +--
 .../hadoop/hdfs/nfs/nfs3/OpenFileCtxCache.java  |  6 ++--
 .../java/org/apache/hadoop/hdfs/DFSUtil.java|  7 +++--
 .../org/apache/hadoop/hdfs/HdfsDtFetcher.java   |  7 +++--
 .../org/apache/hadoop/hdfs/NameNodeProxies.java |  7 +++--
 .../apache/hadoop/hdfs/SWebHdfsDtFetcher.java   |  7 +++--
 .../apache/hadoop/hdfs/WebHdfsDtFetcher.java|  7 +++--
 .../hadoop/hdfs/net/DomainPeerServer.java   |  6 ++--
 .../apache/hadoop/hdfs/net/TcpPeerServer.java   |  6 ++--
 .../hdfs/qjournal/client/AsyncLoggerSet.java|  8 ++---
 .../qjournal/client/QuorumJournalManager.java   |  6 ++--
 .../qjournal/server/GetJournalEditServlet.java  |  7 +++--
 .../hadoop/hdfs/qjournal/server/Journal.java| 12 +++
 .../hdfs/qjournal/server/JournalNode.java   | 10 +++---
 .../qjournal/server/JournalNodeRpcServer.java   |  4 +--
 .../token/block/BlockTokenSecretManager.java|  7 +++--
 .../DelegationTokenSecretManager.java   |  8 ++---
 .../hadoop/hdfs/server/balancer/Balancer.java   |  8 ++---
 .../hadoop/hdfs/server/balancer/Dispatcher.java |  6 ++--
 .../hdfs/server/balancer/NameNodeConnector.java |  7 +++--
 .../AvailableSpaceBlockPlacementPolicy.java |  8 ++---
 .../server/blockmanagement/DatanodeManager.java |  6 ++--
 .../server/blockmanagement/HostFileManager.java |  7 +++--
 .../hadoop/hdfs/server/common/JspHelper.java|  6 ++--
 .../hdfs/server/common/MetricsLoggerTask.java   |  6 ++--
 .../apache/hadoop/hdfs/server/common/Util.java  |  7 +++--
 .../hdfs/server/datanode/DirectoryScanner.java  |  7 +++--
 .../server/datanode/ProfilingFileIoEvents.java  |  7 +++--
 .../server/datanode/ShortCircuitRegistry.java   |  7 +++--
 .../AvailableSpaceVolumeChoosingPolicy.java |  7 +++--
 .../RoundRobinVolumeChoosingPolicy.java |  7 +++--
 .../datanode/fsdataset/impl/BlockPoolSlice.java |  8 ++---
 .../impl/FsDatasetAsyncDiskService.java |  7 +++--
 .../impl/RamDiskAsyncLazyPersistService.java|  7 +++--
 .../fsdataset/impl/RamDiskReplicaTracker.java   |  7 +++--
 .../server/datanode/web/DatanodeHttpServer.java |  6 ++--
 .../web/RestCsrfPreventionFilterHandler.java|  4 +--
 .../datanode/web/SimpleHttpProxyHandler.java|  4 +--
 .../web/webhdfs/DataNodeUGIProvider.java|  6 ++--
 .../datanode/web/webhdfs/ExceptionHandler.java  |  4 +--
 .../server/datanode/web/webhdfs/HdfsWriter.java |  8 ++---
 .../datanode/web/webhdfs/WebHdfsHandler.java| 10 +++---
 .../apache/hadoop/hdfs/server/mover/Mover.java  | 12 +++
 .../hadoop/hdfs/server/namenode/CachePool.java  |  2 --
 .../hdfs/server/namenode/CheckpointConf.java|  7 +++--
 .../hdfs/server/namenode/Checkpointer.java  |  8 ++---
 .../ContentSummaryComputationContext.java   |  8 ++---
 .../hadoop/hdfs/server/namenode/DfsServlet.java |  7 +++--
 .../namenode/EditLogBackupOutputStream.java |  7 +++--
 .../server/namenode/EditLogFileInputStream.java |  8 ++---
 .../namenode/EditLogFileOutputStream.java   | 11 ---
 .../hdfs/server/namenode/EditsDoubleBuffer.java |  7 +++--
 .../hdfs/server/namenode/FSEditLogAsync.java|  8 ++---
 .../hdfs/server/namenode/FSEditLogLoader.java   |  7 +++--
 .../hadoop/hdfs/server/namenode/FSImage.java|  9 +++---
 .../hdfs/server/namenode/FSImageFormat.java |  6 ++--
 .../server/namenode/FSImageFormatPBINode.java   |  7 +++--
 ...FSImagePreTransactionalStorageInspector.java | 10 +++---
 .../FSImageTransactionalStorageInspector.java   |  6 ++--
 .../hdfs/server/namenode/FSNamesystem.java  |  2 +-
 .../server/namenode/FSPermissionChecker.java|  6 ++--
 .../server/namenode/FileJournalManager.java |  7 +++--
 .../hadoop/hdfs/server/namenode/INode.java  |  6 ++--
 .../hdfs/server/namenode/INodesInPath.java  |  6 ++--
 .../hdfs/server/namenode/ImageServlet.java  |  

[05/18] hadoop git commit: HADOOP-15764. [JDK10] Migrate from sun.net.dns.ResolverConfiguration to the replacement. Contributed by Akira Ajisaka.

2018-09-21 Thread shv
HADOOP-15764. [JDK10] Migrate from sun.net.dns.ResolverConfiguration to the 
replacement. Contributed by Akira Ajisaka.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/429a07e0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/429a07e0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/429a07e0

Branch: refs/heads/HDFS-12943
Commit: 429a07e08c8c919b1679c0a80df73d147d95e8a6
Parents: 3da94a3
Author: Ewan Higgs 
Authored: Thu Sep 20 15:13:55 2018 +0200
Committer: Ewan Higgs 
Committed: Thu Sep 20 15:13:55 2018 +0200

--
 .../hadoop-client-minicluster/pom.xml| 17 -
 .../hadoop-client-runtime/pom.xml| 11 +++
 hadoop-common-project/hadoop-common/pom.xml  |  5 +
 .../org/apache/hadoop/security/SecurityUtil.java | 19 +--
 4 files changed, 33 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/429a07e0/hadoop-client-modules/hadoop-client-minicluster/pom.xml
--
diff --git a/hadoop-client-modules/hadoop-client-minicluster/pom.xml 
b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
index ea8d680..70fca8a 100644
--- a/hadoop-client-modules/hadoop-client-minicluster/pom.xml
+++ b/hadoop-client-modules/hadoop-client-minicluster/pom.xml
@@ -318,6 +318,10 @@
           <groupId>commons-net</groupId>
           <artifactId>commons-net</artifactId>
         </exclusion>
+        <exclusion>
+          <groupId>dnsjava</groupId>
+          <artifactId>dnsjava</artifactId>
+        </exclusion>
       </exclusions>
 
 
-            <filter>
-              <artifact>dnsjava:dnsjava</artifact>
-              <excludes>
-                <exclude>dig*</exclude>
-                <exclude>jnamed*</exclude>
-                <exclude>lookup*</exclude>
-                <exclude>update*</exclude>
-              </excludes>
-            </filter>
-
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/429a07e0/hadoop-client-modules/hadoop-client-runtime/pom.xml
--
diff --git a/hadoop-client-modules/hadoop-client-runtime/pom.xml 
b/hadoop-client-modules/hadoop-client-runtime/pom.xml
index 532fae9..bfa6c15 100644
--- a/hadoop-client-modules/hadoop-client-runtime/pom.xml
+++ b/hadoop-client-modules/hadoop-client-runtime/pom.xml
@@ -212,6 +212,17 @@
                 <exclude>ccache.txt</exclude>
               </excludes>
             </filter>
+
+            <filter>
+              <artifact>dnsjava:dnsjava</artifact>
+              <excludes>
+                <exclude>dig*</exclude>
+                <exclude>jnamed*</exclude>
+                <exclude>lookup*</exclude>
+                <exclude>update*</exclude>
+              </excludes>
+            </filter>
   
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/429a07e0/hadoop-common-project/hadoop-common/pom.xml
--
diff --git a/hadoop-common-project/hadoop-common/pom.xml 
b/hadoop-common-project/hadoop-common/pom.xml
index 695dcde..1e6da92 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -324,6 +324,11 @@
       <artifactId>mockwebserver</artifactId>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>dnsjava</groupId>
+      <artifactId>dnsjava</artifactId>
+      <scope>compile</scope>
+    </dependency>
   
 
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/429a07e0/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
index 0de334a..9fea535 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
@@ -27,6 +27,7 @@ import java.net.URI;
 import java.net.UnknownHostException;
 import java.security.PrivilegedAction;
 import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.List;
@@ -52,8 +53,9 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.ZKUtil;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-//this will need to be replaced someday when there is a suitable replacement
-import sun.net.dns.ResolverConfiguration;
+import org.xbill.DNS.Name;
+import org.xbill.DNS.ResolverConfig;
+
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.net.InetAddresses;
@@ -584,10 +586,15 @@ public final class SecurityUtil {
*   hadoop.security.token.service.use_ip=false 
*/
   protected static class 
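
A minimal standalone sketch of the dnsjava API adopted by this patch, assuming
dnsjava on the classpath (class name and output format are illustrative); this
is the call that stands in for sun.net.dns.ResolverConfiguration's search-path
lookup:

    import org.xbill.DNS.Name;
    import org.xbill.DNS.ResolverConfig;

    public class SearchPathDemo {
      public static void main(String[] args) {
        // Reads the platform resolver settings (e.g. /etc/resolv.conf).
        ResolverConfig config = ResolverConfig.getCurrentConfig();
        for (Name domain : config.searchPath()) {
          System.out.println("search domain: " + domain);
        }
      }
    }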

[06/18] hadoop git commit: YARN-8801. Fixed header comments for docker utility functions. Contributed by Zian Chen

2018-09-21 Thread shv
YARN-8801.  Fixed header comments for docker utility functions.
Contributed by Zian Chen


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aa4bd493
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aa4bd493
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aa4bd493

Branch: refs/heads/HDFS-12943
Commit: aa4bd493c309f09f8f2ea7449aa33c8b641fb8d2
Parents: 429a07e
Author: Eric Yang 
Authored: Thu Sep 20 13:08:59 2018 -0400
Committer: Eric Yang 
Committed: Thu Sep 20 13:08:59 2018 -0400

--
 .../container-executor/impl/utils/docker-util.h | 30 +++-
 1 file changed, 10 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aa4bd493/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.h
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.h
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.h
index 278dc53..7b7322d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.h
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/utils/docker-util.h
@@ -81,8 +81,7 @@ char *get_docker_binary(const struct configuration *conf);
  * Get the Docker command line string. The function will inspect the params 
file to determine the command to be run.
  * @param command_file File containing the params for the Docker command
  * @param conf Configuration struct containing the container-executor.cfg 
details
- * @param out Buffer to fill with the Docker command
- * @param outlen Size of the output buffer
+ * @param args Buffer to construct argv
  * @return Return code with 0 indicating success and non-zero codes indicating 
error
  */
 int get_docker_command(const char* command_file, const struct configuration* conf, args *args);
@@ -98,8 +97,7 @@ int get_use_entry_point_flag();
  * inspect command.
  * @param command_file File containing the params for the Docker inspect 
command
  * @param conf Configuration struct containing the container-executor.cfg 
details
- * @param out Buffer to fill with the inspect command
- * @param outlen Size of the output buffer
+ * @param args Buffer to construct argv
  * @return Return code with 0 indicating success and non-zero codes indicating 
error
  */
 int get_docker_inspect_command(const char* command_file, const struct configuration* conf, args *args);
@@ -108,8 +106,7 @@ int get_docker_inspect_command(const char* command_file, 
const struct configurat
  * Get the Docker load command line string. The function will verify that the 
params file is meant for the load command.
  * @param command_file File containing the params for the Docker load command
  * @param conf Configuration struct containing the container-executor.cfg 
details
- * @param out Buffer to fill with the load command
- * @param outlen Size of the output buffer
+ * @param args Buffer to construct argv
  * @return Return code with 0 indicating success and non-zero codes indicating 
error
  */
 int get_docker_load_command(const char* command_file, const struct configuration* conf, args *args);
@@ -118,8 +115,7 @@ int get_docker_load_command(const char* command_file, const 
struct configuration
  * Get the Docker pull command line string. The function will verify that the 
params file is meant for the pull command.
  * @param command_file File containing the params for the Docker pull command
  * @param conf Configuration struct containing the container-executor.cfg 
details
- * @param out Buffer to fill with the pull command
- * @param outlen Size of the output buffer
+ * @param args Buffer to construct argv
  * @return Return code with 0 indicating success and non-zero codes indicating 
error
  */
 int get_docker_pull_command(const char* command_file, const struct configuration* conf, args *args);
@@ -128,8 +124,7 @@ int get_docker_pull_command(const char* command_file, const 
struct configuration
  * Get the Docker rm command line string. The function will verify that the 
params file is meant for the rm command.
  * @param command_file File containing the params for the Docker rm command
  * @param conf Configuration struct containing the container-executor.cfg 
details
- * @param out Buffer to fill with the rm command
- * @param outlen Size of the output buffer
+ * @param args Buffer to construct argv
  * @return Return 

[17/18] hadoop git commit: HDFS-13778. [SBN read] TestStateAlignmentContextWithHA should use real ObserverReadProxyProvider instead of AlignmentContextProxyProvider. Contributed by Konstantin Shvachko

2018-09-21 Thread shv
HDFS-13778. [SBN read] TestStateAlignmentContextWithHA should use real 
ObserverReadProxyProvider instead of AlignmentContextProxyProvider. Contributed 
by Konstantin Shvachko and Plamen Jeliazkov.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a1f9c005
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a1f9c005
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a1f9c005

Branch: refs/heads/HDFS-12943
Commit: a1f9c0051664e4a13118258c2219081cd7d91e77
Parents: c04e0c0
Author: Konstantin V Shvachko 
Authored: Mon Sep 17 18:25:27 2018 -0700
Committer: Konstantin V Shvachko 
Committed: Fri Sep 21 18:31:11 2018 -0700

--
 .../hdfs/TestStateAlignmentContextWithHA.java   | 186 ++-
 1 file changed, 57 insertions(+), 129 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a1f9c005/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStateAlignmentContextWithHA.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStateAlignmentContextWithHA.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStateAlignmentContextWithHA.java
index 1acbd75..a494252 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStateAlignmentContextWithHA.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestStateAlignmentContextWithHA.java
@@ -18,28 +18,24 @@
 
 package org.apache.hadoop.hdfs;
 
-import static org.hamcrest.CoreMatchers.containsString;
 import static org.hamcrest.CoreMatchers.is;
-import static org.hamcrest.CoreMatchers.not;
 import static org.junit.Assert.assertThat;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.hdfs.server.namenode.ha.ClientHAProxyFactory;
-import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAProxyFactory;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
-import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos;
-import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.hdfs.server.namenode.ha.ObserverReadProxyProvider;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
-import org.mockito.Mockito;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.net.URI;
@@ -61,55 +57,31 @@ import java.util.concurrent.TimeUnit;
  * to the most recent alignment state of the server.
  */
 public class TestStateAlignmentContextWithHA {
+  public static final Logger LOG =
+  LoggerFactory.getLogger(TestStateAlignmentContextWithHA.class.getName());
 
   private static final int NUMDATANODES = 1;
   private static final int NUMCLIENTS = 10;
-  private static final int NUMFILES = 300;
+  private static final int NUMFILES = 120;
   private static final Configuration CONF = new HdfsConfiguration();
-  private static final String NAMESERVICE = "nameservice";
   private static final List<ClientGSIContext> AC_LIST = new ArrayList<>();
 
   private static MiniDFSCluster cluster;
   private static List clients;
-  private static ClientGSIContext spy;
 
   private DistributedFileSystem dfs;
   private int active = 0;
   private int standby = 1;
 
-  static class AlignmentContextProxyProvider
-  extends ConfiguredFailoverProxyProvider {
+  static class ORPPwithAlignmentContexts
+  extends ObserverReadProxyProvider {
 
-private ClientGSIContext alignmentContext;
-
-public AlignmentContextProxyProvider(
+public ORPPwithAlignmentContexts(
 Configuration conf, URI uri, Class xface,
 HAProxyFactory factory) throws IOException {
   super(conf, uri, xface, factory);
 
-  // Create and set AlignmentContext in HAProxyFactory.
-  // All proxies by factory will now have AlignmentContext assigned.
-  this.alignmentContext = (spy != null ? spy : new ClientGSIContext());
-  ((ClientHAProxyFactory) factory).setAlignmentContext(alignmentContext);
-
-  AC_LIST.add(alignmentContext);
-}
-  }
-
-  static class SpyConfiguredContextProxyProvider
-  extends ConfiguredFailoverProxyProvider {
-
-private ClientGSIContext alignmentContext;
-
-public SpyConfiguredContextProxyProvider(
-Configuration conf, URI uri, Class xface,
-HAProxyFactory factory) throws IOException {
-  super(conf, uri, 

[14/18] hadoop git commit: Merge commit 'b3161c4dd9367c68b30528a63c03756eaa32aaf9' into HDFS-12943

2018-09-21 Thread shv
Merge commit 'b3161c4dd9367c68b30528a63c03756eaa32aaf9' into HDFS-12943


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4b0ff03f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4b0ff03f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4b0ff03f

Branch: refs/heads/HDFS-12943
Commit: 4b0ff03f6f87dfb3c50f59e12377b9c24c4fc491
Parents: 4cdd0b9 b3161c4
Author: Konstantin V Shvachko 
Authored: Fri Sep 21 18:18:24 2018 -0700
Committer: Konstantin V Shvachko 
Committed: Fri Sep 21 18:18:24 2018 -0700

--
 .../hadoop/metrics2/annotation/Metric.java  |   5 +
 .../metrics2/lib/MutableMetricsFactory.java |   4 +
 .../apache/hadoop/hdds/scm/XceiverClient.java   |   8 +-
 .../hadoop/hdds/scm/XceiverClientGrpc.java  |   9 +-
 .../hadoop/hdds/scm/XceiverClientRatis.java |  36 ++-
 .../scm/client/ContainerOperationClient.java|   2 +-
 .../org/apache/hadoop/hdds/HddsConfigKeys.java  |  11 +
 .../apache/hadoop/hdds/scm/ScmConfigKeys.java   |  17 +-
 .../hadoop/hdds/scm/XceiverClientSpi.java   |  10 +-
 .../apache/hadoop/ozone/OzoneConfigKeys.java|  12 +
 .../main/java/org/apache/ratis/RatisHelper.java |  22 +-
 .../common/src/main/resources/ozone-default.xml |  31 ++-
 .../container/common/impl/HddsDispatcher.java   |   6 +-
 .../common/statemachine/StateContext.java   |  45 
 .../states/endpoint/HeartbeatEndpointTask.java  |  28 +++
 .../server/ratis/ContainerStateMachine.java |  17 +-
 .../server/ratis/XceiverServerRatis.java| 205 -
 .../container/ozoneimpl/OzoneContainer.java |   2 +-
 .../StorageContainerDatanodeProtocol.proto  |  26 +++
 .../hdds/scm/container/ContainerMapping.java| 109 +++--
 .../scm/container/ContainerStateManager.java|   5 +-
 .../hadoop/hdds/scm/container/Mapping.java  |  14 ++
 .../scm/container/closer/ContainerCloser.java   | 194 
 .../hadoop/hdds/scm/events/SCMEvents.java   |  24 +-
 .../hadoop/hdds/scm/node/StaleNodeHandler.java  |  16 +-
 .../hdds/scm/pipelines/Node2PipelineMap.java|  34 +--
 .../pipelines/PipelineActionEventHandler.java   |  60 +
 .../scm/pipelines/PipelineCloseHandler.java |  38 
 .../hdds/scm/pipelines/PipelineManager.java |  10 +-
 .../hdds/scm/pipelines/PipelineSelector.java|  46 ++--
 .../scm/pipelines/ratis/RatisManagerImpl.java   |  14 +-
 .../standalone/StandaloneManagerImpl.java   |   7 +-
 .../server/SCMDatanodeHeartbeatDispatcher.java  |  23 ++
 .../scm/server/StorageContainerManager.java |  13 +-
 .../scm/container/TestContainerMapping.java |  43 
 .../container/closer/TestContainerCloser.java   | 228 ---
 .../mapreduce/v2/hs/HistoryFileManager.java |  12 +-
 .../mapreduce/v2/hs/TestHistoryFileManager.java |  52 +
 .../ozone/om/helpers/OmKeyLocationInfo.java |  10 +
 .../hdds/scm/pipeline/TestNode2PipelineMap.java |   6 +-
 .../hdds/scm/pipeline/TestNodeFailure.java  | 126 ++
 .../hdds/scm/pipeline/TestPipelineClose.java|   6 +-
 .../apache/hadoop/ozone/MiniOzoneCluster.java   |  15 ++
 .../hadoop/ozone/MiniOzoneClusterImpl.java  |  21 ++
 .../transport/server/ratis/TestCSMMetrics.java  |   3 +-
 .../container/server/TestContainerServer.java   |   3 +-
 .../server/TestContainerStateMachine.java   |   2 +-
 .../hadoop/ozone/om/TestOzoneManager.java   |  26 ++-
 .../hadoop/ozone/om/VolumeManagerImpl.java  |   2 +-
 49 files changed, 936 insertions(+), 722 deletions(-)
--






[03/18] hadoop git commit: HADOOP-15748. S3 listing inconsistency can raise NPE in globber. Contributed by Steve Loughran.

2018-09-21 Thread shv
HADOOP-15748. S3 listing inconsistency can raise NPE in globber.
Contributed by Steve Loughran.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/646874c3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/646874c3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/646874c3

Branch: refs/heads/HDFS-12943
Commit: 646874c326139457b79cf8cfa547b3c91a78c7b4
Parents: 7ad27e9
Author: Steve Loughran 
Authored: Thu Sep 20 13:04:52 2018 +0100
Committer: Steve Loughran 
Committed: Thu Sep 20 13:04:52 2018 +0100

--
 .../src/main/java/org/apache/hadoop/fs/Globber.java| 13 -
 1 file changed, 12 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/646874c3/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java
index ca3db1d..b241a94 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java
@@ -245,7 +245,18 @@ class Globber {
   // incorrectly conclude that /a/b was a file and should not match
   // /a/*/*.  So we use getFileStatus of the path we just listed to
   // disambiguate.
-  if (!getFileStatus(candidate.getPath()).isDirectory()) {
+  Path path = candidate.getPath();
+  FileStatus status = getFileStatus(path);
+  if (status == null) {
+// null means the file was not found
+LOG.warn("File/directory {} not found:"
++ " it may have been deleted."
++ " If this is an object store, this can be a sign of"
++ " eventual consistency problems.",
+path);
+continue;
+  }
+  if (!status.isDirectory()) {
 continue;
   }
 }
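
The null check matters because Globber's private getFileStatus() wrapper maps
FileNotFoundException to null rather than letting it propagate. A minimal
sketch of that defensive pattern (helper names are illustrative, not Hadoop
API):

    import java.io.FileNotFoundException;
    import java.io.IOException;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    final class ListingProbe {
      private ListingProbe() {}

      /** Returns null when a just-listed entry has already vanished. */
      static FileStatus statusOrNull(FileSystem fs, Path path)
          throws IOException {
        try {
          return fs.getFileStatus(path);
        } catch (FileNotFoundException e) {
          return null;  // listed, then deleted: eventual consistency window
        }
      }

      static boolean isListedDirectory(FileSystem fs, Path path)
          throws IOException {
        FileStatus status = statusOrNull(fs, path);
        // Skip the entry instead of dereferencing a null status.
        return status != null && status.isDirectory();
      }
    }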





[11/18] hadoop git commit: YARN-8628. [UI2] Few duplicated or inconsistent information displayed in UI2. Contributed by Akhil PB.

2018-09-21 Thread shv
YARN-8628. [UI2] Few duplicated or inconsistent information displayed in UI2. 
Contributed by Akhil PB.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a2752779
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a2752779
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a2752779

Branch: refs/heads/HDFS-12943
Commit: a2752779ac1545f5e0a52fce3cff02a7007e95fb
Parents: 524f7cd
Author: Sunil G 
Authored: Fri Sep 21 15:47:10 2018 +0530
Committer: Sunil G 
Committed: Fri Sep 21 15:47:10 2018 +0530

--
 .../src/main/webapp/app/controllers/yarn-app/components.js| 2 +-
 .../webapp/app/controllers/yarn-component-instance/info.js| 5 +++--
 .../webapp/app/controllers/yarn-component-instances/info.js   | 3 ++-
 .../main/webapp/app/routes/yarn-component-instance/info.js| 4 ++--
 .../main/webapp/app/serializers/yarn-component-instance.js| 1 -
 .../src/main/webapp/app/serializers/yarn-container.js | 2 +-
 .../src/main/webapp/app/serializers/yarn-service-component.js | 2 +-
 .../main/webapp/app/serializers/yarn-timeline-container.js| 2 +-
 .../src/main/webapp/app/templates/yarn-app/configs.hbs| 7 ---
 .../webapp/app/templates/yarn-component-instance/info.hbs | 4 
 10 files changed, 15 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2752779/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/components.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/components.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/components.js
index 5981eb5..5a6c616 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/components.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/components.js
@@ -41,7 +41,7 @@ export default Ember.Controller.extend({
   getCellContent: function(row) {
 return {
   displayText: row.get('name'),
-  href: 
`#/yarn-component-instances/${row.get('name')}/info?service=${service}&=${appId}`
+  href: 
`#/yarn-component-instances/${row.get('name')}/info?service=${service}=${appId}`
 };
   }
 }, {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2752779/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instance/info.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instance/info.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instance/info.js
index e3abcb7..e920aa2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instance/info.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instance/info.js
@@ -19,7 +19,8 @@
 import Ember from 'ember';
 
 export default Ember.Controller.extend({
-  queryParams: ["appid", "service"],
+  queryParams: ["appid", "service", "containerid"],
   appid: undefined,
-  service: undefined
+  service: undefined,
+  containerid: undefined
 });

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2752779/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instances/info.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instances/info.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instances/info.js
index 44cfe17..be4b4f3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instances/info.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instances/info.js
@@ -42,9 +42,10 @@ export default Ember.Controller.extend({
   getCellContent: function(row) {
 var component = row.get('component');
 var instance = row.get('instanceName');
+var containerId = row.get('containerId');
 return {
   text: instance,
-  href: `#/yarn-component-instance/${component}/instances/${instance}/info?appid=${appId}&service=${serviceName}`
+  href: `#/yarn-component-instance/${component}/instances/${instance}/info?appid=${appId}&service=${serviceName}&containerid=${containerId}`
 };
   }
 }, {


[18/18] hadoop git commit: HDFS-13749. [SBN read] Use getServiceStatus to discover observer namenodes. Contributed by Chao Sun.

2018-09-21 Thread shv
HDFS-13749. [SBN read] Use getServiceStatus to discover observer namenodes. 
Contributed by Chao Sun.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/741547e1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/741547e1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/741547e1

Branch: refs/heads/HDFS-12943
Commit: 741547e1687c186e186d05be09e7d30dfea7226f
Parents: a1f9c00
Author: Erik Krogen 
Authored: Thu Sep 20 13:27:58 2018 -0700
Committer: Konstantin V Shvachko 
Committed: Fri Sep 21 18:31:40 2018 -0700

--
 .../hadoop/hdfs/NameNodeProxiesClient.java  |  47 -
 .../ha/AbstractNNFailoverProxyProvider.java |  36 +--
 .../namenode/ha/IPFailoverProxyProvider.java|   2 +-
 .../namenode/ha/ObserverReadProxyProvider.java  |  49 +
 .../ha/TestObserverReadProxyProvider.java   | 105 ++-
 5 files changed, 151 insertions(+), 88 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/741547e1/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/NameNodeProxiesClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/NameNodeProxiesClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/NameNodeProxiesClient.java
index 284e4ef..f90d671 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/NameNodeProxiesClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/NameNodeProxiesClient.java
@@ -25,12 +25,16 @@ import java.net.InetSocketAddress;
 import java.net.URI;
 import java.util.HashMap;
 import java.util.Map;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.ha.HAServiceProtocol;
+import org.apache.hadoop.ha.protocolPB.HAServiceProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.namenode.ha.ClientHAProxyFactory;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAProxyFactory;
 import org.apache.hadoop.ipc.AlignmentContext;
+import org.apache.hadoop.ipc.Client;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -62,13 +66,14 @@ import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 
 /**
- * Create proxy objects with {@link ClientProtocol} to communicate with a 
remote
- * NN. Generally use {@link 
NameNodeProxiesClient#createProxyWithClientProtocol(
+ * Create proxy objects with {@link ClientProtocol} and
+ * {@link HAServiceProtocol} to communicate with a remote NN. For the former,
+ * generally use {@link NameNodeProxiesClient#createProxyWithClientProtocol(
  * Configuration, URI, AtomicBoolean)}, which will create either an HA- or
  * non-HA-enabled client proxy as appropriate.
  *
- * For creating proxy objects with other protocols, please see
- * {@link NameNodeProxies#createProxy(Configuration, URI, Class)}.
+ * For creating proxy objects with other protocols, please see the server-side
+ * counterpart {@code NameNodeProxies#createProxy}
  */
 @InterfaceAudience.Private
 public class NameNodeProxiesClient {
@@ -76,6 +81,11 @@ public class NameNodeProxiesClient {
   private static final Logger LOG = LoggerFactory.getLogger(
   NameNodeProxiesClient.class);
 
+  /** Maximum # of retries for HAProxy with HAServiceProtocol. */
+  private static final int MAX_RETRIES = 3;
+  /** Initial retry delay for HAProxy with HAServiceProtocol. */
+  private static final int DELAY_MILLISECONDS = 200;
+
   /**
* Wrapper for a client proxy as well as its associated service ID.
* This is simply used as a tuple-like return type for created NN proxy.
@@ -119,7 +129,6 @@ public class NameNodeProxiesClient {
* @return an object containing both the proxy and the associated
* delegation token service it corresponds to
* @throws IOException if there is an error creating the proxy
-   * @see {@link NameNodeProxies#createProxy(Configuration, URI, Class)}.
*/
   public static ProxyAndInfo createProxyWithClientProtocol(
   Configuration conf, URI nameNodeUri, AtomicBoolean fallbackToSimpleAuth)
@@ -343,6 +352,34 @@ public class NameNodeProxiesClient {
 fallbackToSimpleAuth, null);
   }
 
+  /**
+   * Creates a non-HA proxy object with {@link HAServiceProtocol} to the
+   * given NameNode address, using the provided configuration. The proxy will
+   * use the RPC timeout configuration specified via {@link
+   * org.apache.hadoop.fs.CommonConfigurationKeys#IPC_CLIENT_RPC_TIMEOUT_KEY}.
+   * Upon failures, this will retry up to certain times with {@link 
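
Per the constants above (MAX_RETRIES = 3, DELAY_MILLISECONDS = 200), the
described retry behaviour can be expressed with Hadoop's stock retry helper;
a sketch of the policy construction only, not the full proxy creation:

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.io.retry.RetryPolicies;
    import org.apache.hadoop.io.retry.RetryPolicy;

    public class HaStatusRetryDemo {
      public static void main(String[] args) {
        // Up to 3 retries, 200 ms apart, before the failure surfaces.
        RetryPolicy policy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
            3, 200, TimeUnit.MILLISECONDS);
        System.out.println(policy);
      }
    }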

[01/18] hadoop git commit: HDFS-13892. Disk Balancer: Make execute command documentation better. Contributed by Ranith Sardar. [Forced Update!]

2018-09-21 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-12943 77e106f74 -> 741547e16 (forced update)


HDFS-13892. Disk Balancer: Make execute command documentation better.
Contributed by Ranith Sardar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6fc293fe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6fc293fe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6fc293fe

Branch: refs/heads/HDFS-12943
Commit: 6fc293fece935e3524ae59699aa3c3e3d98f6d86
Parents: 6b5838e
Author: Anu Engineer 
Authored: Wed Sep 19 20:48:41 2018 -0700
Committer: Anu Engineer 
Committed: Wed Sep 19 20:48:41 2018 -0700

--
 .../hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md| 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6fc293fe/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md
index 5dd6ffc..955f179 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md
@@ -78,7 +78,9 @@ Execute command takes a plan command executes it against the 
datanode that plan
 `hdfs diskbalancer -execute /system/diskbalancer/nodename.plan.json`
 
 This executes the plan by reading datanode’s address from the plan file.
-
+When DiskBalancer executes the plan, it begins an asynchronous process that can take a long time.
+So, the query command can help to get the current status of the execute command.
+
 | COMMAND\_OPTION| Description |
 |: |: |
 | `-skipDateCheck` |  Skip date check and force execute the plan.|
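
As a concrete illustration of the plan/execute/query flow this section
describes (node name is hypothetical; the plan path follows the doc's own
example):

    hdfs diskbalancer -plan datanode1.example.com
    hdfs diskbalancer -execute /system/diskbalancer/nodename.plan.json
    hdfs diskbalancer -query datanode1.example.com

-plan and -query are existing diskbalancer subcommands; query polls the
asynchronous execution that execute starts.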





[08/18] hadoop git commit: HDDS-394. Rename *Key Apis in DatanodeContainerProtocol to *Block apis. Contributed Dinesh Chitlangia.

2018-09-21 Thread shv
http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
index ed4536f..4f2b3a2 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
@@ -31,7 +31,7 @@ import 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
 .ContainerCommandResponseProto;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
-import org.apache.hadoop.ozone.container.common.helpers.KeyData;
+import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.utils.MetadataKeyFilters;
 import org.apache.hadoop.utils.MetadataStore;
@@ -116,7 +116,7 @@ public final class KeyValueContainerUtil {
 File chunksPath = new File(containerData.getChunksPath());
 
 // Close the DB connection and remove the DB handler from cache
-KeyUtils.removeDB(containerData, conf);
+BlockUtils.removeDB(containerData, conf);
 
 // Delete the Container MetaData path.
 FileUtils.deleteDirectory(containerMetaDataPath);
@@ -175,16 +175,16 @@ public final class KeyValueContainerUtil {
 }
 kvContainerData.setDbFile(dbFile);
 
-MetadataStore metadata = KeyUtils.getDB(kvContainerData, config);
+MetadataStore metadata = BlockUtils.getDB(kvContainerData, config);
 long bytesUsed = 0;
 List<Map.Entry<byte[], byte[]>> liveKeys = metadata
 .getRangeKVs(null, Integer.MAX_VALUE,
 MetadataKeyFilters.getNormalKeyFilter());
 bytesUsed = liveKeys.parallelStream().mapToLong(e-> {
-  KeyData keyData;
+  BlockData blockData;
   try {
-keyData = KeyUtils.getKeyData(e.getValue());
-return keyData.getSize();
+blockData = BlockUtils.getBlockData(e.getValue());
+return blockData.getSize();
   } catch (IOException ex) {
 return 0L;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/SmallFileUtils.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/SmallFileUtils.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/SmallFileUtils.java
index df60c60..3495363 100644
--- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/SmallFileUtils.java
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/SmallFileUtils.java
@@ -69,7 +69,7 @@ public final class SmallFileUtils {
 ContainerProtos.ReadChunkResponseProto.newBuilder();
 readChunkresponse.setChunkData(info.getProtoBufMessage());
 readChunkresponse.setData(ByteString.copyFrom(data));
-readChunkresponse.setBlockID(msg.getGetSmallFile().getKey().getBlockID());
+readChunkresponse.setBlockID(msg.getGetSmallFile().getBlock().getBlockID());
 
 ContainerProtos.GetSmallFileResponseProto.Builder getSmallFile =
 ContainerProtos.GetSmallFileResponseProto.newBuilder();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
--
diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
new file mode 100644
index 000..54c15fb
--- /dev/null
+++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
@@ -0,0 +1,229 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ * 

[12/18] hadoop git commit: YARN-8769. [Submarine] Allow user to specify customized quicklink(s) when submit Submarine job. Contributed by Wangda Tan.

2018-09-21 Thread shv
YARN-8769. [Submarine] Allow user to specify customized quicklink(s) when 
submit Submarine job. Contributed by Wangda Tan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0cd63461
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0cd63461
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0cd63461

Branch: refs/heads/HDFS-12943
Commit: 0cd63461021cc7cac39e7cc2bfaafd609c82fc79
Parents: a275277
Author: Sunil G 
Authored: Fri Sep 21 23:39:22 2018 +0530
Committer: Sunil G 
Committed: Fri Sep 21 23:39:22 2018 +0530

--
 .../yarn/submarine/client/cli/CliConstants.java |  1 +
 .../yarn/submarine/client/cli/RunJobCli.java|  8 ++
 .../submarine/client/cli/param/Quicklink.java   | 71 ++
 .../client/cli/param/RunJobParameters.java  | 18 
 .../yarnservice/YarnServiceJobSubmitter.java| 99 ++--
 .../runtimes/yarnservice/YarnServiceUtils.java  | 47 --
 .../yarnservice/TestYarnServiceRunJobCli.java   | 94 +++
 7 files changed, 303 insertions(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0cd63461/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/CliConstants.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/CliConstants.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/CliConstants.java
index d51ffc7..454ff1c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/CliConstants.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/CliConstants.java
@@ -49,6 +49,7 @@ public class CliConstants {
   public static final String WAIT_JOB_FINISH = "wait_job_finish";
   public static final String PS_DOCKER_IMAGE = "ps_docker_image";
   public static final String WORKER_DOCKER_IMAGE = "worker_docker_image";
+  public static final String QUICKLINK = "quicklink";
   public static final String TENSORBOARD_DOCKER_IMAGE =
   "tensorboard_docker_image";
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0cd63461/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/RunJobCli.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/RunJobCli.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/RunJobCli.java
index faa22d3..5054a94 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/RunJobCli.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/RunJobCli.java
@@ -117,6 +117,14 @@ public class RunJobCli extends AbstractCli {
 options.addOption(CliConstants.WORKER_DOCKER_IMAGE, true,
 "Specify docker image for WORKER, when this is not specified, WORKER "
 + "uses --" + CliConstants.DOCKER_IMAGE + " as default.");
+options.addOption(CliConstants.QUICKLINK, true, "Specify quicklink so YARN "
++ "web UI shows link to given role instance and port. When "
++ "--tensorboard is specified, quicklink to tensorboard instance will "
++ "be added automatically. The format of quick link is: "
++ "Quick_link_label=http(or https)://role-name:port. For example, "
++ "if want to link to first worker's 7070 port, and text of quicklink "
++ "is Notebook_UI, user need to specify --quicklink "
++ "Notebook_UI=https://master-0:7070");
 options.addOption("h", "help", false, "Print help");
 return options;
   }
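
For example, the option added above would be passed on job submission like
this (labels, role names, and ports are illustrative):

    --quicklink Notebook_UI=https://master-0:7070
    --quicklink Debug_UI=http://worker-0:8080

Since this patch stores quicklinks as a list in RunJobParameters, repeating
the option to register several links appears to be supported.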

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0cd63461/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/param/Quicklink.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/param/Quicklink.java
 

[09/18] hadoop git commit: HDDS-394. Rename *Key Apis in DatanodeContainerProtocol to *Block apis. Contributed Dinesh Chitlangia.

2018-09-21 Thread shv
HDDS-394. Rename *Key Apis in DatanodeContainerProtocol to *Block apis.
Contributed Dinesh Chitlangia.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/096a7160
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/096a7160
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/096a7160

Branch: refs/heads/HDFS-12943
Commit: 096a7160803494219581c067dfcdb67d2bd0bcdb
Parents: aa4bd49
Author: Anu Engineer 
Authored: Thu Sep 20 11:51:49 2018 -0700
Committer: Anu Engineer 
Committed: Thu Sep 20 11:51:49 2018 -0700

--
 .../hdds/scm/storage/ChunkOutputStream.java |  13 +-
 .../java/org/apache/hadoop/hdds/HddsUtils.java  |   8 +-
 .../scm/storage/ContainerProtocolCalls.java |  62 ++---
 .../container/common/helpers/BlockData.java | 255 +++
 .../ozone/container/common/helpers/KeyData.java | 253 --
 .../main/proto/DatanodeContainerProtocol.proto  |  74 +++---
 .../common/impl/OpenContainerBlockMap.java  |  46 ++--
 .../DeleteBlocksCommandHandler.java |   4 +-
 .../server/ratis/ContainerStateMachine.java |  28 +-
 .../keyvalue/KeyValueBlockIterator.java |  16 +-
 .../container/keyvalue/KeyValueContainer.java   |   4 +-
 .../container/keyvalue/KeyValueHandler.java | 124 -
 .../container/keyvalue/helpers/BlockUtils.java  | 199 +++
 .../container/keyvalue/helpers/KeyUtils.java| 199 ---
 .../keyvalue/helpers/KeyValueContainerUtil.java |  12 +-
 .../keyvalue/helpers/SmallFileUtils.java|   2 +-
 .../keyvalue/impl/BlockManagerImpl.java | 229 +
 .../container/keyvalue/impl/KeyManagerImpl.java | 227 -
 .../container/keyvalue/impl/package-info.java   |   5 +-
 .../keyvalue/interfaces/BlockManager.java   |  84 ++
 .../keyvalue/interfaces/KeyManager.java |  84 --
 .../keyvalue/interfaces/package-info.java   |  21 ++
 .../background/BlockDeletingService.java|  10 +-
 .../keyvalue/TestBlockManagerImpl.java  | 211 +++
 .../keyvalue/TestChunkManagerImpl.java  |   2 +-
 .../container/keyvalue/TestKeyManagerImpl.java  | 191 --
 .../keyvalue/TestKeyValueBlockIterator.java |  30 +--
 .../keyvalue/TestKeyValueContainer.java |  26 +-
 .../container/keyvalue/TestKeyValueHandler.java |  38 +--
 .../ozone/client/io/ChunkGroupInputStream.java  |   6 +-
 .../TestStorageContainerManagerHelper.java  |   8 +-
 .../ozone/client/rest/TestOzoneRestClient.java  |   8 +-
 .../ozone/client/rpc/TestOzoneRpcClient.java|   8 +-
 .../ozone/container/ContainerTestHelper.java|  84 +++---
 .../container/TestContainerReplication.java |  24 +-
 .../common/TestBlockDeletingService.java|  12 +-
 .../container/common/helpers/TestBlockData.java | 127 +
 .../container/common/helpers/TestKeyData.java   | 119 -
 .../common/impl/TestCloseContainerHandler.java  |  51 ++--
 .../common/impl/TestContainerPersistence.java   | 154 +--
 .../commandhandler/TestBlockDeletion.java   |   9 +-
 .../container/ozoneimpl/TestOzoneContainer.java | 100 
 .../server/TestContainerStateMachine.java   |   2 +-
 .../hadoop/ozone/om/TestOzoneManager.java   |   4 +-
 .../ozone/scm/TestContainerSmallFile.java   |   4 +-
 .../TestGetCommittedBlockLengthAndPutKey.java   |  12 +-
 .../hadoop/ozone/web/client/TestKeys.java   |  44 ++--
 .../hadoop/ozone/om/BucketManagerImpl.java  |   2 +-
 .../ozone/om/ScmBlockLocationTestIngClient.java |   2 +-
 .../genesis/BenchMarkDatanodeDispatcher.java|  42 +--
 50 files changed, 1680 insertions(+), 1599 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/096a7160/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
--
diff --git 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
index 8d311d0..10b3bb5 100644
--- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
+++ 
b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
@@ -23,7 +23,7 @@ import org.apache.commons.codec.digest.DigestUtils;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.XceiverClientSpi;
 import 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyData;
+import 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData;
 import 

[13/18] hadoop git commit: Merge commit '9af96d4ed4b6f80d3ca53a2b003d2ef768650dd4' into HDFS-12943

2018-09-21 Thread shv
Merge commit '9af96d4ed4b6f80d3ca53a2b003d2ef768650dd4' into HDFS-12943

# Conflicts:
#   
hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4cdd0b9c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4cdd0b9c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4cdd0b9c

Branch: refs/heads/HDFS-12943
Commit: 4cdd0b9cdc9c39384333c1757766f02b1b9d0daf
Parents: 94d7f90 9af96d4
Author: Konstantin V Shvachko 
Authored: Mon Sep 17 17:39:11 2018 -0700
Committer: Konstantin V Shvachko 
Committed: Fri Sep 21 18:17:41 2018 -0700

--
 .../org/apache/hadoop/http/IsActiveServlet.java | 71 +++
 .../apache/hadoop/http/TestIsActiveServlet.java | 95 
 .../router/IsRouterActiveServlet.java   | 37 
 .../federation/router/RouterHttpServer.java |  9 ++
 .../src/site/markdown/HDFSRouterFederation.md   |  2 +-
 .../namenode/IsNameNodeActiveServlet.java   | 33 +++
 .../server/namenode/NameNodeHttpServer.java |  3 +
 .../markdown/HDFSHighAvailabilityWithQJM.md |  8 ++
 .../IsResourceManagerActiveServlet.java | 38 
 .../server/resourcemanager/ResourceManager.java |  5 ++
 .../resourcemanager/webapp/RMWebAppFilter.java  |  3 +-
 .../src/site/markdown/ResourceManagerHA.md  |  5 ++
 12 files changed, 307 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4cdd0b9c/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md
--
diff --cc 
hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md
index 0d20091,e4363fb..76a9837
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSHighAvailabilityWithQJM.md
@@@ -423,34 -423,14 +423,42 @@@ This guide describes high-level uses o
  **Note:** This is not yet implemented, and at present will always return
  success, unless the given NameNode is completely down.
  
+ 
+ ### Load Balancer Setup
+ 
+ If you are running a set of NameNodes behind a Load Balancer (e.g. 
[Azure](https://docs.microsoft.com/en-us/azure/load-balancer/load-balancer-custom-probe-overview)
 or 
[AWS](https://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-healthchecks.html)
 ) and would like the Load Balancer to point to the active NN, you can use the 
/isActive HTTP endpoint as a health probe.
+ http://NN_HOSTNAME/isActive will return a 200 status code response if the NN 
is in Active HA State, 405 otherwise.
+ 
+ 
+ 
 +### In-Progress Edit Log Tailing
 +
 +Under the default settings, the Standby NameNode will only apply edits that 
are present in edit
 +log segments which have been finalized. If it is desirable to have a Standby 
NameNode which has more
 +up-to-date namespace information, it is possible to enable tailing of 
in-progress edit segments.
 +This setting will attempt to fetch edits from an in-memory cache on the 
JournalNodes and can reduce
 +the lag time before a transaction is applied on the Standby NameNode to the 
order of milliseconds.
 +If an edit cannot be served from the cache, the Standby will still be able to 
retrieve it, but the
 +lag time will be much longer. The relevant configurations are:
 +
 +*   **dfs.ha.tail-edits.in-progress** - Whether or not to enable tailing on 
in-progress edits logs.
 +This will also enable the in-memory edit cache on the JournalNodes. 
Disabled by default.
 +
 +*   **dfs.journalnode.edit-cache-size.bytes** - The size of the in-memory 
cache of edits on the
 +JournalNode. Edits take around 200 bytes each in a typical environment, 
so, for example, the
 +default of 1048576 (1MB) can hold around 5000 transactions. It is 
recommended to monitor the
 +JournalNode metrics RpcRequestCacheMissAmountNumMisses and 
RpcRequestCacheMissAmountAvgTxns,
 +which respectively count the number of requests unable to be served by 
the cache, and the extra
 +number of transactions which would have needed to have been in the cache 
for the request to
 +succeed. For example, if a request attempted to fetch edits starting at 
transaction ID 10, but
 +the oldest data in the cache was at transaction ID 20, a value of 10 
would be added to the
 +average.
 +
 +This feature is primarily useful in conjunction with the Standby/Observer 
Read feature. Using this
 +feature, read requests can be serviced from non-active NameNodes; thus 
tailing in-progress edits
 +provides these nodes with the ability to serve requests with data which is 
much more fresh. See the
 +Apache JIRA ticket HDFS-12943 for more information on 
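A minimal sketch of the kind of health probe a load balancer (or an operator script) could run against the /isActive endpoint documented above. The host name is a placeholder, and 9870 is assumed as the NameNode HTTP port:

import java.net.HttpURLConnection;
import java.net.URL;

public class IsActiveProbe {
  public static void main(String[] args) throws Exception {
    // Placeholder NameNode HTTP address; substitute a real host:port.
    URL url = new URL("http://nn-host:9870/isActive");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("GET");
    // Per the doc above: 200 means Active HA state, 405 means not active.
    int code = conn.getResponseCode();
    System.out.println(code == 200 ? "ACTIVE" : "NOT ACTIVE (" + code + ")");
    conn.disconnect();
  }
}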
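And a configuration sketch for the in-progress edit log tailing settings listed above. The keys are quoted from the doc; the cache size shown is the documented 1 MB default:

import org.apache.hadoop.conf.Configuration;

public class EditTailingConf {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Opt in to tailing in-progress edit segments on the Standby/Observer.
    conf.setBoolean("dfs.ha.tail-edits.in-progress", true);
    // ~200 bytes per edit, so 1048576 bytes holds roughly 5000 transactions.
    conf.setLong("dfs.journalnode.edit-cache-size.bytes", 1048576L);
    System.out.println(conf.get("dfs.ha.tail-edits.in-progress"));
  }
}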

[02/18] hadoop git commit: HADOOP-15736. Trash : Negative Value For Deletion Interval Leads To Abnormal Behaviour. Contributed by Ayush Saxena.

2018-09-21 Thread shv
HADOOP-15736. Trash : Negative Value For Deletion Interval Leads To Abnormal 
Behaviour. Contributed by Ayush Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ad27e97
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ad27e97
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ad27e97

Branch: refs/heads/HDFS-12943
Commit: 7ad27e97f05b13b33fdcef9cb63ace9c1728bfb5
Parents: 6fc293f
Author: Vinayakumar B 
Authored: Thu Sep 20 09:31:35 2018 +0530
Committer: Vinayakumar B 
Committed: Thu Sep 20 09:31:35 2018 +0530

--
 .../main/java/org/apache/hadoop/fs/TrashPolicyDefault.java   | 8 +++-
 .../src/test/java/org/apache/hadoop/fs/TestTrash.java| 6 ++
 2 files changed, 13 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ad27e97/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
index 6e101a2..39d5e73 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
@@ -101,6 +101,12 @@ public class TrashPolicyDefault extends TrashPolicy {
 this.emptierInterval = (long)(conf.getFloat(
 FS_TRASH_CHECKPOINT_INTERVAL_KEY, FS_TRASH_CHECKPOINT_INTERVAL_DEFAULT)
 * MSECS_PER_MINUTE);
+if (deletionInterval < 0) {
+  LOG.warn("Invalid value {} for deletion interval,"
+  + " deletion interaval can not be negative."
+  + "Changing to default value 0", deletionInterval);
+  this.deletionInterval = 0;
+}
   }
 
   private Path makeTrashRelativePath(Path basePath, Path rmFilePath) {
@@ -109,7 +115,7 @@ public class TrashPolicyDefault extends TrashPolicy {
 
   @Override
   public boolean isEnabled() {
-return deletionInterval != 0;
+return deletionInterval > 0;
   }
 
   @SuppressWarnings("deprecation")

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ad27e97/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
index 568821b..04f56fb 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestTrash.java
@@ -132,6 +132,9 @@ public class TestTrash {
 conf.setLong(FS_TRASH_INTERVAL_KEY, 0); // disabled
 assertFalse(new Trash(conf).isEnabled());
 
+conf.setLong(FS_TRASH_INTERVAL_KEY, -1); // disabled
+assertFalse(new Trash(conf).isEnabled());
+
 conf.setLong(FS_TRASH_INTERVAL_KEY, 10); // 10 minute
 assertTrue(new Trash(conf).isEnabled());
 
@@ -526,6 +529,9 @@ public class TestTrash {
 conf.setLong(FS_TRASH_INTERVAL_KEY, 0); // disabled
 assertFalse(new Trash(conf).isEnabled());
 
+conf.setLong(FS_TRASH_INTERVAL_KEY, -1); // disabled
+assertFalse(new Trash(conf).isEnabled());
+
 conf.setLong(FS_TRASH_INTERVAL_KEY, 10); // 10 minute
 assertTrue(new Trash(conf).isEnabled());
 





[16/18] hadoop git commit: Merge branch 'trunk' into HDFS-12943

2018-09-21 Thread shv
Merge branch 'trunk' into HDFS-12943


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c04e0c0e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c04e0c0e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c04e0c0e

Branch: refs/heads/HDFS-12943
Commit: c04e0c0e9951aab88d7e5a4f47bf24a045f6171c
Parents: 6c37db9 0cd6346
Author: Konstantin V Shvachko 
Authored: Fri Sep 21 18:24:51 2018 -0700
Committer: Konstantin V Shvachko 
Committed: Fri Sep 21 18:28:31 2018 -0700

--
 dev-support/bin/create-release  |4 +-
 dev-support/bin/ozone-dist-layout-stitching |   32 +-
 dev-support/bin/yetus-wrapper   |2 +-
 dev-support/docker/Dockerfile   |  212 +-
 .../assemblies/hadoop-src-with-hdds.xml |   56 +
 .../assemblies/hadoop-src-with-hdsl.xml |   56 -
 .../hadoop-client-minicluster/pom.xml   |   17 +-
 .../hadoop-client-runtime/pom.xml   |   11 +
 .../hadoop-annotations/pom.xml  |   24 +
 hadoop-common-project/hadoop-common/pom.xml |7 +
 .../src/main/conf/log4j.properties  |   23 -
 .../apache/hadoop/crypto/CryptoStreamUtils.java |   21 +-
 .../main/java/org/apache/hadoop/fs/Globber.java |   13 +-
 .../apache/hadoop/fs/TrashPolicyDefault.java|   10 +-
 .../main/java/org/apache/hadoop/ha/HAAdmin.java |7 +-
 .../org/apache/hadoop/io/nativeio/NativeIO.java |   15 +-
 .../org/apache/hadoop/ipc/CallQueueManager.java |5 +-
 .../apache/hadoop/log/LogThrottlingHelper.java  |  358 ++
 .../apache/hadoop/security/SecurityUtil.java|   34 +-
 .../org/apache/hadoop/util/CleanerUtil.java |  199 +
 .../org/apache/hadoop/util/StringUtils.java |2 +-
 .../hadoop/util/curator/ZKCuratorManager.java   |   10 +-
 .../src/main/resources/core-default.xml |   24 +-
 .../markdown/release/0.1.0/CHANGELOG.0.1.0.md   |  101 +
 .../markdown/release/0.1.0/CHANGES.0.1.0.md |  101 -
 .../markdown/release/0.1.1/CHANGELOG.0.1.1.md   |   39 +
 .../markdown/release/0.1.1/CHANGES.0.1.1.md |   39 -
 .../markdown/release/0.10.0/CHANGELOG.0.10.0.md |  101 +
 .../markdown/release/0.10.0/CHANGES.0.10.0.md   |  101 -
 .../markdown/release/0.10.1/CHANGELOG.0.10.1.md |   49 +
 .../markdown/release/0.10.1/CHANGES.0.10.1.md   |   49 -
 .../markdown/release/0.11.0/CHANGELOG.0.11.0.md |   96 +
 .../markdown/release/0.11.0/CHANGES.0.11.0.md   |   96 -
 .../markdown/release/0.11.1/CHANGELOG.0.11.1.md |   34 +
 .../markdown/release/0.11.1/CHANGES.0.11.1.md   |   34 -
 .../markdown/release/0.11.2/CHANGELOG.0.11.2.md |   33 +
 .../markdown/release/0.11.2/CHANGES.0.11.2.md   |   33 -
 .../markdown/release/0.12.0/CHANGELOG.0.12.0.md |  113 +
 .../markdown/release/0.12.0/CHANGES.0.12.0.md   |  113 -
 .../markdown/release/0.12.1/CHANGELOG.0.12.1.md |   59 +
 .../markdown/release/0.12.1/CHANGES.0.12.1.md   |   59 -
 .../markdown/release/0.12.2/CHANGELOG.0.12.2.md |   34 +
 .../markdown/release/0.12.2/CHANGES.0.12.2.md   |   34 -
 .../markdown/release/0.12.3/CHANGELOG.0.12.3.md |   38 +
 .../markdown/release/0.12.3/CHANGES.0.12.3.md   |   38 -
 .../markdown/release/0.13.0/CHANGELOG.0.13.0.md |  173 +
 .../markdown/release/0.13.0/CHANGES.0.13.0.md   |  173 -
 .../markdown/release/0.14.0/CHANGELOG.0.14.0.md |  214 +
 .../markdown/release/0.14.0/CHANGES.0.14.0.md   |  214 -
 .../markdown/release/0.14.1/CHANGELOG.0.14.1.md |   33 +
 .../markdown/release/0.14.1/CHANGES.0.14.1.md   |   33 -
 .../markdown/release/0.14.2/CHANGELOG.0.14.2.md |   40 +
 .../markdown/release/0.14.2/CHANGES.0.14.2.md   |   40 -
 .../markdown/release/0.14.3/CHANGELOG.0.14.3.md |   34 +
 .../markdown/release/0.14.3/CHANGES.0.14.3.md   |   34 -
 .../markdown/release/0.14.4/CHANGELOG.0.14.4.md |   39 +
 .../markdown/release/0.14.4/CHANGES.0.14.4.md   |   39 -
 .../markdown/release/0.15.0/CHANGELOG.0.15.0.md |  190 +
 .../markdown/release/0.15.0/CHANGES.0.15.0.md   |  190 -
 .../markdown/release/0.15.1/CHANGELOG.0.15.1.md |   49 +
 .../markdown/release/0.15.1/CHANGES.0.15.1.md   |   49 -
 .../markdown/release/0.15.2/CHANGELOG.0.15.2.md |   51 +
 .../markdown/release/0.15.2/CHANGES.0.15.2.md   |   51 -
 .../markdown/release/0.15.3/CHANGELOG.0.15.3.md |   35 +
 .../markdown/release/0.15.3/CHANGES.0.15.3.md   |   35 -
 .../markdown/release/0.15.4/CHANGELOG.0.15.4.md |   31 +
 .../markdown/release/0.15.4/CHANGES.0.15.4.md   |   31 -
 .../markdown/release/0.16.0/CHANGELOG.0.16.0.md |  225 ++
 .../markdown/release/0.16.0/CHANGES.0.16.0.md   |  225 --
 .../markdown/release/0.16.1/CHANGELOG.0.16.1.md |   94 +
 .../markdown/release/0.16.1/CHANGES.0.16.1.md   |   94 -
 .../markdown/release/0.16.2/CHANGELOG.0.16.2.md |   59 +
 .../markdown/release/0.16.2/CHANGES.0.16.2.md   |   59 -
 .../markdown/release/0.16.3/CHANGELOG.0.16.3.md |   37 +
 .../markdown/release/0.16.3/CHANGES.0.16.3.md   |   37 -
 

hadoop git commit: YARN-7599. [GPG] ApplicationCleaner in Global Policy Generator. Contributed by Botong Huang.

2018-09-21 Thread botong
Repository: hadoop
Updated Branches:
  refs/heads/YARN-7402 ef4d71c0b -> 3671dc3ef


YARN-7599. [GPG] ApplicationCleaner in Global Policy Generator. Contributed by 
Botong Huang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3671dc3e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3671dc3e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3671dc3e

Branch: refs/heads/YARN-7402
Commit: 3671dc3eff8b4de8ba33922204aa00af98ea20ba
Parents: ef4d71c
Author: Botong Huang 
Authored: Fri Sep 21 17:30:44 2018 -0700
Committer: Botong Huang 
Committed: Fri Sep 21 17:30:44 2018 -0700

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  25 +++
 .../src/main/resources/yarn-default.xml |  28 
 .../store/impl/MemoryFederationStateStore.java  |   2 -
 .../pb/ApplicationHomeSubClusterPBImpl.java |   3 +
 .../utils/FederationStateStoreFacade.java   |  33 
 .../server/globalpolicygenerator/GPGUtils.java  |  21 ++-
 .../GlobalPolicyGenerator.java  |  23 ++-
 .../applicationcleaner/ApplicationCleaner.java  | 154 +++
 .../DefaultApplicationCleaner.java  |  82 ++
 .../applicationcleaner/package-info.java|  19 +++
 .../TestDefaultApplicationCleaner.java  | 130 
 11 files changed, 513 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3671dc3e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 54e29a0..1464892 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -3383,6 +3383,31 @@ public class YarnConfiguration extends Configuration {
   FEDERATION_GPG_PREFIX + "subcluster.heartbeat.expiration-ms";
   public static final long DEFAULT_GPG_SUBCLUSTER_EXPIRATION_MS = 180;
 
+  // The application cleaner class to use
+  public static final String GPG_APPCLEANER_CLASS =
+  FEDERATION_GPG_PREFIX + "application.cleaner.class";
+  public static final String DEFAULT_GPG_APPCLEANER_CLASS =
+  "org.apache.hadoop.yarn.server.globalpolicygenerator"
+  + ".applicationcleaner.DefaultApplicationCleaner";
+
+  // The interval at which the application cleaner runs, -1 means disabled
+  public static final String GPG_APPCLEANER_INTERVAL_MS =
+  FEDERATION_GPG_PREFIX + "application.cleaner.interval-ms";
+  public static final long DEFAULT_GPG_APPCLEANER_INTERVAL_MS = -1;
+
+  /**
+   * Specifications on how (many times) to contact Router for apps. We need to
+   * do this because Router might return partial application list because some
+   * sub-cluster RM is not responsive (e.g. failing over).
+   *
+   * Should have three values separated by comma: minimal success retries,
+   * maximum total retry, retry interval (ms).
+   */
+  public static final String GPG_APPCLEANER_CONTACT_ROUTER_SPEC =
+  FEDERATION_GPG_PREFIX + "application.cleaner.contact.router.spec";
+  public static final String DEFAULT_GPG_APPCLEANER_CONTACT_ROUTER_SPEC =
+  "3,10,60";
+
   public static final String FEDERATION_GPG_POLICY_PREFIX =
   FEDERATION_GPG_PREFIX + "policy.generator.";
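The three-part contact-router spec described in the javadoc above could be consumed along these lines. This is an illustrative sketch, not the actual GPG code; the class and field names are assumptions:

public final class RouterContactSpec {
  final int minSuccessRetries;
  final int maxTotalRetries;
  final long retryIntervalMs;

  private RouterContactSpec(int min, int max, long intervalMs) {
    this.minSuccessRetries = min;
    this.maxTotalRetries = max;
    this.retryIntervalMs = intervalMs;
  }

  // Parses "minSuccessRetries,maxTotalRetries,retryIntervalMs",
  // e.g. the default spec quoted above.
  static RouterContactSpec parse(String spec) {
    String[] parts = spec.split(",");
    if (parts.length != 3) {
      throw new IllegalArgumentException(
          "Expected 3 comma-separated values: " + spec);
    }
    return new RouterContactSpec(
        Integer.parseInt(parts[0].trim()),
        Integer.parseInt(parts[1].trim()),
        Long.parseLong(parts[2].trim()));
  }
}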
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3671dc3e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 9e71cc6..39871df 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -3635,6 +3635,34 @@
 
   </property>
 
+  <property>
+    <description>The Application Cleaner implementation class for GPG to use.
+    </description>
+    <name>yarn.federation.gpg.application.cleaner.class</name>
+    <value>org.apache.hadoop.yarn.server.globalpolicygenerator.applicationcleaner.DefaultApplicationCleaner</value>
+  </property>
+
+  <property>
+    <description>The interval at which the application cleaner runs, -1 means disabled.
+    </description>
+    <name>yarn.federation.gpg.application.cleaner.interval-ms</name>
+    <value>-1</value>
+  </property>
+
+  <property>
+    <description>Specifications on how (many times) to contact Router 

hadoop git commit: HDFS-13927. Improve TestDataNodeMultipleRegistrations#testDNWithInvalidStorageWithHA wait. Contributed by Ayush Saxena.

2018-09-21 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/trunk 0cd634610 -> 4758b4b6d


HDFS-13927. Improve 
TestDataNodeMultipleRegistrations#testDNWithInvalidStorageWithHA wait. 
Contributed by Ayush Saxena.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4758b4b6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4758b4b6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4758b4b6

Branch: refs/heads/trunk
Commit: 4758b4b6dabb315566fc3819ed1798f3606c31f4
Parents: 0cd6346
Author: Inigo Goiri 
Authored: Fri Sep 21 15:32:28 2018 -0700
Committer: Inigo Goiri 
Committed: Fri Sep 21 15:32:28 2018 -0700

--
 .../TestDataNodeMultipleRegistrations.java  | 26 ++--
 1 file changed, 19 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4758b4b6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
index bd28fde..b2e3142 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMultipleRegistrations.java
@@ -19,7 +19,6 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
 import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNotSame;
 import static org.junit.Assert.assertTrue;
@@ -31,6 +30,7 @@ import java.util.Map;
 
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import com.google.common.base.Supplier;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -38,9 +38,11 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.MiniDFSNNTopology;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.datanode.BPServiceActor.RunningState;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.Assert;
 import org.junit.Before;
@@ -293,12 +295,22 @@ public class TestDataNodeMultipleRegistrations {
   cluster.restartNameNode(0, false);
   cluster.restartNameNode(1, false);
   cluster.restartDataNode(dnProp);
-  
-  // let the initialization be complete
-  Thread.sleep(1);
-  dn = cluster.getDataNodes().get(0);
-  assertFalse("Datanode should have shutdown as only service failed",
-  dn.isDatanodeUp());
+  final DataNode restartedDn = cluster.getDataNodes().get(0);
+
+  // Wait till datanode confirms FAILED running state.
+  GenericTestUtils.waitFor(new Supplier<Boolean>() {
+@Override
+public Boolean get() {
+  for (BPOfferService bp : restartedDn.getAllBpOs()) {
+for (BPServiceActor ba : bp.getBPServiceActors()) {
+  if (!ba.getRunningState().equals(RunningState.FAILED.name())) {
+return false;
+  }
+}
+  }
+  return true;
+}
+  }, 500, 1);
 } finally {
   cluster.shutdown();
 }





hadoop git commit: YARN-6510. Fix procfs stat file warning caused by process names that include parentheses. (Wilfred Spiegelenburg via Haibo Chen)

2018-09-21 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 7a268528c -> 552248139


YARN-6510. Fix procfs stat file warning caused by process names that include 
parentheses. (Wilfred Spiegelenburg via Haibo Chen)

(cherry picked from commit 4f3ca0396a810f54f7fd0489a224c1bb13143aa4)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/55224813
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/55224813
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/55224813

Branch: refs/heads/branch-2.8
Commit: 552248139b34f5b3f157ac76936fc27ecaf0c9ec
Parents: 7a26852
Author: Haibo Chen 
Authored: Wed Apr 26 11:43:27 2017 -0700
Committer: Jason Lowe 
Committed: Fri Sep 21 15:06:10 2018 -0500

--
 .../org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java  | 2 +-
 .../apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java  | 8 
 2 files changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/55224813/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
index 77c5655..52f1d0e 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
@@ -58,7 +58,7 @@ public class ProcfsBasedProcessTree extends 
ResourceCalculatorProcessTree {
   private static final String PROCFS = "/proc/";
 
   private static final Pattern PROCFS_STAT_FILE_FORMAT = Pattern.compile(
-  "^([\\d-]+)\\s\\(([^)]+)\\)\\s[^\\s]\\s([\\d-]+)\\s([\\d-]+)\\s" +
+  "^([\\d-]+)\\s\\((.*)\\)\\s[^\\s]\\s([\\d-]+)\\s([\\d-]+)\\s" +
   "([\\d-]+)\\s([\\d-]+\\s){7}(\\d+)\\s(\\d+)\\s([\\d-]+\\s){7}(\\d+)\\s" +
   "(\\d+)(\\s[\\d-]+){15}");
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55224813/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
index 96ec659..4ffc67f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestProcfsBasedProcessTree.java
@@ -421,7 +421,7 @@ public class TestProcfsBasedProcessTree {
   new ProcessStatInfo(new String[]{"200", "process two", "100", "100",
   "100", "20", "200", "2000", "400"});
   procInfos[2] =
-  new ProcessStatInfo(new String[]{"300", "proc3", "200", "100",
+  new ProcessStatInfo(new String[]{"300", "proc(3)", "200", "100",
   "100", "30", "300", "3000", "600"});
   procInfos[3] =
   new ProcessStatInfo(new String[]{"400", "proc4", "1", "400", "400",
@@ -565,7 +565,7 @@ public class TestProcfsBasedProcessTree {
   new ProcessStatInfo(new String[]{"200", "process two", "100", "100",
   "100", "20", "200"});
   procInfos[2] =
-  new ProcessStatInfo(new String[]{"300", "proc3", "1", "300", "300",
+  new ProcessStatInfo(new String[]{"300", "proc(3)", "1", "300", "300",
   "30", "300"});
   procInfos[3] =
   new ProcessStatInfo(new String[]{"400", "proc4", "100", "100",
@@ -817,7 +817,7 @@ public class TestProcfsBasedProcessTree {
   new ProcessStatInfo(new String[]{"200", "process two", "100", "100",
   "100", "20", "200", "2000", "400"});
   procInfos[2] =
-  new ProcessStatInfo(new String[]{"300", "proc3", "200", "100",
+  new ProcessStatInfo(new String[]{"300", "proc(3)", "200", "100",
   "100", "30", "300", "3000", "600"});
   procInfos[3] =
   new ProcessStatInfo(new String[]{"400", "proc4", "200", "100",
@@ -840,7 +840,7 @@ public class TestProcfsBasedProcessTree {
   String[] cmdLines = new String[numProcesses];
   cmdLines[0] = "proc1 arg1 arg2";
   cmdLines[1] = "process two arg3 arg4";
-  cmdLines[2] = "proc3 arg5 arg6";
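To see why the widened name group matters, here is a self-contained sketch that runs the fixed pattern against a synthetic /proc/<pid>/stat line whose command name contains parentheses. The 36 numeric fields are dummy values:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class StatLineDemo {
  // Same shape as the fixed ProcfsBasedProcessTree pattern: the command
  // name group is now the greedy "(.*)", so parentheses inside the name
  // no longer break the match.
  private static final Pattern STAT_FILE_FORMAT = Pattern.compile(
      "^([\\d-]+)\\s\\((.*)\\)\\s[^\\s]\\s([\\d-]+)\\s([\\d-]+)\\s" +
      "([\\d-]+)\\s([\\d-]+\\s){7}(\\d+)\\s(\\d+)\\s([\\d-]+\\s){7}(\\d+)\\s" +
      "(\\d+)(\\s[\\d-]+){15}");

  public static void main(String[] args) {
    // Synthetic stat line: pid 300, name "proc(3)", state S,
    // followed by 36 dummy numeric fields.
    StringBuilder line = new StringBuilder("300 (proc(3)) S");
    for (int i = 1; i <= 36; i++) {
      line.append(' ').append(i);
    }
    Matcher m = STAT_FILE_FORMAT.matcher(line);
    if (m.find()) {
      System.out.println("pid=" + m.group(1) + " name=" + m.group(2));
    }
  }
}

With the old "([^)]+)" name group this line would fail to match at the first ")" inside "proc(3)"; with the greedy group it prints pid=300 name=proc(3).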

hadoop git commit: HDFS-13830. Backport HDFS-13141 to branch-3.0: WebHDFS: Add support for getting snapshottable directory list. Contributed by Siyao Meng, Lokesh Jain.

2018-09-21 Thread weichiu
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 fc6d851f7 -> 2449795b8


HDFS-13830. Backport HDFS-13141 to branch-3.0: WebHDFS: Add support for getting 
snapshottable directory list. Contributed by Siyao Meng, Lokesh Jain.

Signed-off-by: Wei-Chiu Chuang 


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2449795b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2449795b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2449795b

Branch: refs/heads/branch-3.0
Commit: 2449795b8e296baba0b1c157c0ab3b856bd05f7e
Parents: fc6d851
Author: Siyao Meng 
Authored: Fri Sep 21 12:37:43 2018 -0700
Committer: Wei-Chiu Chuang 
Committed: Fri Sep 21 12:41:03 2018 -0700

--
 .../java/org/apache/hadoop/fs/FileStatus.java   | 26 +--
 .../hadoop/hdfs/DFSOpsCountStatistics.java  |  1 +
 .../hadoop/hdfs/protocol/HdfsFileStatus.java| 28 +++-
 .../apache/hadoop/hdfs/web/JsonUtilClient.java  | 43 
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  | 14 
 .../hadoop/hdfs/web/resources/GetOpParam.java   |  3 +-
 .../web/resources/NamenodeWebHdfsMethods.java   |  7 ++
 .../org/apache/hadoop/hdfs/web/JsonUtil.java| 20 ++
 .../org/apache/hadoop/hdfs/web/TestWebHDFS.java | 73 +++-
 9 files changed, 205 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2449795b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
index 35f5316..bdfbd20 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
@@ -56,15 +56,25 @@ public class FileStatus implements Writable, 
Comparable,
   private Path symlink;
   private Set<AttrFlags> attr;
 
-  private enum AttrFlags {
+  public enum AttrFlags {
 HAS_ACL,
 HAS_CRYPT,
 HAS_EC,
 SNAPSHOT_ENABLED
   }
-  private static final Set<AttrFlags> NONE = Collections.emptySet();
-  private static Set<AttrFlags> flags(boolean acl, boolean crypt, boolean ec) {
-if (!(acl || crypt || ec)) {
+  public static final Set<AttrFlags> NONE = Collections.emptySet();
+
+  /**
+   * Convert boolean attributes to a set of flags.
+   * @param acl   See {@link AttrFlags#HAS_ACL}.
+   * @param crypt See {@link AttrFlags#HAS_CRYPT}.
+   * @param ecSee {@link AttrFlags#HAS_EC}.
+   * @param snSee {@link AttrFlags#SNAPSHOT_ENABLED}.
+   * @return converted set of flags.
+   */
+  public static Set<AttrFlags> flags(boolean acl, boolean crypt,
+  boolean ec, boolean sn) {
+if (!(acl || crypt || ec || sn)) {
   return NONE;
 }
 EnumSet<AttrFlags> ret = EnumSet.noneOf(AttrFlags.class);
@@ -77,6 +87,9 @@ public class FileStatus implements Writable, 
Comparable,
 if (ec) {
   ret.add(AttrFlags.HAS_EC);
 }
+if (sn) {
+  ret.add(AttrFlags.SNAPSHOT_ENABLED);
+}
 return ret;
   }
 
@@ -136,7 +149,7 @@ public class FileStatus implements Writable, 
Comparable,
 this.group = (group == null) ? "" : group;
 this.symlink = symlink;
 this.path = path;
-attr = flags(hasAcl, isEncrypted, isErasureCoded);
+attr = flags(hasAcl, isEncrypted, isErasureCoded, false);
 
 // The variables isdir and symlink indicate the type:
 // 1. isdir implies directory, in which case symlink must be null.
@@ -480,7 +493,8 @@ public class FileStatus implements Writable, 
Comparable,
 setGroup(other.getGroup());
 setSymlink((other.isSymlink() ? other.getSymlink() : null));
 setPath(other.getPath());
-attr = flags(other.hasAcl(), other.isEncrypted(), other.isErasureCoded());
+attr = flags(other.hasAcl(), other.isEncrypted(), other.isErasureCoded(),
+other.isSnapshotEnabled());
 assert !(isDirectory() && isSymlink()) : "A directory cannot be a symlink";
   }
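A hypothetical caller of the now-public flags(...) helper above — not part of the patch, just an illustration of the new snapshot-enabled bit:

import java.util.Set;
import org.apache.hadoop.fs.FileStatus;

public class FlagsDemo {
  public static void main(String[] args) {
    // Attribute set for a snapshot-enabled path with no ACL,
    // encryption, or erasure coding.
    Set<FileStatus.AttrFlags> attrs =
        FileStatus.flags(false, false, false, true);
    System.out.println(
        attrs.contains(FileStatus.AttrFlags.SNAPSHOT_ENABLED)); // true
  }
}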
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2449795b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java
index bbd1bd7..3dcf13b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java
@@ 
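A sketch of client-side usage of the backported WebHDFS feature. The method name getSnapshottableDirectoryList() is assumed from the upstream HDFS-13141 change, and the NameNode address is a placeholder:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;

public class ListSnapshottableDirs {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder NameNode address; substitute a real host:port.
    WebHdfsFileSystem fs = (WebHdfsFileSystem)
        new Path("webhdfs://nn-host:9870/").getFileSystem(conf);
    for (SnapshottableDirectoryStatus s : fs.getSnapshottableDirectoryList()) {
      System.out.println(s.getFullPath());
    }
  }
}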

hadoop git commit: YARN-8809. Refactor AbstractYarnScheduler and CapacityScheduler OPPORTUNISTIC container completion codepaths. (Haibo Chen via asuresh)

2018-09-21 Thread asuresh
Repository: hadoop
Updated Branches:
  refs/heads/YARN-1011 36ec27ed3 -> 4d858dd92


YARN-8809. Refactor AbstractYarnScheduler and CapacityScheduler OPPORTUNISTIC 
container completion codepaths. (Haibo Chen via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4d858dd9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4d858dd9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4d858dd9

Branch: refs/heads/YARN-1011
Commit: 4d858dd92d0cf388330c41e984fde5fd0c5aa95a
Parents: 36ec27e
Author: Arun Suresh 
Authored: Fri Sep 21 12:02:00 2018 -0700
Committer: Arun Suresh 
Committed: Fri Sep 21 12:02:00 2018 -0700

--
 .../scheduler/AbstractYarnScheduler.java| 32 
 .../scheduler/capacity/CapacityScheduler.java   | 25 ++-
 .../scheduler/fair/FairScheduler.java   | 11 ---
 .../scheduler/fifo/FifoScheduler.java   |  2 +-
 .../scheduler/fair/TestFairScheduler.java   | 25 +++
 5 files changed, 70 insertions(+), 25 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d858dd9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
index a05ee26..d382263 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AbstractYarnScheduler.java
@@ -677,25 +677,12 @@ public abstract class AbstractYarnScheduler
 }
 
 if (rmContainer.getExecutionType() == ExecutionType.GUARANTEED) {
-  completedContainerInternal(rmContainer, containerStatus, event);
+  completeGuaranteedContainerInternal(rmContainer, containerStatus, event);
   completeOustandingUpdatesWhichAreReserved(
   rmContainer, containerStatus, event);
 } else {
-  ContainerId containerId = rmContainer.getContainerId();
-  // Inform the container
-  rmContainer.handle(
-  new RMContainerFinishedEvent(containerId, containerStatus, event));
-  SchedulerApplicationAttempt schedulerAttempt =
-  getCurrentAttemptForContainer(containerId);
-  if (schedulerAttempt != null) {
-schedulerAttempt.removeRMContainer(containerId);
-  }
-  if (LOG.isDebugEnabled()) {
-LOG.debug("Completed container: " + rmContainer.getContainerId() +
-" in state: " + rmContainer.getState() + " event:" + event);
-  }
-  getSchedulerNode(rmContainer.getNodeId()).releaseContainer(
-  rmContainer.getContainerId(), false);
+  completeOpportunisticContainerInternal(rmContainer, containerStatus,
+  event);
 }
 
 // If the container is getting killed in ACQUIRED state, the requester (AM
@@ -705,6 +692,12 @@ public abstract class AbstractYarnScheduler
 recoverResourceRequestForContainer(rmContainer);
   }
 
+  protected void completeOpportunisticContainerInternal(
+  RMContainer rmContainer, ContainerStatus containerStatus,
+  RMContainerEventType event) {
+completeGuaranteedContainerInternal(rmContainer, containerStatus, event);
+  }
+
   // Optimization:
   // Check if there are in-flight container updates and complete the
   // associated temp containers. These are removed when the app completes,
@@ -722,7 +715,7 @@ public abstract class AbstractYarnScheduler
 .getReservedSchedulerKey().getContainerToUpdate();
 if (containerToUpdate != null &&
 containerToUpdate.equals(containerStatus.getContainerId())) {
-  completedContainerInternal(resContainer,
+  completeGuaranteedContainerInternal(resContainer,
   ContainerStatus.newInstance(resContainer.getContainerId(),
   containerStatus.getState(), containerStatus
   .getDiagnostics(),
@@ -732,8 +725,9 @@ public abstract class AbstractYarnScheduler
 }
   }
 
-  // clean up a completed container
-  protected abstract void completedContainerInternal(RMContainer rmContainer,
+  // clean up a completed guaranteed container
+  protected 

hadoop git commit: YARN-8769. [Submarine] Allow user to specify customized quicklink(s) when submit Submarine job. Contributed by Wangda Tan.

2018-09-21 Thread sunilg
Repository: hadoop
Updated Branches:
  refs/heads/trunk a2752779a -> 0cd634610


YARN-8769. [Submarine] Allow user to specify customized quicklink(s) when 
submit Submarine job. Contributed by Wangda Tan.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0cd63461
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0cd63461
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0cd63461

Branch: refs/heads/trunk
Commit: 0cd63461021cc7cac39e7cc2bfaafd609c82fc79
Parents: a275277
Author: Sunil G 
Authored: Fri Sep 21 23:39:22 2018 +0530
Committer: Sunil G 
Committed: Fri Sep 21 23:39:22 2018 +0530

--
 .../yarn/submarine/client/cli/CliConstants.java |  1 +
 .../yarn/submarine/client/cli/RunJobCli.java|  8 ++
 .../submarine/client/cli/param/Quicklink.java   | 71 ++
 .../client/cli/param/RunJobParameters.java  | 18 
 .../yarnservice/YarnServiceJobSubmitter.java| 99 ++--
 .../runtimes/yarnservice/YarnServiceUtils.java  | 47 --
 .../yarnservice/TestYarnServiceRunJobCli.java   | 94 +++
 7 files changed, 303 insertions(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0cd63461/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/CliConstants.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/CliConstants.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/CliConstants.java
index d51ffc7..454ff1c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/CliConstants.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/CliConstants.java
@@ -49,6 +49,7 @@ public class CliConstants {
   public static final String WAIT_JOB_FINISH = "wait_job_finish";
   public static final String PS_DOCKER_IMAGE = "ps_docker_image";
   public static final String WORKER_DOCKER_IMAGE = "worker_docker_image";
+  public static final String QUICKLINK = "quicklink";
   public static final String TENSORBOARD_DOCKER_IMAGE =
   "tensorboard_docker_image";
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0cd63461/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/RunJobCli.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/RunJobCli.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/RunJobCli.java
index faa22d3..5054a94 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/RunJobCli.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/RunJobCli.java
@@ -117,6 +117,14 @@ public class RunJobCli extends AbstractCli {
 options.addOption(CliConstants.WORKER_DOCKER_IMAGE, true,
 "Specify docker image for WORKER, when this is not specified, WORKER "
 + "uses --" + CliConstants.DOCKER_IMAGE + " as default.");
+options.addOption(CliConstants.QUICKLINK, true, "Specify quicklink so YARN "
++ "web UI shows link to given role instance and port. When "
++ "--tensorboard is specified, quicklink to tensorboard instance will "
++ "be added automatically. The format of quick link is: "
++ "Quick_link_label=http(or https)://role-name:port. For example, "
++ "if you want to link to the first worker's 7070 port, and the text of "
++ "the quicklink is Notebook_UI, specify --quicklink "
++ "Notebook_UI=https://master-0:7070\");
 options.addOption("h", "help", false, "Print help");
 return options;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0cd63461/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-submarine/src/main/java/org/apache/hadoop/yarn/submarine/client/cli/param/Quicklink.java
--
diff --git 

hadoop git commit: YARN-8658. [AMRMProxy] Metrics for AMRMClientRelayer inside FederationInterceptor. Contributed by Young Chen.

2018-09-21 Thread gifuma
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 3a6ad9cd3 -> 60565976e


YARN-8658. [AMRMProxy] Metrics for AMRMClientRelayer inside 
FederationInterceptor. Contributed by Young Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/60565976
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/60565976
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/60565976

Branch: refs/heads/branch-2
Commit: 60565976e1f14970261a832089fa4fad0d14fd8f
Parents: 3a6ad9c
Author: Giovanni Matteo Fumarola 
Authored: Fri Sep 21 10:36:36 2018 -0700
Committer: Giovanni Matteo Fumarola 
Committed: Fri Sep 21 10:36:36 2018 -0700

--
 .../hadoop/yarn/server/AMRMClientRelayer.java   | 384 ++
 .../metrics/AMRMClientRelayerMetrics.java   | 368 +
 .../yarn/server/metrics/package-info.java   |  18 +
 .../yarn/server/uam/UnmanagedAMPoolManager.java |  30 +-
 .../server/uam/UnmanagedApplicationManager.java |  16 +-
 .../yarn/server/TestAMRMClientRelayer.java  |   2 +-
 .../metrics/TestAMRMClientRelayerMetrics.java   | 513 +++
 .../uam/TestUnmanagedApplicationManager.java|   2 +-
 .../amrmproxy/FederationInterceptor.java|  19 +-
 .../TestableFederationInterceptor.java  |   5 +-
 10 files changed, 1228 insertions(+), 129 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/60565976/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMRMClientRelayer.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMRMClientRelayer.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMRMClientRelayer.java
index 62898ec..a7ed373 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMRMClientRelayer.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/AMRMClientRelayer.java
@@ -38,6 +38,7 @@ import 
org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRespons
 import 
org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
 import 
org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
 import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest;
@@ -51,6 +52,7 @@ import 
org.apache.hadoop.yarn.exceptions.ApplicationMasterNotRegisteredException
 import 
org.apache.hadoop.yarn.exceptions.InvalidApplicationMasterRequestException;
 import org.apache.hadoop.yarn.exceptions.YarnException;
 import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.apache.hadoop.yarn.server.metrics.AMRMClientRelayerMetrics;
 import org.apache.hadoop.yarn.server.scheduler.ResourceRequestSet;
 import org.apache.hadoop.yarn.server.scheduler.ResourceRequestSetKey;
 import org.slf4j.Logger;
@@ -97,6 +99,15 @@ public class AMRMClientRelayer extends AbstractService
   private Set<ResourceRequest> ask =
   new TreeSet<>(new ResourceRequest.ResourceRequestComparator());
 
+  /**
+   * Data structures for pending and allocate latency metrics. This only 
applies
+   * for requests with non-zero allocationRequestId.
+   */
+  private Map pendingCountForMetrics = new HashMap<>();
+  private Map askTimeStamp = new HashMap<>();
+  // List of allocated containerId to avoid double counting
+  private Set<ContainerId> knownContainers = new HashSet<>();
+
   private Set<ContainerId> remotePendingRelease = new HashSet<>();
   private Set<ContainerId> release = new HashSet<>();
 
@@ -107,6 +118,7 @@ public class AMRMClientRelayer extends AbstractService
   private Map remotePendingChange =
   new HashMap<>();
   private Map change = new HashMap<>();
+  private Map changeTimeStamp = new HashMap<>();
 
   private ApplicationId appId;
 
@@ -114,16 +126,26 @@ public class AMRMClientRelayer extends AbstractService
   // heartbeat
   private volatile int resetResponseId;
 
+  private String rmId = "";
+  private volatile boolean shutdown = false;
+
+  private AMRMClientRelayerMetrics metrics;
+
   public AMRMClientRelayer() {
 super(AMRMClientRelayer.class.getName());
 this.resetResponseId = -1;
+this.metrics = AMRMClientRelayerMetrics.getInstance();
+this.rmClient = null;
+this.appId = null;
+this.rmId = "";
   }
 
   public 

svn commit: r1841570 - in /hadoop/common/site/main/publish/docs/r2.8.5: ./ api/ api/org/ api/org/apache/ api/org/apache/hadoop/ api/org/apache/hadoop/ant/ api/org/apache/hadoop/ant/condition/ api/org/

2018-09-21 Thread junping_du
Author: junping_du
Date: Fri Sep 21 11:37:12 2018
New Revision: 1841570

URL: http://svn.apache.org/viewvc?rev=1841570=rev
Log:
Updated site for release 2.8.5


[This commit notification would consist of 3444 parts, 
which exceeds the limit of 50 ones, so it was shortened to the summary.]




hadoop git commit: YARN-8628. [UI2] Few duplicated or inconsistent information displayed in UI2. Contributed by Akhil PB.

2018-09-21 Thread sunilg
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.1 e9315f668 -> 6699b9658


YARN-8628. [UI2] Few duplicated or inconsistent information displayed in UI2. 
Contributed by Akhil PB.

(cherry picked from commit a2752779ac1545f5e0a52fce3cff02a7007e95fb)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6699b965
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6699b965
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6699b965

Branch: refs/heads/branch-3.1
Commit: 6699b9658e7425da715dc92f71b857890fe609c8
Parents: e9315f6
Author: Sunil G 
Authored: Fri Sep 21 15:47:10 2018 +0530
Committer: Sunil G 
Committed: Fri Sep 21 16:06:22 2018 +0530

--
 .../src/main/webapp/app/controllers/yarn-app/components.js| 2 +-
 .../webapp/app/controllers/yarn-component-instance/info.js| 5 +++--
 .../webapp/app/controllers/yarn-component-instances/info.js   | 3 ++-
 .../main/webapp/app/routes/yarn-component-instance/info.js| 4 ++--
 .../main/webapp/app/serializers/yarn-component-instance.js| 1 -
 .../src/main/webapp/app/serializers/yarn-container.js | 2 +-
 .../src/main/webapp/app/serializers/yarn-service-component.js | 2 +-
 .../main/webapp/app/serializers/yarn-timeline-container.js| 2 +-
 .../src/main/webapp/app/templates/yarn-app/configs.hbs| 7 ---
 .../webapp/app/templates/yarn-component-instance/info.hbs | 4 
 10 files changed, 15 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6699b965/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/components.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/components.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/components.js
index 5981eb5..5a6c616 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/components.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/components.js
@@ -41,7 +41,7 @@ export default Ember.Controller.extend({
   getCellContent: function(row) {
 return {
   displayText: row.get('name'),
-  href: 
`#/yarn-component-instances/${row.get('name')}/info?service=${service}&=${appId}`
+  href: 
`#/yarn-component-instances/${row.get('name')}/info?service=${service}&appid=${appId}`
 };
   }
 }, {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6699b965/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instance/info.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instance/info.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instance/info.js
index e3abcb7..e920aa2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instance/info.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instance/info.js
@@ -19,7 +19,8 @@
 import Ember from 'ember';
 
 export default Ember.Controller.extend({
-  queryParams: ["appid", "service"],
+  queryParams: ["appid", "service", "containerid"],
   appid: undefined,
-  service: undefined
+  service: undefined,
+  containerid: undefined
 });

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6699b965/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instances/info.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instances/info.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instances/info.js
index 44cfe17..be4b4f3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instances/info.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instances/info.js
@@ -42,9 +42,10 @@ export default Ember.Controller.extend({
   getCellContent: function(row) {
 var component = row.get('component');
 var instance = row.get('instanceName');
+var containerId = row.get('containerId');
 return {
   text: instance,
-  href: 
`#/yarn-component-instance/${component}/instances/${instance}/info?appid=${appId}&=${serviceName}`
+  href: 

hadoop git commit: YARN-8628. [UI2] Few duplicated or inconsistent information displayed in UI2. Contributed by Akhil PB.

2018-09-21 Thread sunilg
Repository: hadoop
Updated Branches:
  refs/heads/trunk 524f7cd35 -> a2752779a


YARN-8628. [UI2] Few duplicated or inconsistent information displayed in UI2. 
Contributed by Akhil PB.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a2752779
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a2752779
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a2752779

Branch: refs/heads/trunk
Commit: a2752779ac1545f5e0a52fce3cff02a7007e95fb
Parents: 524f7cd
Author: Sunil G 
Authored: Fri Sep 21 15:47:10 2018 +0530
Committer: Sunil G 
Committed: Fri Sep 21 15:47:10 2018 +0530

--
 .../src/main/webapp/app/controllers/yarn-app/components.js| 2 +-
 .../webapp/app/controllers/yarn-component-instance/info.js| 5 +++--
 .../webapp/app/controllers/yarn-component-instances/info.js   | 3 ++-
 .../main/webapp/app/routes/yarn-component-instance/info.js| 4 ++--
 .../main/webapp/app/serializers/yarn-component-instance.js| 1 -
 .../src/main/webapp/app/serializers/yarn-container.js | 2 +-
 .../src/main/webapp/app/serializers/yarn-service-component.js | 2 +-
 .../main/webapp/app/serializers/yarn-timeline-container.js| 2 +-
 .../src/main/webapp/app/templates/yarn-app/configs.hbs| 7 ---
 .../webapp/app/templates/yarn-component-instance/info.hbs | 4 
 10 files changed, 15 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2752779/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/components.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/components.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/components.js
index 5981eb5..5a6c616 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/components.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-app/components.js
@@ -41,7 +41,7 @@ export default Ember.Controller.extend({
   getCellContent: function(row) {
 return {
   displayText: row.get('name'),
-  href: 
`#/yarn-component-instances/${row.get('name')}/info?service=${service}&=${appId}`
+  href: 
`#/yarn-component-instances/${row.get('name')}/info?service=${service}&appid=${appId}`
 };
   }
 }, {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2752779/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instance/info.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instance/info.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instance/info.js
index e3abcb7..e920aa2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instance/info.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instance/info.js
@@ -19,7 +19,8 @@
 import Ember from 'ember';
 
 export default Ember.Controller.extend({
-  queryParams: ["appid", "service"],
+  queryParams: ["appid", "service", "containerid"],
   appid: undefined,
-  service: undefined
+  service: undefined,
+  containerid: undefined
 });

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2752779/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instances/info.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instances/info.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instances/info.js
index 44cfe17..be4b4f3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instances/info.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/controllers/yarn-component-instances/info.js
@@ -42,9 +42,10 @@ export default Ember.Controller.extend({
   getCellContent: function(row) {
 var component = row.get('component');
 var instance = row.get('instanceName');
+var containerId = row.get('containerId');
 return {
   text: instance,
-  href: `#/yarn-component-instance/${component}/instances/${instance}/info?appid=${appId}&&service=${serviceName}`
+  href: `#/yarn-component-instance/${component}/instances/${instance}/info?appid=${appId}&service=${serviceName}&containerid=${containerId}`
 };
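
The UI2 fix above amounts to emitting exactly one '&' between query-string parameters and threading the new containerid parameter through. A minimal sketch of that joining rule, with hypothetical names (this is not UI2 or Hadoop code):

import java.util.LinkedHashMap;
import java.util.Map;

public class QueryStringDemo {
  // Join key=value pairs with a single '&' separator between them.
  static String toQuery(Map<String, String> params) {
    StringBuilder sb = new StringBuilder();
    for (Map.Entry<String, String> e : params.entrySet()) {
      if (sb.length() > 0) {
        sb.append('&');
      }
      sb.append(e.getKey()).append('=').append(e.getValue());
    }
    return sb.toString();
  }

  public static void main(String[] args) {
    Map<String, String> p = new LinkedHashMap<>();
    p.put("appid", "application_0001");
    p.put("service", "sleeper");
    p.put("containerid", "container_0001");
    // Prints appid=application_0001&service=sleeper&containerid=container_0001
    System.out.println(toQuery(p));
  }
}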
  

hadoop git commit: HADOOP-15778. ABFS: Fix client side throttling for read. Contributed by Sneha Varma.

2018-09-21 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/HADOOP-15407 a5692c2da -> d0b4624c8


HADOOP-15778. ABFS: Fix client side throttling for read.
Contributed by Sneha Varma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d0b4624c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d0b4624c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d0b4624c

Branch: refs/heads/HADOOP-15407
Commit: d0b4624c88fc48932a7c2800185ed48bb1c5e0fe
Parents: a5692c2
Author: Steve Loughran 
Authored: Fri Sep 21 11:06:24 2018 +0100
Committer: Steve Loughran 
Committed: Fri Sep 21 11:06:24 2018 +0100

--
 .../services/AbfsClientThrottlingIntercept.java | 22 ++--
 .../fs/azurebfs/services/AbfsRestOperation.java |  3 ++-
 2 files changed, 22 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0b4624c/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClientThrottlingIntercept.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClientThrottlingIntercept.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClientThrottlingIntercept.java
index 97ea2a6..1c6ce17 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClientThrottlingIntercept.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsClientThrottlingIntercept.java
@@ -19,9 +19,12 @@
 package org.apache.hadoop.fs.azurebfs.services;
 
 import java.net.HttpURLConnection;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import org.apache.hadoop.fs.azurebfs.constants.HttpHeaderConfigurations;
+
 /**
 * Throttles Azure Blob File System read and write operations to achieve maximum
  * throughput by minimizing errors.  The errors occur when the account ingress
@@ -37,6 +40,7 @@ import org.slf4j.LoggerFactory;
 public final class AbfsClientThrottlingIntercept {
   private static final Logger LOG = LoggerFactory.getLogger(
   AbfsClientThrottlingIntercept.class);
+  private static final String RANGE_PREFIX = "bytes=";
   private static AbfsClientThrottlingIntercept singleton = null;
   private AbfsClientThrottlingAnalyzer readThrottler = null;
   private AbfsClientThrottlingAnalyzer writeThrottler = null;
@@ -82,7 +86,8 @@ public final class AbfsClientThrottlingIntercept {
 }
 break;
   case ReadFile:
-contentLength = abfsHttpOperation.getBytesReceived();
+String range = abfsHttpOperation.getConnection().getRequestProperty(HttpHeaderConfigurations.RANGE);
+contentLength = getContentLengthIfKnown(range);
 if (contentLength > 0) {
   singleton.readThrottler.addBytesTransferred(contentLength,
   isFailedOperation);
@@ -114,4 +119,17 @@ public final class AbfsClientThrottlingIntercept {
 break;
 }
   }
-}
\ No newline at end of file
+
+  private static long getContentLengthIfKnown(String range) {
+long contentLength = 0;
+// Format is "bytes=%d-%d"
+if (range != null && range.startsWith(RANGE_PREFIX)) {
+  String[] offsets = range.substring(RANGE_PREFIX.length()).split("-");
+  if (offsets.length == 2) {
+contentLength = Long.parseLong(offsets[1]) - Long.parseLong(offsets[0])
++ 1;
+  }
+}
+return contentLength;
+  }
+}
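
The throttling fix above derives the read size from the request's Range header rather than from bytes already received. A standalone sketch of the "bytes=%d-%d" arithmetic in getContentLengthIfKnown, assuming well-formed numeric offsets; the class and method names here are hypothetical, not Hadoop APIs:

public final class RangeLengthDemo {
  private static final String RANGE_PREFIX = "bytes=";

  // Returns 0 when the length cannot be determined from the header.
  static long rangeLength(String range) {
    if (range == null || !range.startsWith(RANGE_PREFIX)) {
      return 0;
    }
    String[] offsets = range.substring(RANGE_PREFIX.length()).split("-");
    if (offsets.length != 2) {
      return 0;
    }
    // HTTP ranges are inclusive, so "bytes=0-4194303" covers
    // end - start + 1 = 4194304 bytes.
    return Long.parseLong(offsets[1]) - Long.parseLong(offsets[0]) + 1;
  }

  public static void main(String[] args) {
    System.out.println(rangeLength("bytes=0-4194303")); // 4194304
    System.out.println(rangeLength(null));              // 0
  }
}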

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0b4624c/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java
--
diff --git 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java
 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java
index 9a71879..3f5717e 100644
--- 
a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java
+++ 
b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azurebfs/services/AbfsRestOperation.java
@@ -156,9 +156,10 @@ public class AbfsRestOperation {
 client.getAccessToken());
   }
 
+  AbfsClientThrottlingIntercept.sendingRequest(operationType);
+
   if (hasRequestBody) {
 // HttpUrlConnection requires
-AbfsClientThrottlingIntercept.sendingRequest(operationType);
 httpOperation.sendRequest(buffer, bufferOffset, bufferLength);
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org

hadoop git commit: HADOOP-15735. backport HADOOP-11687 Intermittent signature match failures in S3AFileSystem. Contributed by MunShik JOUNG.

2018-09-21 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 991461340 -> b9b737732


HADOOP-15735. backport HADOOP-11687 Intermittent signature match failures in 
S3AFileSystem.
Contributed by MunShik JOUNG.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b9b73773
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b9b73773
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b9b73773

Branch: refs/heads/branch-2.7
Commit: b9b737732f55db466a44e4fde06e806fa3b43af2
Parents: 9914613
Author: Steve Loughran 
Authored: Fri Sep 21 10:37:59 2018 +0100
Committer: Steve Loughran 
Committed: Fri Sep 21 10:37:59 2018 +0100

--
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java | 61 +++-
 1 file changed, 60 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9b73773/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
index 9880037..1f9542a 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFileSystem.java
@@ -26,6 +26,7 @@ import java.net.URI;
 import java.util.ArrayList;
 import java.util.Date;
 import java.util.List;
+import java.util.Map;
 import java.util.concurrent.LinkedBlockingQueue;
 import java.util.concurrent.ThreadFactory;
 import java.util.concurrent.ThreadPoolExecutor;
@@ -1086,7 +1087,7 @@ public class S3AFileSystem extends FileSystem {
 }
 
 ObjectMetadata srcom = s3.getObjectMetadata(bucket, srcKey);
-final ObjectMetadata dstom = srcom.clone();
+ObjectMetadata dstom = cloneObjectMetadata(srcom);
 if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
   dstom.setServerSideEncryption(serverSideEncryptionAlgorithm);
 }
@@ -1193,6 +1194,64 @@ public class S3AFileSystem extends FileSystem {
   }
 
   /**
+   * Creates a copy of the passed {@link ObjectMetadata}.
+   * Does so without using the {@link ObjectMetadata#clone()} method,
+   * to avoid copying unnecessary headers.
+   * @param source the {@link ObjectMetadata} to copy
+   * @return a copy of {@link ObjectMetadata} with only relevant attributes
+   */
+  private ObjectMetadata cloneObjectMetadata(ObjectMetadata source) {
+// This approach may be too brittle, especially if
+// in future there are new attributes added to ObjectMetadata
+// that we do not explicitly call to set here
+ObjectMetadata ret = new ObjectMetadata();
+
+// Non null attributes
+ret.setContentLength(source.getContentLength());
+
+// Possibly null attributes
+// Allowing nulls to pass breaks it during later use
+if (source.getCacheControl() != null) {
+  ret.setCacheControl(source.getCacheControl());
+}
+if (source.getContentDisposition() != null) {
+  ret.setContentDisposition(source.getContentDisposition());
+}
+if (source.getContentEncoding() != null) {
+  ret.setContentEncoding(source.getContentEncoding());
+}
+if (source.getContentMD5() != null) {
+  ret.setContentMD5(source.getContentMD5());
+}
+if (source.getContentType() != null) {
+  ret.setContentType(source.getContentType());
+}
+if (source.getExpirationTime() != null) {
+  ret.setExpirationTime(source.getExpirationTime());
+}
+if (source.getExpirationTimeRuleId() != null) {
+  ret.setExpirationTimeRuleId(source.getExpirationTimeRuleId());
+}
+if (source.getHttpExpiresDate() != null) {
+  ret.setHttpExpiresDate(source.getHttpExpiresDate());
+}
+if (source.getLastModified() != null) {
+  ret.setLastModified(source.getLastModified());
+}
+if (source.getOngoingRestore() != null) {
+  ret.setOngoingRestore(source.getOngoingRestore());
+}
+if (source.getRestoreExpirationTime() != null) {
+  ret.setRestoreExpirationTime(source.getRestoreExpirationTime());
+}
+
+for (Map.Entry<String, String> e : source.getUserMetadata().entrySet()) {
+  ret.addUserMetadata(e.getKey(), e.getValue());
+}
+return ret;
+  }
+
+  /**
* Return the number of bytes that large input files should be optimally
* be split into to minimize i/o time.
* @deprecated use {@link #getDefaultBlockSize(Path)} instead

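The method above copies an allow-list of attributes and null-checks each optional one instead of calling clone(), which, per the commit message, avoids carrying unnecessary headers into the copy request. A toy sketch of that selective-copy idiom, with hypothetical types (not Hadoop or AWS SDK code):

import java.util.HashMap;
import java.util.Map;

public class SelectiveCopyDemo {
  static class Meta {
    long contentLength;                           // always present
    String contentType;                           // nullable
    Map<String, String> userMetadata = new HashMap<>();
  }

  // Copy only fields known to be relevant, guarding the nullable ones.
  static Meta selectiveCopy(Meta source) {
    Meta ret = new Meta();
    ret.contentLength = source.contentLength;
    if (source.contentType != null) {
      ret.contentType = source.contentType;
    }
    ret.userMetadata.putAll(source.userMetadata);
    return ret;
  }

  public static void main(String[] args) {
    Meta m = new Meta();
    m.contentLength = 42L;
    m.userMetadata.put("owner", "demo");
    System.out.println(selectiveCopy(m).userMetadata); // {owner=demo}
  }
}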

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org